GNU Linux-libre 4.9.337-gnu1
arch/powerpc/kernel/ptrace.c
1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36
37 #include <asm/uaccess.h>
38 #include <asm/page.h>
39 #include <asm/pgtable.h>
40 #include <asm/switch_to.h>
41 #include <asm/tm.h>
42 #include <asm/asm-prototypes.h>
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/syscalls.h>
46
47 /*
48  * The parameter save area on the stack is used to store arguments being passed
49  * to the callee function and is located at a fixed offset from the stack pointer.
50  */
51 #ifdef CONFIG_PPC32
52 #define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
53 #else /* CONFIG_PPC32 */
54 #define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
55 #endif
56
57 struct pt_regs_offset {
58         const char *name;
59         int offset;
60 };
61
62 #define STR(s)  #s                      /* convert to string */
63 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
64 #define GPR_OFFSET_NAME(num)    \
65         {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
66         {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
67 #define REG_OFFSET_END {.name = NULL, .offset = 0}
68
69 #define TVSO(f) (offsetof(struct thread_vr_state, f))
70 #define TFSO(f) (offsetof(struct thread_fp_state, f))
71 #define TSO(f)  (offsetof(struct thread_struct, f))
72
73 static const struct pt_regs_offset regoffset_table[] = {
74         GPR_OFFSET_NAME(0),
75         GPR_OFFSET_NAME(1),
76         GPR_OFFSET_NAME(2),
77         GPR_OFFSET_NAME(3),
78         GPR_OFFSET_NAME(4),
79         GPR_OFFSET_NAME(5),
80         GPR_OFFSET_NAME(6),
81         GPR_OFFSET_NAME(7),
82         GPR_OFFSET_NAME(8),
83         GPR_OFFSET_NAME(9),
84         GPR_OFFSET_NAME(10),
85         GPR_OFFSET_NAME(11),
86         GPR_OFFSET_NAME(12),
87         GPR_OFFSET_NAME(13),
88         GPR_OFFSET_NAME(14),
89         GPR_OFFSET_NAME(15),
90         GPR_OFFSET_NAME(16),
91         GPR_OFFSET_NAME(17),
92         GPR_OFFSET_NAME(18),
93         GPR_OFFSET_NAME(19),
94         GPR_OFFSET_NAME(20),
95         GPR_OFFSET_NAME(21),
96         GPR_OFFSET_NAME(22),
97         GPR_OFFSET_NAME(23),
98         GPR_OFFSET_NAME(24),
99         GPR_OFFSET_NAME(25),
100         GPR_OFFSET_NAME(26),
101         GPR_OFFSET_NAME(27),
102         GPR_OFFSET_NAME(28),
103         GPR_OFFSET_NAME(29),
104         GPR_OFFSET_NAME(30),
105         GPR_OFFSET_NAME(31),
106         REG_OFFSET_NAME(nip),
107         REG_OFFSET_NAME(msr),
108         REG_OFFSET_NAME(ctr),
109         REG_OFFSET_NAME(link),
110         REG_OFFSET_NAME(xer),
111         REG_OFFSET_NAME(ccr),
112 #ifdef CONFIG_PPC64
113         REG_OFFSET_NAME(softe),
114 #else
115         REG_OFFSET_NAME(mq),
116 #endif
117         REG_OFFSET_NAME(trap),
118         REG_OFFSET_NAME(dar),
119         REG_OFFSET_NAME(dsisr),
120         REG_OFFSET_END,
121 };
122
123 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
124 static void flush_tmregs_to_thread(struct task_struct *tsk)
125 {
126         /*
127          * If the task is not current, it will already have been flushed to
128          * its thread_struct during __switch_to().
129          *
130          * A reclaim flushes ALL the state; if we are not in a transaction, just
131          * save the TM SPRs from the live registers into the thread structures.
132          */
133
134         if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
135                 return;
136
137         if (MSR_TM_SUSPENDED(mfmsr())) {
138                 tm_reclaim_current(TM_CAUSE_SIGNAL);
139         } else {
140                 tm_enable();
141                 tm_save_sprs(&(tsk->thread));
142         }
143 }
144 #else
145 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
146 #endif
147
148 /**
149  * regs_query_register_offset() - query register offset from its name
150  * @name:       the name of a register
151  *
152  * regs_query_register_offset() returns the offset of a register in struct
153  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
154  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
155 int regs_query_register_offset(const char *name)
156 {
157         const struct pt_regs_offset *roff;
158         for (roff = regoffset_table; roff->name != NULL; roff++)
159                 if (!strcmp(roff->name, name))
160                         return roff->offset;
161         return -EINVAL;
162 }
163
164 /**
165  * regs_query_register_name() - query register name from its offset
166  * @offset:     the offset of a register in struct pt_regs.
167  *
168  * regs_query_register_name() returns the name of a register from its
169  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
170  */
171 const char *regs_query_register_name(unsigned int offset)
172 {
173         const struct pt_regs_offset *roff;
174         for (roff = regoffset_table; roff->name != NULL; roff++)
175                 if (roff->offset == offset)
176                         return roff->name;
177         return NULL;
178 }
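
/*
 * Illustrative sketch (not part of the upstream file): how an in-kernel
 * user such as a kprobes handler might use the two query helpers above.
 * regs_get_register() is assumed here to be the asm/ptrace.h accessor
 * that reads a register from struct pt_regs by byte offset:
 *
 *	static void example_show_reg(struct pt_regs *regs, const char *name)
 *	{
 *		int offset = regs_query_register_offset(name);
 *
 *		if (offset >= 0)
 *			pr_info("%s = 0x%lx\n", name,
 *				regs_get_register(regs, offset));
 *	}
 */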
179
180 /*
181  * This does not yet catch signals sent when the child dies
182  * (from exit.c or signal.c).
183  */
184
185 /*
186  * Set of msr bits that gdb can change on behalf of a process.
187  */
188 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
189 #define MSR_DEBUGCHANGE 0
190 #else
191 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
192 #endif
193
194 /*
195  * Max register writable via ptrace_put_reg()
196  */
197 #ifdef CONFIG_PPC32
198 #define PT_MAX_PUT_REG  PT_MQ
199 #else
200 #define PT_MAX_PUT_REG  PT_CCR
201 #endif
202
203 static unsigned long get_user_msr(struct task_struct *task)
204 {
205         return task->thread.regs->msr | task->thread.fpexc_mode;
206 }
207
208 static int set_user_msr(struct task_struct *task, unsigned long msr)
209 {
210         task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
211         task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
212         return 0;
213 }
214
215 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
216 static unsigned long get_user_ckpt_msr(struct task_struct *task)
217 {
218         return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
219 }
220
221 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
222 {
223         task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
224         task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
225         return 0;
226 }
227
228 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
229 {
230         task->thread.ckpt_regs.trap = trap & 0xfff0;
231         return 0;
232 }
233 #endif
234
235 #ifdef CONFIG_PPC64
236 static int get_user_dscr(struct task_struct *task, unsigned long *data)
237 {
238         *data = task->thread.dscr;
239         return 0;
240 }
241
242 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
243 {
244         task->thread.dscr = dscr;
245         task->thread.dscr_inherit = 1;
246         return 0;
247 }
248 #else
249 static int get_user_dscr(struct task_struct *task, unsigned long *data)
250 {
251         return -EIO;
252 }
253
254 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
255 {
256         return -EIO;
257 }
258 #endif
259
260 /*
261  * We prevent mucking around with the reserved area of trap
262  * which is used internally by the kernel.
263  */
264 static int set_user_trap(struct task_struct *task, unsigned long trap)
265 {
266         task->thread.regs->trap = trap & 0xfff0;
267         return 0;
268 }
269
270 /*
271  * Get contents of register REGNO in task TASK.
272  */
273 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
274 {
275         if ((task->thread.regs == NULL) || !data)
276                 return -EIO;
277
278         if (regno == PT_MSR) {
279                 *data = get_user_msr(task);
280                 return 0;
281         }
282
283         if (regno == PT_DSCR)
284                 return get_user_dscr(task, data);
285
286         if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
287                 *data = ((unsigned long *)task->thread.regs)[regno];
288                 return 0;
289         }
290
291         return -EIO;
292 }
293
294 /*
295  * Write contents of register REGNO in task TASK.
296  */
297 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
298 {
299         if (task->thread.regs == NULL)
300                 return -EIO;
301
302         if (regno == PT_MSR)
303                 return set_user_msr(task, data);
304         if (regno == PT_TRAP)
305                 return set_user_trap(task, data);
306         if (regno == PT_DSCR)
307                 return set_user_dscr(task, data);
308
309         if (regno <= PT_MAX_PUT_REG) {
310                 ((unsigned long *)task->thread.regs)[regno] = data;
311                 return 0;
312         }
313         return -EIO;
314 }
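
/*
 * Illustrative userspace sketch (not part of this file): ptrace_get_reg()
 * and ptrace_put_reg() back the legacy PTRACE_PEEKUSER/PTRACE_POKEUSER
 * requests, where the "address" argument is conventionally the PT_* index
 * scaled by the word size.  Reading the MSR of a stopped tracee might
 * look like this (error handling omitted):
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>		// PT_MSR and friends
 *
 *	long msr = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)(PT_MSR * sizeof(long)), NULL);
 */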
315
316 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
317                    unsigned int pos, unsigned int count,
318                    void *kbuf, void __user *ubuf)
319 {
320         int i, ret;
321
322         if (target->thread.regs == NULL)
323                 return -EIO;
324
325         if (!FULL_REGS(target->thread.regs)) {
326                 /* We have a partial register set.  Fill 14-31 with bogus values */
327                 for (i = 14; i < 32; i++)
328                         target->thread.regs->gpr[i] = NV_REG_POISON;
329         }
330
331         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
332                                   target->thread.regs,
333                                   0, offsetof(struct pt_regs, msr));
334         if (!ret) {
335                 unsigned long msr = get_user_msr(target);
336                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
337                                           offsetof(struct pt_regs, msr),
338                                           offsetof(struct pt_regs, msr) +
339                                           sizeof(msr));
340         }
341
342         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
343                      offsetof(struct pt_regs, msr) + sizeof(long));
344
345         if (!ret)
346                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
347                                           &target->thread.regs->orig_gpr3,
348                                           offsetof(struct pt_regs, orig_gpr3),
349                                           sizeof(struct pt_regs));
350         if (!ret)
351                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
352                                                sizeof(struct pt_regs), -1);
353
354         return ret;
355 }
356
357 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
358                    unsigned int pos, unsigned int count,
359                    const void *kbuf, const void __user *ubuf)
360 {
361         unsigned long reg;
362         int ret;
363
364         if (target->thread.regs == NULL)
365                 return -EIO;
366
367         CHECK_FULL_REGS(target->thread.regs);
368
369         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
370                                  target->thread.regs,
371                                  0, PT_MSR * sizeof(reg));
372
373         if (!ret && count > 0) {
374                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
375                                          PT_MSR * sizeof(reg),
376                                          (PT_MSR + 1) * sizeof(reg));
377                 if (!ret)
378                         ret = set_user_msr(target, reg);
379         }
380
381         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
382                      offsetof(struct pt_regs, msr) + sizeof(long));
383
384         if (!ret)
385                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
386                                          &target->thread.regs->orig_gpr3,
387                                          PT_ORIG_R3 * sizeof(reg),
388                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
389
390         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
391                 ret = user_regset_copyin_ignore(
392                         &pos, &count, &kbuf, &ubuf,
393                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
394                         PT_TRAP * sizeof(reg));
395
396         if (!ret && count > 0) {
397                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
398                                          PT_TRAP * sizeof(reg),
399                                          (PT_TRAP + 1) * sizeof(reg));
400                 if (!ret)
401                         ret = set_user_trap(target, reg);
402         }
403
404         if (!ret)
405                 ret = user_regset_copyin_ignore(
406                         &pos, &count, &kbuf, &ubuf,
407                         (PT_TRAP + 1) * sizeof(reg), -1);
408
409         return ret;
410 }
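
/*
 * Illustrative userspace sketch (not part of this file): the GPR regset
 * above is what PTRACE_GETREGSET/PTRACE_SETREGSET expose under the
 * NT_PRSTATUS note.  Assuming a stopped tracee and the usual headers
 * (<sys/ptrace.h>, <sys/uio.h>, <elf.h>, <asm/ptrace.h>, <stdio.h>),
 * a debugger could read it roughly like this:
 *
 *	elf_gregset_t gregs;
 *	struct iovec iov = {
 *		.iov_base = &gregs,
 *		.iov_len  = sizeof(gregs),
 *	};
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		printf("nip = 0x%lx\n", (unsigned long)gregs[PT_NIP]);
 */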
411
412 /*
413  * Regardless of transactions, 'fp_state' holds the current running
414  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
415  * value of all FPR registers for the current transaction.
416  *
417  * Userspace interface buffer layout:
418  *
419  * struct data {
420  *      u64     fpr[32];
421  *      u64     fpscr;
422  * };
423  */
424 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
425                    unsigned int pos, unsigned int count,
426                    void *kbuf, void __user *ubuf)
427 {
428 #ifdef CONFIG_VSX
429         u64 buf[33];
430         int i;
431
432         flush_fp_to_thread(target);
433
434         /* copy to local buffer then write that out */
435         for (i = 0; i < 32 ; i++)
436                 buf[i] = target->thread.TS_FPR(i);
437         buf[32] = target->thread.fp_state.fpscr;
438         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
439 #else
440         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
441                      offsetof(struct thread_fp_state, fpr[32]));
442
443         flush_fp_to_thread(target);
444
445         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
446                                    &target->thread.fp_state, 0, -1);
447 #endif
448 }
449
450 /*
451  * Regardless of transactions, 'fp_state' holds the current running
452  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
453  * value of all FPR registers for the current transaction.
454  *
455  * Userspace interface buffer layout:
456  *
457  * struct data {
458  *      u64     fpr[32];
459  *      u64     fpscr;
460  * };
461  *
462  */
463 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
464                    unsigned int pos, unsigned int count,
465                    const void *kbuf, const void __user *ubuf)
466 {
467 #ifdef CONFIG_VSX
468         u64 buf[33];
469         int i;
470
471         flush_fp_to_thread(target);
472
473         for (i = 0; i < 32 ; i++)
474                 buf[i] = target->thread.TS_FPR(i);
475         buf[32] = target->thread.fp_state.fpscr;
476
477         /* copy to local buffer then write that out */
478         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
479         if (i)
480                 return i;
481
482         for (i = 0; i < 32 ; i++)
483                 target->thread.TS_FPR(i) = buf[i];
484         target->thread.fp_state.fpscr = buf[32];
485         return 0;
486 #else
487         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
488                      offsetof(struct thread_fp_state, fpr[32]));
489
490         flush_fp_to_thread(target);
491
492         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
493                                   &target->thread.fp_state, 0, -1);
494 #endif
495 }
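
/*
 * Illustrative userspace sketch (not part of this file): the layout
 * documented above (32 FPRs followed by fpscr) is exported as the
 * NT_PRFPREG note.  The structure name is made up for the example:
 *
 *	struct ppc_fpregs {
 *		unsigned long long fpr[32];
 *		unsigned long long fpscr;
 *	};
 *
 *	struct ppc_fpregs fp;
 *	struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 */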
496
497 #ifdef CONFIG_ALTIVEC
498 /*
499  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
500  * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
501  * corresponding vector registers.  Quadword 32 contains the vscr as the
502  * last word (offset 12) within that quadword.  Quadword 33 contains the
503  * vrsave as the first word (offset 0) within the quadword.
504  *
505  * This definition of the VMX state is compatible with the current PPC32
506  * ptrace interface.  This allows signal handling and ptrace to use the
507  * same structures.  This also simplifies the implementation of a bi-arch
508  * (combined 32- and 64-bit) gdb.
509  */
510
511 static int vr_active(struct task_struct *target,
512                      const struct user_regset *regset)
513 {
514         flush_altivec_to_thread(target);
515         return target->thread.used_vr ? regset->n : 0;
516 }
517
518 /*
519  * Regardless of transactions, 'vr_state' holds the current running
520  * value of all the VMX registers and 'ckvr_state' holds the last
521  * checkpointed value of all the VMX registers for the current
522  * transaction to fall back on in case it aborts.
523  *
524  * Userspace interface buffer layout:
525  *
526  * struct data {
527  *      vector128       vr[32];
528  *      vector128       vscr;
529  *      vector128       vrsave;
530  * };
531  */
532 static int vr_get(struct task_struct *target, const struct user_regset *regset,
533                   unsigned int pos, unsigned int count,
534                   void *kbuf, void __user *ubuf)
535 {
536         int ret;
537
538         flush_altivec_to_thread(target);
539
540         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
541                      offsetof(struct thread_vr_state, vr[32]));
542
543         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
544                                   &target->thread.vr_state, 0,
545                                   33 * sizeof(vector128));
546         if (!ret) {
547                 /*
548                  * Copy out only the low-order word of vrsave.
549                  */
550                 int start, end;
551                 union {
552                         elf_vrreg_t reg;
553                         u32 word;
554                 } vrsave;
555                 memset(&vrsave, 0, sizeof(vrsave));
556
557                 vrsave.word = target->thread.vrsave;
558
559                 start = 33 * sizeof(vector128);
560                 end = start + sizeof(vrsave);
561                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
562                                           start, end);
563         }
564
565         return ret;
566 }
567
568 /*
569  * Regardless of transactions, 'vr_state' holds the current running
570  * value of all the VMX registers and 'ckvr_state' holds the last
571  * checkpointed value of all the VMX registers for the current
572  * transaction to fall back on in case it aborts.
573  *
574  * Userspace interface buffer layout:
575  *
576  * struct data {
577  *      vector128       vr[32];
578  *      vector128       vscr;
579  *      vector128       vrsave;
580  * };
581  */
582 static int vr_set(struct task_struct *target, const struct user_regset *regset,
583                   unsigned int pos, unsigned int count,
584                   const void *kbuf, const void __user *ubuf)
585 {
586         int ret;
587
588         flush_altivec_to_thread(target);
589
590         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
591                      offsetof(struct thread_vr_state, vr[32]));
592
593         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
594                                  &target->thread.vr_state, 0,
595                                  33 * sizeof(vector128));
596         if (!ret && count > 0) {
597                 /*
598                  * We use only the first word of vrsave.
599                  */
600                 int start, end;
601                 union {
602                         elf_vrreg_t reg;
603                         u32 word;
604                 } vrsave;
605                 memset(&vrsave, 0, sizeof(vrsave));
606
607                 vrsave.word = target->thread.vrsave;
608
609                 start = 33 * sizeof(vector128);
610                 end = start + sizeof(vrsave);
611                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
612                                          start, end);
613                 if (!ret)
614                         target->thread.vrsave = vrsave.word;
615         }
616
617         return ret;
618 }
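
/*
 * Illustrative userspace sketch (not part of this file): the 34-quadword
 * layout described above is exported as the NT_PPC_VMX regset note (the
 * older PTRACE_GETVRREGS request transfers the same data).  The structure
 * name below is made up for the example:
 *
 *	struct ppc_vrregs {
 *		unsigned char	vr[32][16];
 *		unsigned char	vscr[16];	// vscr word placed as above
 *		unsigned char	vrsave[16];	// vrsave word placed as above
 *	};
 *
 *	struct ppc_vrregs vrs;
 *	struct iovec iov = { .iov_base = &vrs, .iov_len = sizeof(vrs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_VMX, &iov);
 */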
619 #endif /* CONFIG_ALTIVEC */
620
621 #ifdef CONFIG_VSX
622 /*
623  * Currently, to set and get all the VSX state, you need to call
624  * the FP and VMX calls as well.  This only gets/sets the lower 32
625  * 128-bit VSX registers.
626  */
627
628 static int vsr_active(struct task_struct *target,
629                       const struct user_regset *regset)
630 {
631         flush_vsx_to_thread(target);
632         return target->thread.used_vsr ? regset->n : 0;
633 }
634
635 /*
636  * Regardless of transactions, 'fp_state' holds the current running
637  * value of all FPR registers and 'ckfp_state' holds the last
638  * checkpointed value of all FPR registers for the current
639  * transaction.
640  *
641  * Userspace interface buffer layout:
642  *
643  * struct data {
644  *      u64     vsx[32];
645  * };
646  */
647 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
648                    unsigned int pos, unsigned int count,
649                    void *kbuf, void __user *ubuf)
650 {
651         u64 buf[32];
652         int ret, i;
653
654         flush_tmregs_to_thread(target);
655         flush_fp_to_thread(target);
656         flush_altivec_to_thread(target);
657         flush_vsx_to_thread(target);
658
659         for (i = 0; i < 32 ; i++)
660                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
661
662         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
663                                   buf, 0, 32 * sizeof(double));
664
665         return ret;
666 }
667
668 /*
669  * Regardless of transactions, 'fp_state' holds the current running
670  * value of all FPR registers and 'ckfp_state' holds the last
671  * checkpointed value of all FPR registers for the current
672  * transaction.
673  *
674  * Userspace interface buffer layout:
675  *
676  * struct data {
677  *      u64     vsx[32];
678  * };
679  */
680 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
681                    unsigned int pos, unsigned int count,
682                    const void *kbuf, const void __user *ubuf)
683 {
684         u64 buf[32];
685         int ret, i;
686
687         flush_tmregs_to_thread(target);
688         flush_fp_to_thread(target);
689         flush_altivec_to_thread(target);
690         flush_vsx_to_thread(target);
691
692         for (i = 0; i < 32 ; i++)
693                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
694
695         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
696                                  buf, 0, 32 * sizeof(double));
697         if (!ret)
698                 for (i = 0; i < 32 ; i++)
699                         target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
700
701         return ret;
702 }
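
/*
 * Illustrative userspace sketch (not part of this file): this regset is
 * exported as the NT_PPC_VSX note and carries only the low doubleword of
 * each of VSR0-31 (the halves not covered by the FPR regset), i.e. 32
 * u64 values:
 *
 *	unsigned long long vsx[32];
 *	struct iovec iov = { .iov_base = vsx, .iov_len = sizeof(vsx) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_VSX, &iov);
 */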
703 #endif /* CONFIG_VSX */
704
705 #ifdef CONFIG_SPE
706
707 /*
708  * For get_evrregs/set_evrregs functions 'data' has the following layout:
709  *
710  * struct {
711  *   u32 evr[32];
712  *   u64 acc;
713  *   u32 spefscr;
714  * }
715  */
716
717 static int evr_active(struct task_struct *target,
718                       const struct user_regset *regset)
719 {
720         flush_spe_to_thread(target);
721         return target->thread.used_spe ? regset->n : 0;
722 }
723
724 static int evr_get(struct task_struct *target, const struct user_regset *regset,
725                    unsigned int pos, unsigned int count,
726                    void *kbuf, void __user *ubuf)
727 {
728         int ret;
729
730         flush_spe_to_thread(target);
731
732         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
733                                   &target->thread.evr,
734                                   0, sizeof(target->thread.evr));
735
736         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
737                      offsetof(struct thread_struct, spefscr));
738
739         if (!ret)
740                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
741                                           &target->thread.acc,
742                                           sizeof(target->thread.evr), -1);
743
744         return ret;
745 }
746
747 static int evr_set(struct task_struct *target, const struct user_regset *regset,
748                    unsigned int pos, unsigned int count,
749                    const void *kbuf, const void __user *ubuf)
750 {
751         int ret;
752
753         flush_spe_to_thread(target);
754
755         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
756                                  &target->thread.evr,
757                                  0, sizeof(target->thread.evr));
758
759         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
760                      offsetof(struct thread_struct, spefscr));
761
762         if (!ret)
763                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
764                                          &target->thread.acc,
765                                          sizeof(target->thread.evr), -1);
766
767         return ret;
768 }
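
/*
 * Illustrative userspace sketch (not part of this file): the SPE layout
 * documented above (evr[32], acc, spefscr) is exported as the NT_PPC_SPE
 * note (and via the older PTRACE_GETEVRREGS request).  A matching
 * structure, with a made-up name, would be:
 *
 *	struct ppc_evrregs {
 *		unsigned int		evr[32];
 *		unsigned long long	acc;
 *		unsigned int		spefscr;
 *	};
 */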
769 #endif /* CONFIG_SPE */
770
771 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
772 /**
773  * tm_cgpr_active - get active number of registers in CGPR
774  * @target:     The target task.
775  * @regset:     The user regset structure.
776  *
777  * This function checks for the active number of available
778  * registers in the transaction checkpointed GPR category.
779  */
780 static int tm_cgpr_active(struct task_struct *target,
781                           const struct user_regset *regset)
782 {
783         if (!cpu_has_feature(CPU_FTR_TM))
784                 return -ENODEV;
785
786         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
787                 return 0;
788
789         return regset->n;
790 }
791
792 /**
793  * tm_cgpr_get - get CGPR registers
794  * @target:     The target task.
795  * @regset:     The user regset structure.
796  * @pos:        The buffer position.
797  * @count:      Number of bytes to copy.
798  * @kbuf:       Kernel buffer to copy from.
799  * @ubuf:       User buffer to copy into.
800  *
801  * This function gets transaction checkpointed GPR registers.
802  *
803  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
804  * GPR register values for the current transaction to fall back on if it
805  * aborts in between. This function gets those checkpointed GPR registers.
806  * The userspace interface buffer layout is as follows.
807  *
808  * struct data {
809  *      struct pt_regs ckpt_regs;
810  * };
811  */
812 static int tm_cgpr_get(struct task_struct *target,
813                         const struct user_regset *regset,
814                         unsigned int pos, unsigned int count,
815                         void *kbuf, void __user *ubuf)
816 {
817         int ret;
818
819         if (!cpu_has_feature(CPU_FTR_TM))
820                 return -ENODEV;
821
822         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
823                 return -ENODATA;
824
825         flush_tmregs_to_thread(target);
826         flush_fp_to_thread(target);
827         flush_altivec_to_thread(target);
828
829         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
830                                   &target->thread.ckpt_regs,
831                                   0, offsetof(struct pt_regs, msr));
832         if (!ret) {
833                 unsigned long msr = get_user_ckpt_msr(target);
834
835                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
836                                           offsetof(struct pt_regs, msr),
837                                           offsetof(struct pt_regs, msr) +
838                                           sizeof(msr));
839         }
840
841         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
842                      offsetof(struct pt_regs, msr) + sizeof(long));
843
844         if (!ret)
845                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
846                                           &target->thread.ckpt_regs.orig_gpr3,
847                                           offsetof(struct pt_regs, orig_gpr3),
848                                           sizeof(struct pt_regs));
849         if (!ret)
850                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
851                                                sizeof(struct pt_regs), -1);
852
853         return ret;
854 }
855
856 /**
857  * tm_cgpr_set - set the CGPR registers
858  * @target:     The target task.
859  * @regset:     The user regset structure.
860  * @pos:        The buffer position.
861  * @count:      Number of bytes to copy.
862  * @kbuf:       Kernel buffer to copy into.
863  * @ubuf:       User buffer to copy from.
864  *
865  * This function sets in transaction checkpointed GPR registers.
866  *
867  * When the transaction is active, 'ckpt_regs' holds the checkpointed
868  * GPR register values for the current transaction to fall back on if it
869  * aborts in between. This function sets those checkpointed GPR registers.
870  * The userspace interface buffer layout is as follows.
871  *
872  * struct data {
873  *      struct pt_regs ckpt_regs;
874  * };
875  */
876 static int tm_cgpr_set(struct task_struct *target,
877                         const struct user_regset *regset,
878                         unsigned int pos, unsigned int count,
879                         const void *kbuf, const void __user *ubuf)
880 {
881         unsigned long reg;
882         int ret;
883
884         if (!cpu_has_feature(CPU_FTR_TM))
885                 return -ENODEV;
886
887         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
888                 return -ENODATA;
889
890         flush_tmregs_to_thread(target);
891         flush_fp_to_thread(target);
892         flush_altivec_to_thread(target);
893
894         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
895                                  &target->thread.ckpt_regs,
896                                  0, PT_MSR * sizeof(reg));
897
898         if (!ret && count > 0) {
899                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
900                                          PT_MSR * sizeof(reg),
901                                          (PT_MSR + 1) * sizeof(reg));
902                 if (!ret)
903                         ret = set_user_ckpt_msr(target, reg);
904         }
905
906         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
907                      offsetof(struct pt_regs, msr) + sizeof(long));
908
909         if (!ret)
910                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
911                                          &target->thread.ckpt_regs.orig_gpr3,
912                                          PT_ORIG_R3 * sizeof(reg),
913                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
914
915         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
916                 ret = user_regset_copyin_ignore(
917                         &pos, &count, &kbuf, &ubuf,
918                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
919                         PT_TRAP * sizeof(reg));
920
921         if (!ret && count > 0) {
922                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
923                                          PT_TRAP * sizeof(reg),
924                                          (PT_TRAP + 1) * sizeof(reg));
925                 if (!ret)
926                         ret = set_user_ckpt_trap(target, reg);
927         }
928
929         if (!ret)
930                 ret = user_regset_copyin_ignore(
931                         &pos, &count, &kbuf, &ubuf,
932                         (PT_TRAP + 1) * sizeof(reg), -1);
933
934         return ret;
935 }
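
/*
 * Illustrative userspace sketch (not part of this file): the checkpointed
 * GPR regset above is exported as the NT_PPC_TM_CGPR note.  It only has
 * contents while the tracee is inside a transaction; otherwise the get
 * handler returns -ENODATA and the ptrace() call fails:
 *
 *	struct pt_regs ckpt;
 *	struct iovec iov = { .iov_base = &ckpt, .iov_len = sizeof(ckpt) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CGPR, &iov) != 0 &&
 *	    errno == ENODATA)
 *		puts("tracee is not in an active transaction");
 */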
936
937 /**
938  * tm_cfpr_active - get active number of registers in CFPR
939  * @target:     The target task.
940  * @regset:     The user regset structure.
941  *
942  * This function checks for the active number of available
943  * registers in the transaction checkpointed FPR category.
944  */
945 static int tm_cfpr_active(struct task_struct *target,
946                                 const struct user_regset *regset)
947 {
948         if (!cpu_has_feature(CPU_FTR_TM))
949                 return -ENODEV;
950
951         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
952                 return 0;
953
954         return regset->n;
955 }
956
957 /**
958  * tm_cfpr_get - get CFPR registers
959  * @target:     The target task.
960  * @regset:     The user regset structure.
961  * @pos:        The buffer position.
962  * @count:      Number of bytes to copy.
963  * @kbuf:       Kernel buffer to copy from.
964  * @ubuf:       User buffer to copy into.
965  *
966  * This function gets in transaction checkpointed FPR registers.
967  *
968  * When the transaction is active 'ckfp_state' holds the checkpointed
969  * values for the current transaction to fall back on if it aborts
970  * in between. This function gets those checkpointed FPR registers.
971  * The userspace interface buffer layout is as follows.
972  *
973  * struct data {
974  *      u64     fpr[32];
975  *      u64     fpscr;
976  *};
977  */
978 static int tm_cfpr_get(struct task_struct *target,
979                         const struct user_regset *regset,
980                         unsigned int pos, unsigned int count,
981                         void *kbuf, void __user *ubuf)
982 {
983         u64 buf[33];
984         int i;
985
986         if (!cpu_has_feature(CPU_FTR_TM))
987                 return -ENODEV;
988
989         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
990                 return -ENODATA;
991
992         flush_tmregs_to_thread(target);
993         flush_fp_to_thread(target);
994         flush_altivec_to_thread(target);
995
996         /* copy to local buffer then write that out */
997         for (i = 0; i < 32 ; i++)
998                 buf[i] = target->thread.TS_CKFPR(i);
999         buf[32] = target->thread.ckfp_state.fpscr;
1000         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1001 }
1002
1003 /**
1004  * tm_cfpr_set - set CFPR registers
1005  * @target:     The target task.
1006  * @regset:     The user regset structure.
1007  * @pos:        The buffer position.
1008  * @count:      Number of bytes to copy.
1009  * @kbuf:       Kernel buffer to copy into.
1010  * @ubuf:       User buffer to copy from.
1011  *
1012  * This function sets in transaction checkpointed FPR registers.
1013  *
1014  * When the transaction is active 'ckfp_state' holds the checkpointed
1015  * FPR register values for the current transaction to fall back on
1016  * if it aborts in between. This function sets these checkpointed
1017  * FPR registers. The userspace interface buffer layout is as follows.
1018  *
1019  * struct data {
1020  *      u64     fpr[32];
1021  *      u64     fpscr;
1022  *};
1023  */
1024 static int tm_cfpr_set(struct task_struct *target,
1025                         const struct user_regset *regset,
1026                         unsigned int pos, unsigned int count,
1027                         const void *kbuf, const void __user *ubuf)
1028 {
1029         u64 buf[33];
1030         int i;
1031
1032         if (!cpu_has_feature(CPU_FTR_TM))
1033                 return -ENODEV;
1034
1035         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1036                 return -ENODATA;
1037
1038         flush_tmregs_to_thread(target);
1039         flush_fp_to_thread(target);
1040         flush_altivec_to_thread(target);
1041
1042         for (i = 0; i < 32; i++)
1043                 buf[i] = target->thread.TS_CKFPR(i);
1044         buf[32] = target->thread.ckfp_state.fpscr;
1045
1046         /* copy to local buffer then write that out */
1047         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1048         if (i)
1049                 return i;
1050         for (i = 0; i < 32 ; i++)
1051                 target->thread.TS_CKFPR(i) = buf[i];
1052         target->thread.ckfp_state.fpscr = buf[32];
1053         return 0;
1054 }
1055
1056 /**
1057  * tm_cvmx_active - get active number of registers in CVMX
1058  * @target:     The target task.
1059  * @regset:     The user regset structure.
1060  *
1061  * This function checks for the active number of available
1062  * registers in the checkpointed VMX category.
1063  */
1064 static int tm_cvmx_active(struct task_struct *target,
1065                                 const struct user_regset *regset)
1066 {
1067         if (!cpu_has_feature(CPU_FTR_TM))
1068                 return -ENODEV;
1069
1070         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1071                 return 0;
1072
1073         return regset->n;
1074 }
1075
1076 /**
1077  * tm_cvmx_get - get CVMX registers
1078  * @target:     The target task.
1079  * @regset:     The user regset structure.
1080  * @pos:        The buffer position.
1081  * @count:      Number of bytes to copy.
1082  * @kbuf:       Kernel buffer to copy from.
1083  * @ubuf:       User buffer to copy into.
1084  *
1085  * This function gets in transaction checkpointed VMX registers.
1086  *
1087  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1088  * the checkpointed values for the current transaction to fall
1089  * back on if it aborts in between. The userspace interface buffer
1090  * layout is as follows.
1091  *
1092  * struct data {
1093  *      vector128       vr[32];
1094  *      vector128       vscr;
1095  *      vector128       vrsave;
1096  *};
1097  */
1098 static int tm_cvmx_get(struct task_struct *target,
1099                         const struct user_regset *regset,
1100                         unsigned int pos, unsigned int count,
1101                         void *kbuf, void __user *ubuf)
1102 {
1103         int ret;
1104
1105         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1106
1107         if (!cpu_has_feature(CPU_FTR_TM))
1108                 return -ENODEV;
1109
1110         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1111                 return -ENODATA;
1112
1113         /* Flush the state */
1114         flush_tmregs_to_thread(target);
1115         flush_fp_to_thread(target);
1116         flush_altivec_to_thread(target);
1117
1118         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1119                                         &target->thread.ckvr_state, 0,
1120                                         33 * sizeof(vector128));
1121         if (!ret) {
1122                 /*
1123                  * Copy out only the low-order word of vrsave.
1124                  */
1125                 union {
1126                         elf_vrreg_t reg;
1127                         u32 word;
1128                 } vrsave;
1129                 memset(&vrsave, 0, sizeof(vrsave));
1130                 vrsave.word = target->thread.ckvrsave;
1131                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1132                                                 33 * sizeof(vector128), -1);
1133         }
1134
1135         return ret;
1136 }
1137
1138 /**
1139  * tm_cvmx_set - set CVMX registers
1140  * @target:     The target task.
1141  * @regset:     The user regset structure.
1142  * @pos:        The buffer position.
1143  * @count:      Number of bytes to copy.
1144  * @kbuf:       Kernel buffer to copy into.
1145  * @ubuf:       User buffer to copy from.
1146  *
1147  * This function sets in transaction checkpointed VMX registers.
1148  *
1149  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1150  * the checkpointed values for the current transaction to fall
1151  * back on if it aborts in between. The userspace interface buffer
1152  * layout is as follows.
1153  *
1154  * struct data {
1155  *      vector128       vr[32];
1156  *      vector128       vscr;
1157  *      vector128       vrsave;
1158  *};
1159  */
1160 static int tm_cvmx_set(struct task_struct *target,
1161                         const struct user_regset *regset,
1162                         unsigned int pos, unsigned int count,
1163                         const void *kbuf, const void __user *ubuf)
1164 {
1165         int ret;
1166
1167         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1168
1169         if (!cpu_has_feature(CPU_FTR_TM))
1170                 return -ENODEV;
1171
1172         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1173                 return -ENODATA;
1174
1175         flush_tmregs_to_thread(target);
1176         flush_fp_to_thread(target);
1177         flush_altivec_to_thread(target);
1178
1179         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1180                                         &target->thread.ckvr_state, 0,
1181                                         33 * sizeof(vector128));
1182         if (!ret && count > 0) {
1183                 /*
1184                  * We use only the low-order word of vrsave.
1185                  */
1186                 union {
1187                         elf_vrreg_t reg;
1188                         u32 word;
1189                 } vrsave;
1190                 memset(&vrsave, 0, sizeof(vrsave));
1191                 vrsave.word = target->thread.ckvrsave;
1192                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1193                                                 33 * sizeof(vector128), -1);
1194                 if (!ret)
1195                         target->thread.ckvrsave = vrsave.word;
1196         }
1197
1198         return ret;
1199 }
1200
1201 /**
1202  * tm_cvsx_active - get active number of registers in CVSX
1203  * @target:     The target task.
1204  * @regset:     The user regset structure.
1205  *
1206  * This function checks for the active number of available
1207  * registers in the transaction checkpointed VSX category.
1208  */
1209 static int tm_cvsx_active(struct task_struct *target,
1210                                 const struct user_regset *regset)
1211 {
1212         if (!cpu_has_feature(CPU_FTR_TM))
1213                 return -ENODEV;
1214
1215         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1216                 return 0;
1217
1218         flush_vsx_to_thread(target);
1219         return target->thread.used_vsr ? regset->n : 0;
1220 }
1221
1222 /**
1223  * tm_cvsx_get - get CVSX registers
1224  * @target:     The target task.
1225  * @regset:     The user regset structure.
1226  * @pos:        The buffer position.
1227  * @count:      Number of bytes to copy.
1228  * @kbuf:       Kernel buffer to copy from.
1229  * @ubuf:       User buffer to copy into.
1230  *
1231  * This function gets in transaction checkpointed VSX registers.
1232  *
1233  * When the transaction is active 'ckfp_state' holds the checkpointed
1234  * values for the current transaction to fall back on if it aborts
1235  * in between. This function gets those checkpointed VSX registers.
1236  * The userspace interface buffer layout is as follows.
1237  *
1238  * struct data {
1239  *      u64     vsx[32];
1240  *};
1241  */
1242 static int tm_cvsx_get(struct task_struct *target,
1243                         const struct user_regset *regset,
1244                         unsigned int pos, unsigned int count,
1245                         void *kbuf, void __user *ubuf)
1246 {
1247         u64 buf[32];
1248         int ret, i;
1249
1250         if (!cpu_has_feature(CPU_FTR_TM))
1251                 return -ENODEV;
1252
1253         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1254                 return -ENODATA;
1255
1256         /* Flush the state */
1257         flush_tmregs_to_thread(target);
1258         flush_fp_to_thread(target);
1259         flush_altivec_to_thread(target);
1260         flush_vsx_to_thread(target);
1261
1262         for (i = 0; i < 32 ; i++)
1263                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1264         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1265                                   buf, 0, 32 * sizeof(double));
1266
1267         return ret;
1268 }
1269
1270 /**
1271  * tm_cvsx_set - set CVSX registers
1272  * @target:     The target task.
1273  * @regset:     The user regset structure.
1274  * @pos:        The buffer position.
1275  * @count:      Number of bytes to copy.
1276  * @kbuf:       Kernel buffer to copy into.
1277  * @ubuf:       User buffer to copy from.
1278  *
1279  * This function sets in transaction checkpointed VSX registers.
1280  *
1281  * When the transaction is active 'ckfp_state' holds the checkpointed
1282  * VSX register values for the current transaction to fall back on
1283  * if it aborts in between. This function sets these checkpointed
1284  * FPR registers. The userspace interface buffer layout is as follows.
1285  * VSX registers. The userspace interface buffer layout is as follows.
1286  * struct data {
1287  *      u64     vsx[32];
1288  *};
1289  */
1290 static int tm_cvsx_set(struct task_struct *target,
1291                         const struct user_regset *regset,
1292                         unsigned int pos, unsigned int count,
1293                         const void *kbuf, const void __user *ubuf)
1294 {
1295         u64 buf[32];
1296         int ret, i;
1297
1298         if (!cpu_has_feature(CPU_FTR_TM))
1299                 return -ENODEV;
1300
1301         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1302                 return -ENODATA;
1303
1304         /* Flush the state */
1305         flush_tmregs_to_thread(target);
1306         flush_fp_to_thread(target);
1307         flush_altivec_to_thread(target);
1308         flush_vsx_to_thread(target);
1309
1310         for (i = 0; i < 32 ; i++)
1311                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1312
1313         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1314                                  buf, 0, 32 * sizeof(double));
1315         if (!ret)
1316                 for (i = 0; i < 32 ; i++)
1317                         target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1318
1319         return ret;
1320 }
1321
1322 /**
1323  * tm_spr_active - get active number of registers in TM SPR
1324  * @target:     The target task.
1325  * @regset:     The user regset structure.
1326  *
1327  * This function checks the active number of available
1328  * registers in the transactional memory SPR category.
1329  */
1330 static int tm_spr_active(struct task_struct *target,
1331                          const struct user_regset *regset)
1332 {
1333         if (!cpu_has_feature(CPU_FTR_TM))
1334                 return -ENODEV;
1335
1336         return regset->n;
1337 }
1338
1339 /**
1340  * tm_spr_get - get the TM related SPR registers
1341  * @target:     The target task.
1342  * @regset:     The user regset structure.
1343  * @pos:        The buffer position.
1344  * @count:      Number of bytes to copy.
1345  * @kbuf:       Kernel buffer to copy from.
1346  * @ubuf:       User buffer to copy into.
1347  *
1348  * This function gets transactional memory related SPR registers.
1349  * The userspace interface buffer layout is as follows.
1350  *
1351  * struct {
1352  *      u64             tm_tfhar;
1353  *      u64             tm_texasr;
1354  *      u64             tm_tfiar;
1355  * };
1356  */
1357 static int tm_spr_get(struct task_struct *target,
1358                       const struct user_regset *regset,
1359                       unsigned int pos, unsigned int count,
1360                       void *kbuf, void __user *ubuf)
1361 {
1362         int ret;
1363
1364         /* Build tests */
1365         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1366         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1367         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1368
1369         if (!cpu_has_feature(CPU_FTR_TM))
1370                 return -ENODEV;
1371
1372         /* Flush the states */
1373         flush_tmregs_to_thread(target);
1374         flush_fp_to_thread(target);
1375         flush_altivec_to_thread(target);
1376
1377         /* TFHAR register */
1378         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1379                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1380
1381         /* TEXASR register */
1382         if (!ret)
1383                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1384                                 &target->thread.tm_texasr, sizeof(u64),
1385                                 2 * sizeof(u64));
1386
1387         /* TFIAR register */
1388         if (!ret)
1389                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1390                                 &target->thread.tm_tfiar,
1391                                 2 * sizeof(u64), 3 * sizeof(u64));
1392         return ret;
1393 }
1394
1395 /**
1396  * tm_spr_set - set the TM related SPR registers
1397  * @target:     The target task.
1398  * @regset:     The user regset structure.
1399  * @pos:        The buffer position.
1400  * @count:      Number of bytes to copy.
1401  * @kbuf:       Kernel buffer to copy into.
1402  * @ubuf:       User buffer to copy from.
1403  *
1404  * This function sets transactional memory related SPR registers.
1405  * The userspace interface buffer layout is as follows.
1406  *
1407  * struct {
1408  *      u64             tm_tfhar;
1409  *      u64             tm_texasr;
1410  *      u64             tm_tfiar;
1411  * };
1412  */
1413 static int tm_spr_set(struct task_struct *target,
1414                       const struct user_regset *regset,
1415                       unsigned int pos, unsigned int count,
1416                       const void *kbuf, const void __user *ubuf)
1417 {
1418         int ret;
1419
1420         /* Build tests */
1421         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1422         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1423         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1424
1425         if (!cpu_has_feature(CPU_FTR_TM))
1426                 return -ENODEV;
1427
1428         /* Flush the states */
1429         flush_tmregs_to_thread(target);
1430         flush_fp_to_thread(target);
1431         flush_altivec_to_thread(target);
1432
1433         /* TFHAR register */
1434         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1435                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1436
1437         /* TEXASR register */
1438         if (!ret)
1439                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1440                                 &target->thread.tm_texasr, sizeof(u64),
1441                                 2 * sizeof(u64));
1442
1443         /* TFIAR register */
1444         if (!ret)
1445                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1446                                 &target->thread.tm_tfiar,
1447                                  2 * sizeof(u64), 3 * sizeof(u64));
1448         return ret;
1449 }
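
/*
 * Illustrative userspace sketch (not part of this file): the three TM SPRs
 * above are exported together as the NT_PPC_TM_SPR note in the documented
 * order, so a plain array can serve as the transfer buffer:
 *
 *	unsigned long long tm_sprs[3];	// tfhar, texasr, tfiar
 *	struct iovec iov = {
 *		.iov_base = tm_sprs,
 *		.iov_len  = sizeof(tm_sprs),
 *	};
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
 */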
1450
1451 static int tm_tar_active(struct task_struct *target,
1452                          const struct user_regset *regset)
1453 {
1454         if (!cpu_has_feature(CPU_FTR_TM))
1455                 return -ENODEV;
1456
1457         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1458                 return regset->n;
1459
1460         return 0;
1461 }
1462
1463 static int tm_tar_get(struct task_struct *target,
1464                       const struct user_regset *regset,
1465                       unsigned int pos, unsigned int count,
1466                       void *kbuf, void __user *ubuf)
1467 {
1468         int ret;
1469
1470         if (!cpu_has_feature(CPU_FTR_TM))
1471                 return -ENODEV;
1472
1473         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1474                 return -ENODATA;
1475
1476         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1477                                 &target->thread.tm_tar, 0, sizeof(u64));
1478         return ret;
1479 }
1480
1481 static int tm_tar_set(struct task_struct *target,
1482                       const struct user_regset *regset,
1483                       unsigned int pos, unsigned int count,
1484                       const void *kbuf, const void __user *ubuf)
1485 {
1486         int ret;
1487
1488         if (!cpu_has_feature(CPU_FTR_TM))
1489                 return -ENODEV;
1490
1491         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1492                 return -ENODATA;
1493
1494         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1495                                 &target->thread.tm_tar, 0, sizeof(u64));
1496         return ret;
1497 }
1498
1499 static int tm_ppr_active(struct task_struct *target,
1500                          const struct user_regset *regset)
1501 {
1502         if (!cpu_has_feature(CPU_FTR_TM))
1503                 return -ENODEV;
1504
1505         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1506                 return regset->n;
1507
1508         return 0;
1509 }
1510
1511
1512 static int tm_ppr_get(struct task_struct *target,
1513                       const struct user_regset *regset,
1514                       unsigned int pos, unsigned int count,
1515                       void *kbuf, void __user *ubuf)
1516 {
1517         int ret;
1518
1519         if (!cpu_has_feature(CPU_FTR_TM))
1520                 return -ENODEV;
1521
1522         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1523                 return -ENODATA;
1524
1525         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1526                                 &target->thread.tm_ppr, 0, sizeof(u64));
1527         return ret;
1528 }
1529
1530 static int tm_ppr_set(struct task_struct *target,
1531                       const struct user_regset *regset,
1532                       unsigned int pos, unsigned int count,
1533                       const void *kbuf, const void __user *ubuf)
1534 {
1535         int ret;
1536
1537         if (!cpu_has_feature(CPU_FTR_TM))
1538                 return -ENODEV;
1539
1540         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1541                 return -ENODATA;
1542
1543         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1544                                 &target->thread.tm_ppr, 0, sizeof(u64));
1545         return ret;
1546 }
1547
1548 static int tm_dscr_active(struct task_struct *target,
1549                          const struct user_regset *regset)
1550 {
1551         if (!cpu_has_feature(CPU_FTR_TM))
1552                 return -ENODEV;
1553
1554         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1555                 return regset->n;
1556
1557         return 0;
1558 }
1559
1560 static int tm_dscr_get(struct task_struct *target,
1561                       const struct user_regset *regset,
1562                       unsigned int pos, unsigned int count,
1563                       void *kbuf, void __user *ubuf)
1564 {
1565         int ret;
1566
1567         if (!cpu_has_feature(CPU_FTR_TM))
1568                 return -ENODEV;
1569
1570         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1571                 return -ENODATA;
1572
1573         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1574                                 &target->thread.tm_dscr, 0, sizeof(u64));
1575         return ret;
1576 }
1577
1578 static int tm_dscr_set(struct task_struct *target,
1579                       const struct user_regset *regset,
1580                       unsigned int pos, unsigned int count,
1581                       const void *kbuf, const void __user *ubuf)
1582 {
1583         int ret;
1584
1585         if (!cpu_has_feature(CPU_FTR_TM))
1586                 return -ENODEV;
1587
1588         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1589                 return -ENODATA;
1590
1591         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1592                                 &target->thread.tm_dscr, 0, sizeof(u64));
1593         return ret;
1594 }
1595 #endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1596
1597 #ifdef CONFIG_PPC64
1598 static int ppr_get(struct task_struct *target,
1599                       const struct user_regset *regset,
1600                       unsigned int pos, unsigned int count,
1601                       void *kbuf, void __user *ubuf)
1602 {
1603         int ret;
1604
1605         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1606                                 &target->thread.ppr, 0, sizeof(u64));
1607         return ret;
1608 }
1609
1610 static int ppr_set(struct task_struct *target,
1611                       const struct user_regset *regset,
1612                       unsigned int pos, unsigned int count,
1613                       const void *kbuf, const void __user *ubuf)
1614 {
1615         int ret;
1616
1617         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1618                                 &target->thread.ppr, 0, sizeof(u64));
1619         return ret;
1620 }
1621
1622 static int dscr_get(struct task_struct *target,
1623                       const struct user_regset *regset,
1624                       unsigned int pos, unsigned int count,
1625                       void *kbuf, void __user *ubuf)
1626 {
1627         int ret;
1628
1629         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1630                                 &target->thread.dscr, 0, sizeof(u64));
1631         return ret;
1632 }
1633 static int dscr_set(struct task_struct *target,
1634                       const struct user_regset *regset,
1635                       unsigned int pos, unsigned int count,
1636                       const void *kbuf, const void __user *ubuf)
1637 {
1638         int ret;
1639
1640         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1641                                 &target->thread.dscr, 0, sizeof(u64));
1642         return ret;
1643 }
1644 #endif
1645 #ifdef CONFIG_PPC_BOOK3S_64
1646 static int tar_get(struct task_struct *target,
1647                       const struct user_regset *regset,
1648                       unsigned int pos, unsigned int count,
1649                       void *kbuf, void __user *ubuf)
1650 {
1651         int ret;
1652
1653         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1654                                 &target->thread.tar, 0, sizeof(u64));
1655         return ret;
1656 }
1657 static int tar_set(struct task_struct *target,
1658                       const struct user_regset *regset,
1659                       unsigned int pos, unsigned int count,
1660                       const void *kbuf, const void __user *ubuf)
1661 {
1662         int ret;
1663
1664         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1665                                 &target->thread.tar, 0, sizeof(u64));
1666         return ret;
1667 }
1668
1669 static int ebb_active(struct task_struct *target,
1670                          const struct user_regset *regset)
1671 {
1672         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1673                 return -ENODEV;
1674
1675         if (target->thread.used_ebb)
1676                 return regset->n;
1677
1678         return 0;
1679 }
1680
1681 static int ebb_get(struct task_struct *target,
1682                       const struct user_regset *regset,
1683                       unsigned int pos, unsigned int count,
1684                       void *kbuf, void __user *ubuf)
1685 {
1686         /* Build tests */
1687         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1688         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1689
1690         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1691                 return -ENODEV;
1692
1693         if (!target->thread.used_ebb)
1694                 return -ENODATA;
1695
1696         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1697                         &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1698 }
1699
1700 static int ebb_set(struct task_struct *target,
1701                       const struct user_regset *regset,
1702                       unsigned int pos, unsigned int count,
1703                       const void *kbuf, const void __user *ubuf)
1704 {
1705         int ret = 0;
1706
1707         /* Build tests */
1708         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1709         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1710
1711         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1712                 return -ENODEV;
1713
1714         if (target->thread.used_ebb)
1715                 return -ENODATA;
1716
1717         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1718                         &target->thread.ebbrr, 0, sizeof(unsigned long));
1719
1720         if (!ret)
1721                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1722                         &target->thread.ebbhr, sizeof(unsigned long),
1723                         2 * sizeof(unsigned long));
1724
1725         if (!ret)
1726                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1727                         &target->thread.bescr,
1728                         2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1729
1730         return ret;
1731 }
1732 static int pmu_active(struct task_struct *target,
1733                          const struct user_regset *regset)
1734 {
1735         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1736                 return -ENODEV;
1737
1738         return regset->n;
1739 }
1740
1741 static int pmu_get(struct task_struct *target,
1742                       const struct user_regset *regset,
1743                       unsigned int pos, unsigned int count,
1744                       void *kbuf, void __user *ubuf)
1745 {
1746         /* Build tests */
1747         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1748         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1749         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1750         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1751
1752         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1753                 return -ENODEV;
1754
1755         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1756                         &target->thread.siar, 0,
1757                         5 * sizeof(unsigned long));
1758 }
1759
1760 static int pmu_set(struct task_struct *target,
1761                       const struct user_regset *regset,
1762                       unsigned int pos, unsigned int count,
1763                       const void *kbuf, const void __user *ubuf)
1764 {
1765         int ret = 0;
1766
1767         /* Build tests */
1768         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1769         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1770         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1771         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1772
1773         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1774                 return -ENODEV;
1775
1776         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1777                         &target->thread.siar, 0,
1778                         sizeof(unsigned long));
1779
1780         if (!ret)
1781                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1782                         &target->thread.sdar, sizeof(unsigned long),
1783                         2 * sizeof(unsigned long));
1784
1785         if (!ret)
1786                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1787                         &target->thread.sier, 2 * sizeof(unsigned long),
1788                         3 * sizeof(unsigned long));
1789
1790         if (!ret)
1791                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1792                         &target->thread.mmcr2, 3 * sizeof(unsigned long),
1793                         4 * sizeof(unsigned long));
1794
1795         if (!ret)
1796                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1797                         &target->thread.mmcr0, 4 * sizeof(unsigned long),
1798                         5 * sizeof(unsigned long));
1799         return ret;
1800 }
1801 #endif
1802 /*
1803  * These are our native regset flavors.
1804  */
1805 enum powerpc_regset {
1806         REGSET_GPR,
1807         REGSET_FPR,
1808 #ifdef CONFIG_ALTIVEC
1809         REGSET_VMX,
1810 #endif
1811 #ifdef CONFIG_VSX
1812         REGSET_VSX,
1813 #endif
1814 #ifdef CONFIG_SPE
1815         REGSET_SPE,
1816 #endif
1817 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1818         REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1819         REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1820         REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1821         REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1822         REGSET_TM_SPR,          /* TM specific SPR registers */
1823         REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1824         REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1825         REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1826 #endif
1827 #ifdef CONFIG_PPC64
1828         REGSET_PPR,             /* PPR register */
1829         REGSET_DSCR,            /* DSCR register */
1830 #endif
1831 #ifdef CONFIG_PPC_BOOK3S_64
1832         REGSET_TAR,             /* TAR register */
1833         REGSET_EBB,             /* EBB registers */
1834         REGSET_PMR,             /* Performance Monitor Registers */
1835 #endif
1836 };
1837
1838 static const struct user_regset native_regsets[] = {
1839         [REGSET_GPR] = {
1840                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1841                 .size = sizeof(long), .align = sizeof(long),
1842                 .get = gpr_get, .set = gpr_set
1843         },
1844         [REGSET_FPR] = {
1845                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1846                 .size = sizeof(double), .align = sizeof(double),
1847                 .get = fpr_get, .set = fpr_set
1848         },
1849 #ifdef CONFIG_ALTIVEC
1850         [REGSET_VMX] = {
1851                 .core_note_type = NT_PPC_VMX, .n = 34,
1852                 .size = sizeof(vector128), .align = sizeof(vector128),
1853                 .active = vr_active, .get = vr_get, .set = vr_set
1854         },
1855 #endif
1856 #ifdef CONFIG_VSX
1857         [REGSET_VSX] = {
1858                 .core_note_type = NT_PPC_VSX, .n = 32,
1859                 .size = sizeof(double), .align = sizeof(double),
1860                 .active = vsr_active, .get = vsr_get, .set = vsr_set
1861         },
1862 #endif
1863 #ifdef CONFIG_SPE
1864         [REGSET_SPE] = {
1865                 .core_note_type = NT_PPC_SPE, .n = 35,
1866                 .size = sizeof(u32), .align = sizeof(u32),
1867                 .active = evr_active, .get = evr_get, .set = evr_set
1868         },
1869 #endif
1870 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1871         [REGSET_TM_CGPR] = {
1872                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1873                 .size = sizeof(long), .align = sizeof(long),
1874                 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1875         },
1876         [REGSET_TM_CFPR] = {
1877                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1878                 .size = sizeof(double), .align = sizeof(double),
1879                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1880         },
1881         [REGSET_TM_CVMX] = {
1882                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1883                 .size = sizeof(vector128), .align = sizeof(vector128),
1884                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1885         },
1886         [REGSET_TM_CVSX] = {
1887                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1888                 .size = sizeof(double), .align = sizeof(double),
1889                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1890         },
1891         [REGSET_TM_SPR] = {
1892                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1893                 .size = sizeof(u64), .align = sizeof(u64),
1894                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1895         },
1896         [REGSET_TM_CTAR] = {
1897                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1898                 .size = sizeof(u64), .align = sizeof(u64),
1899                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1900         },
1901         [REGSET_TM_CPPR] = {
1902                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1903                 .size = sizeof(u64), .align = sizeof(u64),
1904                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1905         },
1906         [REGSET_TM_CDSCR] = {
1907                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1908                 .size = sizeof(u64), .align = sizeof(u64),
1909                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1910         },
1911 #endif
1912 #ifdef CONFIG_PPC64
1913         [REGSET_PPR] = {
1914                 .core_note_type = NT_PPC_PPR, .n = 1,
1915                 .size = sizeof(u64), .align = sizeof(u64),
1916                 .get = ppr_get, .set = ppr_set
1917         },
1918         [REGSET_DSCR] = {
1919                 .core_note_type = NT_PPC_DSCR, .n = 1,
1920                 .size = sizeof(u64), .align = sizeof(u64),
1921                 .get = dscr_get, .set = dscr_set
1922         },
1923 #endif
1924 #ifdef CONFIG_PPC_BOOK3S_64
1925         [REGSET_TAR] = {
1926                 .core_note_type = NT_PPC_TAR, .n = 1,
1927                 .size = sizeof(u64), .align = sizeof(u64),
1928                 .get = tar_get, .set = tar_set
1929         },
1930         [REGSET_EBB] = {
1931                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1932                 .size = sizeof(u64), .align = sizeof(u64),
1933                 .active = ebb_active, .get = ebb_get, .set = ebb_set
1934         },
1935         [REGSET_PMR] = {
1936                 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1937                 .size = sizeof(u64), .align = sizeof(u64),
1938                 .active = pmu_active, .get = pmu_get, .set = pmu_set
1939         },
1940 #endif
1941 };
1942
1943 static const struct user_regset_view user_ppc_native_view = {
1944         .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
1945         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
1946 };
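/*
 * The regsets above are what a tracer reaches through PTRACE_GETREGSET /
 * PTRACE_SETREGSET, and core_note_type is what lands in ELF core dump notes.
 * As a rough userspace sketch only (elf_gregset_t, NT_PRSTATUS and the
 * ptrace() wrapper come from userspace headers, not from this file), reading
 * the GPR set of a stopped tracee might look like:
 *
 *	#include <elf.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/procfs.h>
 *	#include <sys/uio.h>
 *
 *	elf_gregset_t gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * The third argument is the note type (NT_PRSTATUS, NT_PPC_TAR, NT_PPC_EBB,
 * ...) that selects an entry in this table.
 */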
1947
1948 #ifdef CONFIG_PPC64
1949 #include <linux/compat.h>
1950
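/*
 * Helpers for the 32-bit (compat) view of the GPR set: they present the
 * 64-bit pt_regs as an array of compat_ulong_t words.  MSR is filtered
 * through get_user_msr()/set_user_msr(), the trap word through
 * set_user_trap(), writes to the slots between PT_MAX_PUT_REG and PT_TRAP
 * are ignored, reads are zero-padded past PT_REGS_COUNT, and writes past
 * PT_TRAP are discarded.
 */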
1951 static int gpr32_get_common(struct task_struct *target,
1952                      const struct user_regset *regset,
1953                      unsigned int pos, unsigned int count,
1954                             void *kbuf, void __user *ubuf,
1955                             unsigned long *regs)
1956 {
1957         compat_ulong_t *k = kbuf;
1958         compat_ulong_t __user *u = ubuf;
1959         compat_ulong_t reg;
1960
1961         pos /= sizeof(reg);
1962         count /= sizeof(reg);
1963
1964         if (kbuf)
1965                 for (; count > 0 && pos < PT_MSR; --count)
1966                         *k++ = regs[pos++];
1967         else
1968                 for (; count > 0 && pos < PT_MSR; --count)
1969                         if (__put_user((compat_ulong_t) regs[pos++], u++))
1970                                 return -EFAULT;
1971
1972         if (count > 0 && pos == PT_MSR) {
1973                 reg = get_user_msr(target);
1974                 if (kbuf)
1975                         *k++ = reg;
1976                 else if (__put_user(reg, u++))
1977                         return -EFAULT;
1978                 ++pos;
1979                 --count;
1980         }
1981
1982         if (kbuf)
1983                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1984                         *k++ = regs[pos++];
1985         else
1986                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1987                         if (__put_user((compat_ulong_t) regs[pos++], u++))
1988                                 return -EFAULT;
1989
1990         kbuf = k;
1991         ubuf = u;
1992         pos *= sizeof(reg);
1993         count *= sizeof(reg);
1994         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
1995                                         PT_REGS_COUNT * sizeof(reg), -1);
1996 }
1997
1998 static int gpr32_set_common(struct task_struct *target,
1999                      const struct user_regset *regset,
2000                      unsigned int pos, unsigned int count,
2001                      const void *kbuf, const void __user *ubuf,
2002                      unsigned long *regs)
2003 {
2004         const compat_ulong_t *k = kbuf;
2005         const compat_ulong_t __user *u = ubuf;
2006         compat_ulong_t reg;
2007
2008         pos /= sizeof(reg);
2009         count /= sizeof(reg);
2010
2011         if (kbuf)
2012                 for (; count > 0 && pos < PT_MSR; --count)
2013                         regs[pos++] = *k++;
2014         else
2015                 for (; count > 0 && pos < PT_MSR; --count) {
2016                         if (__get_user(reg, u++))
2017                                 return -EFAULT;
2018                         regs[pos++] = reg;
2019                 }
2020
2021
2022         if (count > 0 && pos == PT_MSR) {
2023                 if (kbuf)
2024                         reg = *k++;
2025                 else if (__get_user(reg, u++))
2026                         return -EFAULT;
2027                 set_user_msr(target, reg);
2028                 ++pos;
2029                 --count;
2030         }
2031
2032         if (kbuf) {
2033                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2034                         regs[pos++] = *k++;
2035                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2036                         ++k;
2037         } else {
2038                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2039                         if (__get_user(reg, u++))
2040                                 return -EFAULT;
2041                         regs[pos++] = reg;
2042                 }
2043                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2044                         if (__get_user(reg, u++))
2045                                 return -EFAULT;
2046         }
2047
2048         if (count > 0 && pos == PT_TRAP) {
2049                 if (kbuf)
2050                         reg = *k++;
2051                 else if (__get_user(reg, u++))
2052                         return -EFAULT;
2053                 set_user_trap(target, reg);
2054                 ++pos;
2055                 --count;
2056         }
2057
2058         kbuf = k;
2059         ubuf = u;
2060         pos *= sizeof(reg);
2061         count *= sizeof(reg);
2062         return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2063                                          (PT_TRAP + 1) * sizeof(reg), -1);
2064 }
2065
2066 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2067 static int tm_cgpr32_get(struct task_struct *target,
2068                      const struct user_regset *regset,
2069                      unsigned int pos, unsigned int count,
2070                      void *kbuf, void __user *ubuf)
2071 {
2072         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2073                         &target->thread.ckpt_regs.gpr[0]);
2074 }
2075
2076 static int tm_cgpr32_set(struct task_struct *target,
2077                      const struct user_regset *regset,
2078                      unsigned int pos, unsigned int count,
2079                      const void *kbuf, const void __user *ubuf)
2080 {
2081         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2082                         &target->thread.ckpt_regs.gpr[0]);
2083 }
2084 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2085
2086 static int gpr32_get(struct task_struct *target,
2087                      const struct user_regset *regset,
2088                      unsigned int pos, unsigned int count,
2089                      void *kbuf, void __user *ubuf)
2090 {
2091         int i;
2092
2093         if (target->thread.regs == NULL)
2094                 return -EIO;
2095
2096         if (!FULL_REGS(target->thread.regs)) {
2097                 /*
2098                  * We have a partial register set.
2099                  * Fill 14-31 with bogus values.
2100                  */
2101                 for (i = 14; i < 32; i++)
2102                         target->thread.regs->gpr[i] = NV_REG_POISON;
2103         }
2104         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2105                         &target->thread.regs->gpr[0]);
2106 }
2107
2108 static int gpr32_set(struct task_struct *target,
2109                      const struct user_regset *regset,
2110                      unsigned int pos, unsigned int count,
2111                      const void *kbuf, const void __user *ubuf)
2112 {
2113         if (target->thread.regs == NULL)
2114                 return -EIO;
2115
2116         CHECK_FULL_REGS(target->thread.regs);
2117         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2118                         &target->thread.regs->gpr[0]);
2119 }
2120
2121 /*
2122  * These are the regset flavors matching the CONFIG_PPC32 native set.
2123  */
2124 static const struct user_regset compat_regsets[] = {
2125         [REGSET_GPR] = {
2126                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2127                 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2128                 .get = gpr32_get, .set = gpr32_set
2129         },
2130         [REGSET_FPR] = {
2131                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2132                 .size = sizeof(double), .align = sizeof(double),
2133                 .get = fpr_get, .set = fpr_set
2134         },
2135 #ifdef CONFIG_ALTIVEC
2136         [REGSET_VMX] = {
2137                 .core_note_type = NT_PPC_VMX, .n = 34,
2138                 .size = sizeof(vector128), .align = sizeof(vector128),
2139                 .active = vr_active, .get = vr_get, .set = vr_set
2140         },
2141 #endif
2142 #ifdef CONFIG_SPE
2143         [REGSET_SPE] = {
2144                 .core_note_type = NT_PPC_SPE, .n = 35,
2145                 .size = sizeof(u32), .align = sizeof(u32),
2146                 .active = evr_active, .get = evr_get, .set = evr_set
2147         },
2148 #endif
2149 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2150         [REGSET_TM_CGPR] = {
2151                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2152                 .size = sizeof(long), .align = sizeof(long),
2153                 .active = tm_cgpr_active,
2154                 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2155         },
2156         [REGSET_TM_CFPR] = {
2157                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2158                 .size = sizeof(double), .align = sizeof(double),
2159                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2160         },
2161         [REGSET_TM_CVMX] = {
2162                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2163                 .size = sizeof(vector128), .align = sizeof(vector128),
2164                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2165         },
2166         [REGSET_TM_CVSX] = {
2167                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2168                 .size = sizeof(double), .align = sizeof(double),
2169                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2170         },
2171         [REGSET_TM_SPR] = {
2172                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2173                 .size = sizeof(u64), .align = sizeof(u64),
2174                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2175         },
2176         [REGSET_TM_CTAR] = {
2177                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2178                 .size = sizeof(u64), .align = sizeof(u64),
2179                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2180         },
2181         [REGSET_TM_CPPR] = {
2182                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2183                 .size = sizeof(u64), .align = sizeof(u64),
2184                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2185         },
2186         [REGSET_TM_CDSCR] = {
2187                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2188                 .size = sizeof(u64), .align = sizeof(u64),
2189                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2190         },
2191 #endif
2192 #ifdef CONFIG_PPC64
2193         [REGSET_PPR] = {
2194                 .core_note_type = NT_PPC_PPR, .n = 1,
2195                 .size = sizeof(u64), .align = sizeof(u64),
2196                 .get = ppr_get, .set = ppr_set
2197         },
2198         [REGSET_DSCR] = {
2199                 .core_note_type = NT_PPC_DSCR, .n = 1,
2200                 .size = sizeof(u64), .align = sizeof(u64),
2201                 .get = dscr_get, .set = dscr_set
2202         },
2203 #endif
2204 #ifdef CONFIG_PPC_BOOK3S_64
2205         [REGSET_TAR] = {
2206                 .core_note_type = NT_PPC_TAR, .n = 1,
2207                 .size = sizeof(u64), .align = sizeof(u64),
2208                 .get = tar_get, .set = tar_set
2209         },
2210         [REGSET_EBB] = {
2211                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2212                 .size = sizeof(u64), .align = sizeof(u64),
2213                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2214         },
2215 #endif
2216 };
2217
2218 static const struct user_regset_view user_ppc_compat_view = {
2219         .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2220         .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2221 };
2222 #endif  /* CONFIG_PPC64 */
2223
2224 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2225 {
2226 #ifdef CONFIG_PPC64
2227         if (test_tsk_thread_flag(task, TIF_32BIT))
2228                 return &user_ppc_compat_view;
2229 #endif
2230         return &user_ppc_native_view;
2231 }
2232
2233
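/*
 * Single-step / branch-step control, reached from the generic ptrace code
 * for PTRACE_SINGLESTEP and PTRACE_SINGLEBLOCK.  On CONFIG_PPC_ADV_DEBUG_REGS
 * parts this is driven through DBCR0[IC]/DBCR0[BT] together with MSR[DE];
 * on classic parts it is driven directly through MSR[SE]/MSR[BE].
 */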
2234 void user_enable_single_step(struct task_struct *task)
2235 {
2236         struct pt_regs *regs = task->thread.regs;
2237
2238         if (regs != NULL) {
2239 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2240                 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2241                 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2242                 regs->msr |= MSR_DE;
2243 #else
2244                 regs->msr &= ~MSR_BE;
2245                 regs->msr |= MSR_SE;
2246 #endif
2247         }
2248         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2249 }
2250
2251 void user_enable_block_step(struct task_struct *task)
2252 {
2253         struct pt_regs *regs = task->thread.regs;
2254
2255         if (regs != NULL) {
2256 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2257                 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2258                 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2259                 regs->msr |= MSR_DE;
2260 #else
2261                 regs->msr &= ~MSR_SE;
2262                 regs->msr |= MSR_BE;
2263 #endif
2264         }
2265         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2266 }
2267
2268 void user_disable_single_step(struct task_struct *task)
2269 {
2270         struct pt_regs *regs = task->thread.regs;
2271
2272         if (regs != NULL) {
2273 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2274                 /*
2275                  * The logic to disable single stepping should be as
2276                  * simple as turning off the Instruction Complete flag.
2277                  * And, after doing so, if all debug flags are off, turn
2278                  * off DBCR0(IDM) and MSR(DE) .... Torez
2279                  */
2280                 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2281                 /*
2282                  * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2283                  */
2284                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2285                                         task->thread.debug.dbcr1)) {
2286                         /*
2287                          * All debug events were off.....
2288                          */
2289                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2290                         regs->msr &= ~MSR_DE;
2291                 }
2292 #else
2293                 regs->msr &= ~(MSR_SE | MSR_BE);
2294 #endif
2295         }
2296         clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2297 }
2298
2299 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2300 void ptrace_triggered(struct perf_event *bp,
2301                       struct perf_sample_data *data, struct pt_regs *regs)
2302 {
2303         struct perf_event_attr attr;
2304
2305         /*
2306          * Disable the breakpoint request here since ptrace has defined a
2307          * one-shot behaviour for breakpoint exceptions in PPC64.
2308          * The SIGTRAP signal is generated automatically for us in do_dabr().
2309          * We don't have to do anything about that here.
2310          */
2311         attr = bp->attr;
2312         attr.disabled = true;
2313         modify_user_hw_breakpoint(bp, &attr);
2314 }
2315 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2316
2317 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2318                                unsigned long data)
2319 {
2320 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2321         int ret;
2322         struct thread_struct *thread = &(task->thread);
2323         struct perf_event *bp;
2324         struct perf_event_attr attr;
2325 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2326 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2327         struct arch_hw_breakpoint hw_brk;
2328 #endif
2329
2330         /* For ppc64 we support one DABR and no IABRs at the moment.
2331          *  For embedded processors we support one DAC and no IACs at the
2332          *  moment.
2333          */
2334         if (addr > 0)
2335                 return -EINVAL;
2336
2337         /* The bottom 3 bits in dabr are flags */
2338         if ((data & ~0x7UL) >= TASK_SIZE)
2339                 return -EIO;
2340
2341 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2342         /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2343          *  It was assumed, in previous implementations, that 3 bits were
2344          *  passed together with the data address, fitting the design of the
2345          *  DABR register, as follows:
2346          *
2347          *  bit 0: Read flag
2348          *  bit 1: Write flag
2349          *  bit 2: Breakpoint translation
2350          *
2351          *  Thus, we use them here in the same way.
2352          */
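        /*
         * Illustrative userspace sketch only (not part of this file): with
         * the flag layout above, a tracer would typically arm a read+write
         * watchpoint on some address 'addr' with
         *
         *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, addr | 0x7);
         *
         * and clear it again by passing 0 as the data word.
         */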
2353
2354         /* Ensure breakpoint translation bit is set */
2355         if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2356                 return -EIO;
2357         hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2358         hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2359         hw_brk.len = 8;
2360 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2361         bp = thread->ptrace_bps[0];
2362         if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
2363                 if (bp) {
2364                         unregister_hw_breakpoint(bp);
2365                         thread->ptrace_bps[0] = NULL;
2366                 }
2367                 return 0;
2368         }
2369         if (bp) {
2370                 attr = bp->attr;
2371                 attr.bp_addr = hw_brk.address;
2372                 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2373
2374                 /* Enable breakpoint */
2375                 attr.disabled = false;
2376
2377                 ret =  modify_user_hw_breakpoint(bp, &attr);
2378                 if (ret) {
2379                         return ret;
2380                 }
2381                 thread->ptrace_bps[0] = bp;
2382                 thread->hw_brk = hw_brk;
2383                 return 0;
2384         }
2385
2386         /* Create a new breakpoint request if one doesn't exist already */
2387         hw_breakpoint_init(&attr);
2388         attr.bp_addr = hw_brk.address;
2389         attr.bp_len = 8;
2390         arch_bp_generic_fields(hw_brk.type,
2391                                &attr.bp_type);
2392
2393         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2394                                                ptrace_triggered, NULL, task);
2395         if (IS_ERR(bp)) {
2396                 thread->ptrace_bps[0] = NULL;
2397                 return PTR_ERR(bp);
2398         }
2399
2400 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2401         task->thread.hw_brk = hw_brk;
2402 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2403         /* As described above, it was assumed 3 bits were passed with the data
2404          *  address, but we will assume only the mode bits will be passed
2405          *  so as not to cause alignment restrictions for DAC-based processors.
2406          */
2407
2408         /* DACs hold the whole address without any mode flags */
2409         task->thread.debug.dac1 = data & ~0x3UL;
2410
2411         if (task->thread.debug.dac1 == 0) {
2412                 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2413                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2414                                         task->thread.debug.dbcr1)) {
2415                         task->thread.regs->msr &= ~MSR_DE;
2416                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2417                 }
2418                 return 0;
2419         }
2420
2421         /* Read or Write bits must be set */
2422
2423         if (!(data & 0x3UL))
2424                 return -EINVAL;
2425
2426         /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2427            register */
2428         task->thread.debug.dbcr0 |= DBCR0_IDM;
2429
2430         /* Check for write and read flags and set DBCR0
2431            accordingly */
2432         dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2433         if (data & 0x1UL)
2434                 dbcr_dac(task) |= DBCR_DAC1R;
2435         if (data & 0x2UL)
2436                 dbcr_dac(task) |= DBCR_DAC1W;
2437         task->thread.regs->msr |= MSR_DE;
2438 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2439         return 0;
2440 }
2441
2442 /*
2443  * Called by kernel/ptrace.c when detaching.
2444  *
2445  * Make sure single step bits etc. are not set.
2446  */
2447 void ptrace_disable(struct task_struct *child)
2448 {
2449         /* make sure the single step bit is not set. */
2450         user_disable_single_step(child);
2451 }
2452
2453 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2454 static long set_instruction_bp(struct task_struct *child,
2455                               struct ppc_hw_breakpoint *bp_info)
2456 {
2457         int slot;
2458         int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2459         int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2460         int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2461         int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2462
2463         if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2464                 slot2_in_use = 1;
2465         if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2466                 slot4_in_use = 1;
2467
2468         if (bp_info->addr >= TASK_SIZE)
2469                 return -EIO;
2470
2471         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2472
2473                 /* Make sure range is valid. */
2474                 if (bp_info->addr2 >= TASK_SIZE)
2475                         return -EIO;
2476
2477                 /* We need a pair of IAC registers */
2478                 if ((!slot1_in_use) && (!slot2_in_use)) {
2479                         slot = 1;
2480                         child->thread.debug.iac1 = bp_info->addr;
2481                         child->thread.debug.iac2 = bp_info->addr2;
2482                         child->thread.debug.dbcr0 |= DBCR0_IAC1;
2483                         if (bp_info->addr_mode ==
2484                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2485                                 dbcr_iac_range(child) |= DBCR_IAC12X;
2486                         else
2487                                 dbcr_iac_range(child) |= DBCR_IAC12I;
2488 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2489                 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2490                         slot = 3;
2491                         child->thread.debug.iac3 = bp_info->addr;
2492                         child->thread.debug.iac4 = bp_info->addr2;
2493                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2494                         if (bp_info->addr_mode ==
2495                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2496                                 dbcr_iac_range(child) |= DBCR_IAC34X;
2497                         else
2498                                 dbcr_iac_range(child) |= DBCR_IAC34I;
2499 #endif
2500                 } else
2501                         return -ENOSPC;
2502         } else {
2503                 /* We only need one.  If possible leave a pair free in
2504                  * case a range is needed later
2505                  */
2506                 if (!slot1_in_use) {
2507                         /*
2508                          * Don't use iac1 if iac1-iac2 are free and either
2509                          * iac3 or iac4 (but not both) are free
2510                          */
2511                         if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2512                                 slot = 1;
2513                                 child->thread.debug.iac1 = bp_info->addr;
2514                                 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2515                                 goto out;
2516                         }
2517                 }
2518                 if (!slot2_in_use) {
2519                         slot = 2;
2520                         child->thread.debug.iac2 = bp_info->addr;
2521                         child->thread.debug.dbcr0 |= DBCR0_IAC2;
2522 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2523                 } else if (!slot3_in_use) {
2524                         slot = 3;
2525                         child->thread.debug.iac3 = bp_info->addr;
2526                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2527                 } else if (!slot4_in_use) {
2528                         slot = 4;
2529                         child->thread.debug.iac4 = bp_info->addr;
2530                         child->thread.debug.dbcr0 |= DBCR0_IAC4;
2531 #endif
2532                 } else
2533                         return -ENOSPC;
2534         }
2535 out:
2536         child->thread.debug.dbcr0 |= DBCR0_IDM;
2537         child->thread.regs->msr |= MSR_DE;
2538
2539         return slot;
2540 }
2541
2542 static int del_instruction_bp(struct task_struct *child, int slot)
2543 {
2544         switch (slot) {
2545         case 1:
2546                 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2547                         return -ENOENT;
2548
2549                 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2550                         /* address range - clear slots 1 & 2 */
2551                         child->thread.debug.iac2 = 0;
2552                         dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2553                 }
2554                 child->thread.debug.iac1 = 0;
2555                 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2556                 break;
2557         case 2:
2558                 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2559                         return -ENOENT;
2560
2561                 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2562                         /* used in a range */
2563                         return -EINVAL;
2564                 child->thread.debug.iac2 = 0;
2565                 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2566                 break;
2567 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2568         case 3:
2569                 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2570                         return -ENOENT;
2571
2572                 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2573                         /* address range - clear slots 3 & 4 */
2574                         child->thread.debug.iac4 = 0;
2575                         dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2576                 }
2577                 child->thread.debug.iac3 = 0;
2578                 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2579                 break;
2580         case 4:
2581                 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2582                         return -ENOENT;
2583
2584                 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2585                         /* Used in a range */
2586                         return -EINVAL;
2587                 child->thread.debug.iac4 = 0;
2588                 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2589                 break;
2590 #endif
2591         default:
2592                 return -EINVAL;
2593         }
2594         return 0;
2595 }
2596
2597 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2598 {
2599         int byte_enable =
2600                 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2601                 & 0xf;
2602         int condition_mode =
2603                 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2604         int slot;
2605
2606         if (byte_enable && (condition_mode == 0))
2607                 return -EINVAL;
2608
2609         if (bp_info->addr >= TASK_SIZE)
2610                 return -EIO;
2611
2612         if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2613                 slot = 1;
2614                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2615                         dbcr_dac(child) |= DBCR_DAC1R;
2616                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2617                         dbcr_dac(child) |= DBCR_DAC1W;
2618                 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2619 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2620                 if (byte_enable) {
2621                         child->thread.debug.dvc1 =
2622                                 (unsigned long)bp_info->condition_value;
2623                         child->thread.debug.dbcr2 |=
2624                                 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2625                                  (condition_mode << DBCR2_DVC1M_SHIFT));
2626                 }
2627 #endif
2628 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2629         } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2630                 /* Both dac1 and dac2 are part of a range */
2631                 return -ENOSPC;
2632 #endif
2633         } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2634                 slot = 2;
2635                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2636                         dbcr_dac(child) |= DBCR_DAC2R;
2637                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2638                         dbcr_dac(child) |= DBCR_DAC2W;
2639                 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2640 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2641                 if (byte_enable) {
2642                         child->thread.debug.dvc2 =
2643                                 (unsigned long)bp_info->condition_value;
2644                         child->thread.debug.dbcr2 |=
2645                                 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2646                                  (condition_mode << DBCR2_DVC2M_SHIFT));
2647                 }
2648 #endif
2649         } else
2650                 return -ENOSPC;
2651         child->thread.debug.dbcr0 |= DBCR0_IDM;
2652         child->thread.regs->msr |= MSR_DE;
2653
2654         return slot + 4;
2655 }
2656
2657 static int del_dac(struct task_struct *child, int slot)
2658 {
2659         if (slot == 1) {
2660                 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2661                         return -ENOENT;
2662
2663                 child->thread.debug.dac1 = 0;
2664                 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2665 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2666                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2667                         child->thread.debug.dac2 = 0;
2668                         child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2669                 }
2670                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2671 #endif
2672 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2673                 child->thread.debug.dvc1 = 0;
2674 #endif
2675         } else if (slot == 2) {
2676                 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2677                         return -ENOENT;
2678
2679 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2680                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2681                         /* Part of a range */
2682                         return -EINVAL;
2683                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2684 #endif
2685 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2686                 child->thread.debug.dvc2 = 0;
2687 #endif
2688                 child->thread.debug.dac2 = 0;
2689                 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2690         } else
2691                 return -EINVAL;
2692
2693         return 0;
2694 }
2695 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2696
2697 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2698 static int set_dac_range(struct task_struct *child,
2699                          struct ppc_hw_breakpoint *bp_info)
2700 {
2701         int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2702
2703         /* We don't allow range watchpoints to be used with DVC */
2704         if (bp_info->condition_mode)
2705                 return -EINVAL;
2706
2707         /*
2708          * Best effort to verify the address range.  The user/supervisor bits
2709          * prevent trapping in kernel space, but let's fail on an obvious bad
2710          * range.  The simple test on the mask is not fool-proof, and any
2711          * exclusive range will spill over into kernel space.
2712          */
2713         if (bp_info->addr >= TASK_SIZE)
2714                 return -EIO;
2715         if (mode == PPC_BREAKPOINT_MODE_MASK) {
2716                 /*
2717                  * dac2 is a bitmask.  Don't allow a mask that makes a
2718                  * kernel space address from a valid dac1 value
2719                  */
2720                 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2721                         return -EIO;
2722         } else {
2723                 /*
2724                  * For range breakpoints, addr2 must also be a valid address
2725                  */
2726                 if (bp_info->addr2 >= TASK_SIZE)
2727                         return -EIO;
2728         }
2729
2730         if (child->thread.debug.dbcr0 &
2731             (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2732                 return -ENOSPC;
2733
2734         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2735                 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2736         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2737                 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2738         child->thread.debug.dac1 = bp_info->addr;
2739         child->thread.debug.dac2 = bp_info->addr2;
2740         if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2741                 child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2742         else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2743                 child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2744         else    /* PPC_BREAKPOINT_MODE_MASK */
2745                 child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2746         child->thread.regs->msr |= MSR_DE;
2747
2748         return 5;
2749 }
2750 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2751
2752 static long ppc_set_hwdebug(struct task_struct *child,
2753                      struct ppc_hw_breakpoint *bp_info)
2754 {
2755 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2756         int len = 0;
2757         struct thread_struct *thread = &(child->thread);
2758         struct perf_event *bp;
2759         struct perf_event_attr attr;
2760 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2761 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2762         struct arch_hw_breakpoint brk;
2763 #endif
2764
2765         if (bp_info->version != 1)
2766                 return -ENOTSUPP;
2767 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2768         /*
2769          * Check for invalid flags and combinations
2770          */
2771         if ((bp_info->trigger_type == 0) ||
2772             (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2773                                        PPC_BREAKPOINT_TRIGGER_RW)) ||
2774             (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2775             (bp_info->condition_mode &
2776              ~(PPC_BREAKPOINT_CONDITION_MODE |
2777                PPC_BREAKPOINT_CONDITION_BE_ALL)))
2778                 return -EINVAL;
2779 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2780         if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2781                 return -EINVAL;
2782 #endif
2783
2784         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2785                 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2786                     (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2787                         return -EINVAL;
2788                 return set_instruction_bp(child, bp_info);
2789         }
2790         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2791                 return set_dac(child, bp_info);
2792
2793 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2794         return set_dac_range(child, bp_info);
2795 #else
2796         return -EINVAL;
2797 #endif
2798 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2799         /*
2800          * We only support one data breakpoint
2801          */
2802         if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2803             (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2804             bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2805                 return -EINVAL;
2806
2807         if ((unsigned long)bp_info->addr >= TASK_SIZE)
2808                 return -EIO;
2809
2810         brk.address = bp_info->addr & ~7UL;
2811         brk.type = HW_BRK_TYPE_TRANSLATE;
2812         brk.len = 8;
2813         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2814                 brk.type |= HW_BRK_TYPE_READ;
2815         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2816                 brk.type |= HW_BRK_TYPE_WRITE;
2817 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2818         /*
2819          * Check if the request is for 'range' breakpoints. We can
2820          * support it if range < 8 bytes.
2821          */
2822         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2823                 len = bp_info->addr2 - bp_info->addr;
2824         else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2825                 len = 1;
2826         else
2827                 return -EINVAL;
2828         bp = thread->ptrace_bps[0];
2829         if (bp)
2830                 return -ENOSPC;
2831
2832         /* Create a new breakpoint request if one doesn't exist already */
2833         hw_breakpoint_init(&attr);
2834         attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2835         attr.bp_len = len;
2836         arch_bp_generic_fields(brk.type, &attr.bp_type);
2837
2838         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2839                                                ptrace_triggered, NULL, child);
2840         if (IS_ERR(bp)) {
2841                 thread->ptrace_bps[0] = NULL;
2842                 return PTR_ERR(bp);
2843         }
2844
2845         return 1;
2846 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2847
2848         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2849                 return -EINVAL;
2850
2851         if (child->thread.hw_brk.address)
2852                 return -ENOSPC;
2853
2854         child->thread.hw_brk = brk;
2855
2856         return 1;
2857 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2858 }
2859
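/*
 * Illustrative userspace sketch only (struct ppc_hw_breakpoint and the
 * PPC_PTRACE_* requests come from the uapi headers; 'watch_addr' is just a
 * placeholder): a debugger sets a data watchpoint and later removes it
 * roughly like this:
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = watch_addr,
 *	};
 *	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *	...
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot);
 *
 * ppc_set_hwdebug() returns the slot handle that ppc_del_hwdebug() takes.
 */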
2860 static long ppc_del_hwdebug(struct task_struct *child, long data)
2861 {
2862 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2863         int ret = 0;
2864         struct thread_struct *thread = &(child->thread);
2865         struct perf_event *bp;
2866 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2867 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2868         int rc;
2869
2870         if (data <= 4)
2871                 rc = del_instruction_bp(child, (int)data);
2872         else
2873                 rc = del_dac(child, (int)data - 4);
2874
2875         if (!rc) {
2876                 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2877                                         child->thread.debug.dbcr1)) {
2878                         child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2879                         child->thread.regs->msr &= ~MSR_DE;
2880                 }
2881         }
2882         return rc;
2883 #else
2884         if (data != 1)
2885                 return -EINVAL;
2886
2887 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2888         bp = thread->ptrace_bps[0];
2889         if (bp) {
2890                 unregister_hw_breakpoint(bp);
2891                 thread->ptrace_bps[0] = NULL;
2892         } else
2893                 ret = -ENOENT;
2894         return ret;
2895 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2896         if (child->thread.hw_brk.address == 0)
2897                 return -ENOENT;
2898
2899         child->thread.hw_brk.address = 0;
2900         child->thread.hw_brk.type = 0;
2901 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2902
2903         return 0;
2904 #endif
2905 }
2906
2907 long arch_ptrace(struct task_struct *child, long request,
2908                  unsigned long addr, unsigned long data)
2909 {
2910         int ret = -EPERM;
2911         void __user *datavp = (void __user *) data;
2912         unsigned long __user *datalp = datavp;
2913
2914         switch (request) {
2915         /* read the word at location addr in the USER area. */
2916         case PTRACE_PEEKUSR: {
2917                 unsigned long index, tmp;
2918
2919                 ret = -EIO;
2920                 /* convert to index and check */
2921 #ifdef CONFIG_PPC32
2922                 index = addr >> 2;
2923                 if ((addr & 3) || (index > PT_FPSCR)
2924                     || (child->thread.regs == NULL))
2925 #else
2926                 index = addr >> 3;
2927                 if ((addr & 7) || (index > PT_FPSCR))
2928 #endif
2929                         break;
2930
2931                 CHECK_FULL_REGS(child->thread.regs);
2932                 if (index < PT_FPR0) {
2933                         ret = ptrace_get_reg(child, (int) index, &tmp);
2934                         if (ret)
2935                                 break;
2936                 } else {
2937                         unsigned int fpidx = index - PT_FPR0;
2938
2939                         flush_fp_to_thread(child);
2940                         if (fpidx < (PT_FPSCR - PT_FPR0))
2941                                 if (IS_ENABLED(CONFIG_PPC32)) {
2942                                         // On 32-bit the index we are passed refers to 32-bit words
2943                                         tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx];
2944                                 } else {
2945                                         memcpy(&tmp, &child->thread.TS_FPR(fpidx),
2946                                                sizeof(long));
2947                                 }
2948                         else
2949                                 tmp = child->thread.fp_state.fpscr;
2950                 }
2951                 ret = put_user(tmp, datalp);
2952                 break;
2953         }
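        /*
         * Illustrative sketch, not compiled here: the tracer-side view of the
         * PTRACE_PEEKUSR path above (PTRACE_PEEKUSER in libc headers).  addr
         * is a PT_* index from the uapi asm/ptrace.h scaled by the word size,
         * which is exactly what the index conversion above undoes.  pid is a
         * placeholder and the child is assumed to be ptrace-stopped.
         *
         *	#include <errno.h>
         *	#include <stdio.h>
         *	#include <sys/ptrace.h>
         *	#include <asm/ptrace.h>		// PT_R1, PT_FPR0, ...
         *
         *	errno = 0;
         *	long sp  = ptrace(PTRACE_PEEKUSER, pid,
         *			  (void *)(PT_R1 * sizeof(long)), NULL);
         *	long fp0 = ptrace(PTRACE_PEEKUSER, pid,
         *			  (void *)(PT_FPR0 * sizeof(long)), NULL);
         *	if (errno)
         *		// -1 can be a valid register value, so test errno.
         *		perror("PTRACE_PEEKUSER");
         */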
2954
2955         /* write the word at location addr in the USER area */
2956         case PTRACE_POKEUSR: {
2957                 unsigned long index;
2958
2959                 ret = -EIO;
2960                 /* convert to index and check */
2961 #ifdef CONFIG_PPC32
2962                 index = addr >> 2;
2963                 if ((addr & 3) || (index > PT_FPSCR)
2964                     || (child->thread.regs == NULL))
2965 #else
2966                 index = addr >> 3;
2967                 if ((addr & 7) || (index > PT_FPSCR))
2968 #endif
2969                         break;
2970
2971                 CHECK_FULL_REGS(child->thread.regs);
2972                 if (index < PT_FPR0) {
2973                         ret = ptrace_put_reg(child, index, data);
2974                 } else {
2975                         unsigned int fpidx = index - PT_FPR0;
2976
2977                         flush_fp_to_thread(child);
2978                         if (fpidx < (PT_FPSCR - PT_FPR0))
2979                                 if (IS_ENABLED(CONFIG_PPC32)) {
2980                                         // On 32-bit the index we are passed refers to 32-bit words
2981                                         ((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
2982                                 } else {
2983                                         memcpy(&child->thread.TS_FPR(fpidx), &data,
2984                                                sizeof(long));
2985                                 }
2986                         else
2987                                 child->thread.fp_state.fpscr = data;
2988                         ret = 0;
2989                 }
2990                 break;
2991         }
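        /*
         * Illustrative sketch, not compiled here: the matching write
         * direction.  Poking PT_R3 this way changes the value the child sees
         * in r3, e.g. to rewrite a syscall return value at a syscall-exit
         * stop.  pid and new_r3 are placeholders.
         *
         *	if (ptrace(PTRACE_POKEUSER, pid,
         *		   (void *)(PT_R3 * sizeof(long)),
         *		   (void *)new_r3) < 0)
         *		perror("PTRACE_POKEUSER");
         */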
2992
2993         case PPC_PTRACE_GETHWDBGINFO: {
2994                 struct ppc_debug_info dbginfo;
2995
2996                 dbginfo.version = 1;
2997 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2998                 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
2999                 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3000                 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3001                 dbginfo.data_bp_alignment = 4;
3002                 dbginfo.sizeof_condition = 4;
3003                 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3004                                    PPC_DEBUG_FEATURE_INSN_BP_MASK;
3005 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3006                 dbginfo.features |=
3007                                    PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3008                                    PPC_DEBUG_FEATURE_DATA_BP_MASK;
3009 #endif
3010 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3011                 dbginfo.num_instruction_bps = 0;
3012                 dbginfo.num_data_bps = 1;
3013                 dbginfo.num_condition_regs = 0;
3014 #ifdef CONFIG_PPC64
3015                 dbginfo.data_bp_alignment = 8;
3016 #else
3017                 dbginfo.data_bp_alignment = 4;
3018 #endif
3019                 dbginfo.sizeof_condition = 0;
3020 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3021                 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3022                 if (cpu_has_feature(CPU_FTR_DAWR))
3023                         dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3024 #else
3025                 dbginfo.features = 0;
3026 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3027 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3028
3029                 if (!access_ok(VERIFY_WRITE, datavp,
3030                                sizeof(struct ppc_debug_info)))
3031                         return -EFAULT;
3032                 ret = __copy_to_user(datavp, &dbginfo,
3033                                      sizeof(struct ppc_debug_info)) ?
3034                       -EFAULT : 0;
3035                 break;
3036         }
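        /*
         * Illustrative sketch, not compiled here: querying the capability
         * record filled in above before deciding which breakpoint flavour to
         * request.  pid is a placeholder.
         *
         *	struct ppc_debug_info info;
         *
         *	if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, NULL, &info) < 0) {
         *		perror("PPC_PTRACE_GETHWDBGINFO");
         *	} else {
         *		printf("data bps: %u, alignment: %u bytes, DAWR: %s\n",
         *		       info.num_data_bps, info.data_bp_alignment,
         *		       (info.features & PPC_DEBUG_FEATURE_DATA_BP_DAWR) ?
         *		       "yes" : "no");
         *	}
         */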
3037
3038         case PPC_PTRACE_SETHWDEBUG: {
3039                 struct ppc_hw_breakpoint bp_info;
3040
3041                 if (!access_ok(VERIFY_READ, datavp,
3042                                sizeof(struct ppc_hw_breakpoint)))
3043                         return -EFAULT;
3044                 ret = __copy_from_user(&bp_info, datavp,
3045                                        sizeof(struct ppc_hw_breakpoint)) ?
3046                       -EFAULT : 0;
3047                 if (!ret)
3048                         ret = ppc_set_hwdebug(child, &bp_info);
3049                 break;
3050         }
3051
3052         case PPC_PTRACE_DELHWDEBUG: {
3053                 ret = ppc_del_hwdebug(child, data);
3054                 break;
3055         }
3056
3057         case PTRACE_GET_DEBUGREG: {
3058 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3059                 unsigned long dabr_fake;
3060 #endif
3061                 ret = -EINVAL;
3062                 /* We only support one DABR and no IABRs at the moment */
3063                 if (addr > 0)
3064                         break;
3065 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3066                 ret = put_user(child->thread.debug.dac1, datalp);
3067 #else
3068                 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3069                              (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3070                 ret = put_user(dabr_fake, datalp);
3071 #endif
3072                 break;
3073         }
3074
3075         case PTRACE_SET_DEBUGREG:
3076                 ret = ptrace_set_debugreg(child, addr, data);
3077                 break;
3078
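        /*
         * Illustrative sketch, not compiled here: the older single-slot
         * debug-register interface handled by the two cases above.  With
         * PTRACE_SET_DEBUGREG the value is DABR-style: the watch address with
         * its low bits selecting read/write access and translation (the 0x7
         * below is an assumption of this sketch; PPC_PTRACE_SETHWDEBUG above
         * is the more expressive replacement).  PTRACE_GET_DEBUGREG stores
         * the current value through the data pointer.  pid and addr are
         * placeholders.
         *
         *	unsigned long dabr;
         *
         *	if (ptrace(PTRACE_SET_DEBUGREG, pid, NULL,
         *		   (void *)((addr & ~7UL) | 0x7)) < 0)
         *		perror("PTRACE_SET_DEBUGREG");
         *
         *	if (ptrace(PTRACE_GET_DEBUGREG, pid, NULL, &dabr) < 0)
         *		perror("PTRACE_GET_DEBUGREG");
         */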
3079 #ifdef CONFIG_PPC64
3080         case PTRACE_GETREGS64:
3081 #endif
3082         case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3083                 return copy_regset_to_user(child, &user_ppc_native_view,
3084                                            REGSET_GPR,
3085                                            0, sizeof(struct pt_regs),
3086                                            datavp);
3087
3088 #ifdef CONFIG_PPC64
3089         case PTRACE_SETREGS64:
3090 #endif
3091         case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3092                 return copy_regset_from_user(child, &user_ppc_native_view,
3093                                              REGSET_GPR,
3094                                              0, sizeof(struct pt_regs),
3095                                              datavp);
3096
3097         case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3098                 return copy_regset_to_user(child, &user_ppc_native_view,
3099                                            REGSET_FPR,
3100                                            0, sizeof(elf_fpregset_t),
3101                                            datavp);
3102
3103         case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3104                 return copy_regset_from_user(child, &user_ppc_native_view,
3105                                              REGSET_FPR,
3106                                              0, sizeof(elf_fpregset_t),
3107                                              datavp);
3108
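        /*
         * Illustrative sketch, not compiled here: the whole-regset requests
         * above move a complete struct pt_regs or elf_fpregset_t in one call,
         * which is usually cheaper than looping over PTRACE_PEEKUSER.  pid is
         * a placeholder; the uapi struct pt_regs layout comes from
         * <asm/ptrace.h>, and header interactions with <sys/ptrace.h> vary by
         * libc.
         *
         *	struct pt_regs uregs;
         *	unsigned long long fpregs[33];	// fpr0..fpr31 then fpscr
         *
         *	if (ptrace(PTRACE_GETREGS, pid, NULL, &uregs) < 0)
         *		perror("PTRACE_GETREGS");
         *	if (ptrace(PTRACE_GETFPREGS, pid, NULL, fpregs) < 0)
         *		perror("PTRACE_GETFPREGS");
         *
         *	printf("nip=%lx r1=%lx fpscr=%llx\n",
         *	       uregs.nip, uregs.gpr[1], fpregs[32]);
         */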
3109 #ifdef CONFIG_ALTIVEC
3110         case PTRACE_GETVRREGS:
3111                 return copy_regset_to_user(child, &user_ppc_native_view,
3112                                            REGSET_VMX,
3113                                            0, (33 * sizeof(vector128) +
3114                                                sizeof(u32)),
3115                                            datavp);
3116
3117         case PTRACE_SETVRREGS:
3118                 return copy_regset_from_user(child, &user_ppc_native_view,
3119                                              REGSET_VMX,
3120                                              0, (33 * sizeof(vector128) +
3121                                                  sizeof(u32)),
3122                                              datavp);
3123 #endif
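        /*
         * Illustrative sketch, not compiled here: a buffer shape matching the
         * VMX transfer size used above (33 * sizeof(vector128) + sizeof(u32):
         * vr0..vr31, one 16-byte slot carrying VSCR, then a 32-bit VRSAVE
         * word).  Where VSCR sits inside its slot is endian-dependent, so
         * this only pins down the sizes.  pid is a placeholder.
         *
         *	struct {
         *		unsigned char vr[32][16];	// vr0..vr31
         *		unsigned char vscr_slot[16];	// VSCR lives in here
         *		unsigned int  vrsave;
         *	} vrregs;				// 532 bytes total
         *
         *	if (ptrace(PTRACE_GETVRREGS, pid, NULL, &vrregs) < 0)
         *		perror("PTRACE_GETVRREGS");
         */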
3124 #ifdef CONFIG_VSX
3125         case PTRACE_GETVSRREGS:
3126                 return copy_regset_to_user(child, &user_ppc_native_view,
3127                                            REGSET_VSX,
3128                                            0, 32 * sizeof(double),
3129                                            datavp);
3130
3131         case PTRACE_SETVSRREGS:
3132                 return copy_regset_from_user(child, &user_ppc_native_view,
3133                                              REGSET_VSX,
3134                                              0, 32 * sizeof(double),
3135                                              datavp);
3136 #endif
3137 #ifdef CONFIG_SPE
3138         case PTRACE_GETEVRREGS:
3139                 /* Get the child SPE register state. */
3140                 return copy_regset_to_user(child, &user_ppc_native_view,
3141                                            REGSET_SPE, 0, 35 * sizeof(u32),
3142                                            datavp);
3143
3144         case PTRACE_SETEVRREGS:
3145                 /* Set the child SPE register state. */
3146                 return copy_regset_from_user(child, &user_ppc_native_view,
3147                                              REGSET_SPE, 0, 35 * sizeof(u32),
3148                                              datavp);
3149 #endif
3150
3151         default:
3152                 ret = ptrace_request(child, request, addr, data);
3153                 break;
3154         }
3155         return ret;
3156 }
3157
3158 #ifdef CONFIG_SECCOMP
3159 static int do_seccomp(struct pt_regs *regs)
3160 {
3161         if (!test_thread_flag(TIF_SECCOMP))
3162                 return 0;
3163
3164         /*
3165          * The ABI we present to seccomp tracers is that r3 contains
3166          * the syscall return value and orig_gpr3 contains the first
3167          * syscall parameter. This is different from the ptrace ABI where
3168          * both r3 and orig_gpr3 contain the first syscall parameter.
3169          */
3170         regs->gpr[3] = -ENOSYS;
3171
3172         /*
3173          * We use the __ version here because we have already checked
3174          * TIF_SECCOMP. If this fails, there is nothing left to do, we
3175          * have already loaded -ENOSYS into r3, or seccomp has put
3176          * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3177          */
3178         if (__secure_computing(NULL))
3179                 return -1;
3180
3181         /*
3182          * The syscall was allowed by seccomp, restore the register
3183          * state to what audit expects.
3184          * Note that we use orig_gpr3, which means a seccomp tracer can
3185          * modify the first syscall parameter (in orig_gpr3) and also
3186          * allow the syscall to proceed.
3187          */
3188         regs->gpr[3] = regs->orig_gpr3;
3189
3190         return 0;
3191 }
3192 #else
3193 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3194 #endif /* CONFIG_SECCOMP */
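/*
 * Illustrative sketch, not compiled here: what the r3/orig_gpr3 split
 * described in do_seccomp() means for a tracer attached with
 * PTRACE_O_TRACESECCOMP.  At a PTRACE_EVENT_SECCOMP stop, orig_gpr3 still
 * holds the first syscall argument while r3 has already been loaded with
 * -ENOSYS.  pid is a placeholder and the child is assumed to be stopped at
 * that event.
 *
 *	errno = 0;
 *	long arg0 = ptrace(PTRACE_PEEKUSER, pid,
 *			   (void *)(PT_ORIG_R3 * sizeof(long)), NULL);
 *	long r3   = ptrace(PTRACE_PEEKUSER, pid,
 *			   (void *)(PT_R3 * sizeof(long)), NULL);
 *	// Expect arg0 == first syscall argument and r3 == -ENOSYS here.
 */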
3195
3196 /**
3197  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3198  * @regs: the pt_regs of the task to trace (current)
3199  *
3200  * Performs various types of tracing on syscall entry. This includes seccomp,
3201  * ptrace, syscall tracepoints and audit.
3202  *
3203  * The pt_regs are potentially visible to userspace via ptrace, so their
3204  * contents are ABI.
3205  *
3206  * One or more of the tracers may modify the contents of pt_regs, in particular
3207  * to modify arguments or even the syscall number itself.
3208  *
3209  * It's also possible that a tracer can choose to reject the system call. In
3210  * that case this function will return an illegal syscall number, and will put
3211  * an appropriate return value in regs->gpr[3].
3212  *
3213  * Return: the (possibly changed) syscall number.
3214  */
3215 long do_syscall_trace_enter(struct pt_regs *regs)
3216 {
3217         user_exit();
3218
3219         /*
3220          * The tracer may decide to abort the syscall, in which case tracehook
3221          * will return nonzero. Note that the tracer may also just change
3222          * regs->gpr[0] to an invalid syscall number, which is handled
3223          * below on the exit path.
3224          */
3225         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3226             tracehook_report_syscall_entry(regs))
3227                 goto skip;
3228
3229         /* Run seccomp after ptrace; allow it to set gpr[3]. */
3230         if (do_seccomp(regs))
3231                 return -1;
3232
3233         /* Avoid tracing and auditing when the syscall is invalid. */
3234         if (regs->gpr[0] >= NR_syscalls)
3235                 goto skip;
3236
3237         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3238                 trace_sys_enter(regs, regs->gpr[0]);
3239
3240 #ifdef CONFIG_PPC64
3241         if (!is_32bit_task())
3242                 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3243                                     regs->gpr[5], regs->gpr[6]);
3244         else
3245 #endif
3246                 audit_syscall_entry(regs->gpr[0],
3247                                     regs->gpr[3] & 0xffffffff,
3248                                     regs->gpr[4] & 0xffffffff,
3249                                     regs->gpr[5] & 0xffffffff,
3250                                     regs->gpr[6] & 0xffffffff);
3251
3252         /* Return the possibly modified but valid syscall number */
3253         return regs->gpr[0];
3254
3255 skip:
3256         /*
3257          * If we are aborting explicitly, or if the syscall number is
3258          * now invalid, set the return value to -ENOSYS.
3259          */
3260         regs->gpr[3] = -ENOSYS;
3261         return -1;
3262 }
3263
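/*
 * Illustrative sketch, not compiled here: the tracer-side counterpart of the
 * entry path above.  At a PTRACE_SYSCALL entry stop the syscall number is in
 * gpr[0], so replacing it with an out-of-range value such as -1 makes the
 * NR_syscalls check above take the skip: path and the call fails with
 * -ENOSYS.  pid is a placeholder and the child is assumed to be stopped at
 * syscall entry.
 *
 *	errno = 0;
 *	// nr is the number the child was about to invoke.
 *	long nr = ptrace(PTRACE_PEEKUSER, pid,
 *			 (void *)(PT_R0 * sizeof(long)), NULL);
 *
 *	// Veto the syscall by making its number invalid.
 *	if (ptrace(PTRACE_POKEUSER, pid,
 *		   (void *)(PT_R0 * sizeof(long)), (void *)-1L) < 0)
 *		perror("PTRACE_POKEUSER");
 */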
3264 void do_syscall_trace_leave(struct pt_regs *regs)
3265 {
3266         int step;
3267
3268         audit_syscall_exit(regs);
3269
3270         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3271                 trace_sys_exit(regs, regs->result);
3272
3273         step = test_thread_flag(TIF_SINGLESTEP);
3274         if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3275                 tracehook_report_syscall_exit(regs, step);
3276
3277         user_enter();
3278 }