GNU Linux-libre 4.14.290-gnu1
arch/s390/kernel/ptrace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Ptrace user space interface.
4  *
5  *    Copyright IBM Corp. 1999, 2010
6  *    Author(s): Denis Joseph Barrow
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/mm.h>
14 #include <linux/smp.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/signal.h>
21 #include <linux/elf.h>
22 #include <linux/regset.h>
23 #include <linux/tracehook.h>
24 #include <linux/seccomp.h>
25 #include <linux/compat.h>
26 #include <trace/syscall.h>
27 #include <asm/segment.h>
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/pgalloc.h>
31 #include <linux/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/switch_to.h>
34 #include "entry.h"
35
36 #ifdef CONFIG_COMPAT
37 #include "compat_ptrace.h"
38 #endif
39
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/syscalls.h>
42
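/*
 * Update the control registers (CR0, CR2 and CR9-CR11) and the PER bit
 * in the PSW of @task to match its ptrace settings: transactional
 * execution, guarded storage and PER event setup.
 */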
43 void update_cr_regs(struct task_struct *task)
44 {
45         struct pt_regs *regs = task_pt_regs(task);
46         struct thread_struct *thread = &task->thread;
47         struct per_regs old, new;
48         unsigned long cr0_old, cr0_new;
49         unsigned long cr2_old, cr2_new;
50         int cr0_changed, cr2_changed;
51
52         __ctl_store(cr0_old, 0, 0);
53         __ctl_store(cr2_old, 2, 2);
54         cr0_new = cr0_old;
55         cr2_new = cr2_old;
56         /* Take care of the enable/disable of transactional execution. */
57         if (MACHINE_HAS_TE) {
58                 /* Set or clear transaction execution TXC bit 8. */
59                 cr0_new |= (1UL << 55);
60                 if (task->thread.per_flags & PER_FLAG_NO_TE)
61                         cr0_new &= ~(1UL << 55);
62                 /* Set or clear transaction execution TDC bits 62 and 63. */
63                 cr2_new &= ~3UL;
64                 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
65                         if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
66                                 cr2_new |= 1UL;
67                         else
68                                 cr2_new |= 2UL;
69                 }
70         }
71         /* Take care of enable/disable of guarded storage. */
72         if (MACHINE_HAS_GS) {
73                 cr2_new &= ~(1UL << 4);
74                 if (task->thread.gs_cb)
75                         cr2_new |= (1UL << 4);
76         }
77         /* Load control register 0/2 iff changed */
78         cr0_changed = cr0_new != cr0_old;
79         cr2_changed = cr2_new != cr2_old;
80         if (cr0_changed)
81                 __ctl_load(cr0_new, 0, 0);
82         if (cr2_changed)
83                 __ctl_load(cr2_new, 2, 2);
84         /* Copy user specified PER registers */
85         new.control = thread->per_user.control;
86         new.start = thread->per_user.start;
87         new.end = thread->per_user.end;
88
89         /* merge TIF_SINGLE_STEP into user specified PER registers. */
90         if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
91             test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
92                 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
93                         new.control |= PER_EVENT_BRANCH;
94                 else
95                         new.control |= PER_EVENT_IFETCH;
96                 new.control |= PER_CONTROL_SUSPENSION;
97                 new.control |= PER_EVENT_TRANSACTION_END;
98                 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
99                         new.control |= PER_EVENT_IFETCH;
100                 new.start = 0;
101                 new.end = -1UL;
102         }
103
104         /* Take care of the PER enablement bit in the PSW. */
105         if (!(new.control & PER_EVENT_MASK)) {
106                 regs->psw.mask &= ~PSW_MASK_PER;
107                 return;
108         }
109         regs->psw.mask |= PSW_MASK_PER;
110         __ctl_store(old, 9, 11);
111         if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
112                 __ctl_load(new, 9, 11);
113 }
114
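/*
 * The single-step / block-step helpers below only set or clear the
 * corresponding TIF flags; update_cr_regs() merges those flags into the
 * PER control registers.
 */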
115 void user_enable_single_step(struct task_struct *task)
116 {
117         clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
118         set_tsk_thread_flag(task, TIF_SINGLE_STEP);
119 }
120
121 void user_disable_single_step(struct task_struct *task)
122 {
123         clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
124         clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
125 }
126
127 void user_enable_block_step(struct task_struct *task)
128 {
129         set_tsk_thread_flag(task, TIF_SINGLE_STEP);
130         set_tsk_thread_flag(task, TIF_BLOCK_STEP);
131 }
132
133 /*
134  * Called by kernel/ptrace.c when detaching..
135  *
136  * Clear all debugging related fields.
137  */
138 void ptrace_disable(struct task_struct *task)
139 {
140         memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
141         memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
142         clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
143         clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
144         task->thread.per_flags = 0;
145 }
146
147 #define __ADDR_MASK 7
148
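/*
 * Read a word from the PER (debug) part of the user area. While single
 * stepping is active the reported "active" PER set is an instruction
 * fetch event covering the whole address space rather than the user
 * specified set.
 */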
149 static inline unsigned long __peek_user_per(struct task_struct *child,
150                                             addr_t addr)
151 {
152         struct per_struct_kernel *dummy = NULL;
153
154         if (addr == (addr_t) &dummy->cr9)
155                 /* Control bits of the active per set. */
156                 return test_thread_flag(TIF_SINGLE_STEP) ?
157                         PER_EVENT_IFETCH : child->thread.per_user.control;
158         else if (addr == (addr_t) &dummy->cr10)
159                 /* Start address of the active per set. */
160                 return test_thread_flag(TIF_SINGLE_STEP) ?
161                         0 : child->thread.per_user.start;
162         else if (addr == (addr_t) &dummy->cr11)
163                 /* End address of the active per set. */
164                 return test_thread_flag(TIF_SINGLE_STEP) ?
165                         -1UL : child->thread.per_user.end;
166         else if (addr == (addr_t) &dummy->bits)
167                 /* Single-step bit. */
168                 return test_thread_flag(TIF_SINGLE_STEP) ?
169                         (1UL << (BITS_PER_LONG - 1)) : 0;
170         else if (addr == (addr_t) &dummy->starting_addr)
171                 /* Start address of the user specified per set. */
172                 return child->thread.per_user.start;
173         else if (addr == (addr_t) &dummy->ending_addr)
174                 /* End address of the user specified per set. */
175                 return child->thread.per_user.end;
176         else if (addr == (addr_t) &dummy->perc_atmid)
177                 /* PER code, ATMID and AI of the last PER trap */
178                 return (unsigned long)
179                         child->thread.per_event.cause << (BITS_PER_LONG - 16);
180         else if (addr == (addr_t) &dummy->address)
181                 /* Address of the last PER trap */
182                 return child->thread.per_event.address;
183         else if (addr == (addr_t) &dummy->access_id)
184                 /* Access id of the last PER trap */
185                 return (unsigned long)
186                         child->thread.per_event.paid << (BITS_PER_LONG - 8);
187         return 0;
188 }
189
190 /*
191  * Read the word at offset addr from the user area of a process. The
192  * trouble here is that the information is littered over different
193  * locations. The process registers are found on the kernel stack,
194  * the floating point stuff and the trace settings are stored in
195  * the task structure. In addition the different structures in
196  * struct user contain pad bytes that should be read as zeroes.
197  * Lovely...
198  */
199 static unsigned long __peek_user(struct task_struct *child, addr_t addr)
200 {
201         struct user *dummy = NULL;
202         addr_t offset, tmp;
203
204         if (addr < (addr_t) &dummy->regs.acrs) {
205                 /*
206                  * psw and gprs are stored on the stack
207                  */
208                 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
209                 if (addr == (addr_t) &dummy->regs.psw.mask) {
210                         /* Return a clean psw mask. */
211                         tmp &= PSW_MASK_USER | PSW_MASK_RI;
212                         tmp |= PSW_USER_BITS;
213                 }
214
215         } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
216                 /*
217                  * access registers are stored in the thread structure
218                  */
219                 offset = addr - (addr_t) &dummy->regs.acrs;
220                 /*
221                  * Very special case: old & broken 64 bit gdb reading
222                  * from acrs[15]. Result is a 64 bit value. Read the
223                  * 32 bit acrs[15] value and shift it by 32. Sick...
224                  */
225                 if (addr == (addr_t) &dummy->regs.acrs[15])
226                         tmp = ((unsigned long) child->thread.acrs[15]) << 32;
227                 else
228                         tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
229
230         } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
231                 /*
232                  * orig_gpr2 is stored on the kernel stack
233                  */
234                 tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
235
236         } else if (addr < (addr_t) &dummy->regs.fp_regs) {
237                 /*
238                  * prevent reads of padding hole between
239                  * orig_gpr2 and fp_regs on s390.
240                  */
241                 tmp = 0;
242
243         } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
244                 /*
245                  * floating point control reg. is in the thread structure
246                  */
247                 tmp = child->thread.fpu.fpc;
248                 tmp <<= BITS_PER_LONG - 32;
249
250         } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
251                 /*
252                  * floating point regs. are either in child->thread.fpu
253                  * or the child->thread.fpu.vxrs array
254                  */
255                 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
256                 if (MACHINE_HAS_VX)
257                         tmp = *(addr_t *)
258                                ((addr_t) child->thread.fpu.vxrs + 2*offset);
259                 else
260                         tmp = *(addr_t *)
261                                ((addr_t) child->thread.fpu.fprs + offset);
262
263         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
264                 /*
265                  * Handle access to the per_info structure.
266                  */
267                 addr -= (addr_t) &dummy->regs.per_info;
268                 tmp = __peek_user_per(child, addr);
269
270         } else
271                 tmp = 0;
272
273         return tmp;
274 }
275
276 static int
277 peek_user(struct task_struct *child, addr_t addr, addr_t data)
278 {
279         addr_t tmp, mask;
280
281         /*
282          * Stupid gdb peeks/pokes the access registers in 64 bit with
283          * an alignment of 4. Programmers from hell...
284          */
285         mask = __ADDR_MASK;
286         if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
287             addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
288                 mask = 3;
289         if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
290                 return -EIO;
291
292         tmp = __peek_user(child, addr);
293         return put_user(tmp, (addr_t __user *) data);
294 }
295
296 static inline void __poke_user_per(struct task_struct *child,
297                                    addr_t addr, addr_t data)
298 {
299         struct per_struct_kernel *dummy = NULL;
300
301         /*
302          * There are only three fields in the per_info struct that the
303          * debugger user can write to.
304          * 1) cr9: the debugger wants to set a new PER event mask
305          * 2) starting_addr: the debugger wants to set a new starting
306          *    address to use with the PER event mask.
307          * 3) ending_addr: the debugger wants to set a new ending
308          *    address to use with the PER event mask.
309          * The user specified PER event mask and the start and end
310          * addresses are used only if single stepping is not in effect.
311          * Writes to any other field in per_info are ignored.
312          */
313         if (addr == (addr_t) &dummy->cr9)
314                 /* PER event mask of the user specified per set. */
315                 child->thread.per_user.control =
316                         data & (PER_EVENT_MASK | PER_CONTROL_MASK);
317         else if (addr == (addr_t) &dummy->starting_addr)
318                 /* Starting address of the user specified per set. */
319                 child->thread.per_user.start = data;
320         else if (addr == (addr_t) &dummy->ending_addr)
321                 /* Ending address of the user specified per set. */
322                 child->thread.per_user.end = data;
323 }
324
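/*
 * The tracer changed gpr 2 of a tracee stopped at a system call; rewrite
 * the svc interruption code so that the system call restart logic sees
 * the new system call number. Only done if the tracee really stopped on
 * an svc instruction.
 */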
325 static void fixup_int_code(struct task_struct *child, addr_t data)
326 {
327         struct pt_regs *regs = task_pt_regs(child);
328         int ilc = regs->int_code >> 16;
329         u16 insn;
330
331         if (ilc > 6)
332                 return;
333
334         if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
335                         &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
336                 return;
337
338         /* double check that tracee stopped on svc instruction */
339         if ((insn >> 8) != 0xa)
340                 return;
341
342         regs->int_code = 0x20000 | (data & 0xffff);
343 }
344 /*
345  * Write a word to the user area of a process at location addr. This
346  * operation does have an additional problem compared to peek_user.
347  * Stores to the program status word and to the floating point
348  * control register need to be checked for validity.
349  */
350 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
351 {
352         struct user *dummy = NULL;
353         addr_t offset;
354
355
356         if (addr < (addr_t) &dummy->regs.acrs) {
357                 struct pt_regs *regs = task_pt_regs(child);
358                 /*
359                  * psw and gprs are stored on the stack
360                  */
361                 if (addr == (addr_t) &dummy->regs.psw.mask) {
362                         unsigned long mask = PSW_MASK_USER;
363
364                         mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
365                         if ((data ^ PSW_USER_BITS) & ~mask)
366                                 /* Invalid psw mask. */
367                                 return -EINVAL;
368                         if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
369                                 /* Invalid address-space-control bits */
370                                 return -EINVAL;
371                         if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
372                                 /* Invalid addressing mode bits */
373                                 return -EINVAL;
374                 }
375
376                 if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
377                         addr == offsetof(struct user, regs.gprs[2]))
378                         fixup_int_code(child, data);
379                 *(addr_t *)((addr_t) &regs->psw + addr) = data;
380
381         } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
382                 /*
383                  * access registers are stored in the thread structure
384                  */
385                 offset = addr - (addr_t) &dummy->regs.acrs;
386                 /*
387                  * Very special case: old & broken 64 bit gdb writing
388                  * to acrs[15] with a 64 bit value. Ignore the lower
389                  * half of the value and write the upper 32 bit to
390                  * acrs[15]. Sick...
391                  */
392                 if (addr == (addr_t) &dummy->regs.acrs[15])
393                         child->thread.acrs[15] = (unsigned int) (data >> 32);
394                 else
395                         *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
396
397         } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
398                 /*
399                  * orig_gpr2 is stored on the kernel stack
400                  */
401                 task_pt_regs(child)->orig_gpr2 = data;
402
403         } else if (addr < (addr_t) &dummy->regs.fp_regs) {
404                 /*
405                  * prevent writes of padding hole between
406                  * orig_gpr2 and fp_regs on s390.
407                  */
408                 return 0;
409
410         } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
411                 /*
412                  * floating point control reg. is in the thread structure
413                  */
414                 if ((unsigned int) data != 0 ||
415                     test_fp_ctl(data >> (BITS_PER_LONG - 32)))
416                         return -EINVAL;
417                 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
418
419         } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
420                 /*
421                  * floating point regs. are either in child->thread.fpu
422                  * or the child->thread.fpu.vxrs array
423                  */
424                 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
425                 if (MACHINE_HAS_VX)
426                         *(addr_t *)((addr_t)
427                                 child->thread.fpu.vxrs + 2*offset) = data;
428                 else
429                         *(addr_t *)((addr_t)
430                                 child->thread.fpu.fprs + offset) = data;
431
432         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
433                 /*
434                  * Handle access to the per_info structure.
435                  */
436                 addr -= (addr_t) &dummy->regs.per_info;
437                 __poke_user_per(child, addr, data);
438
439         }
440
441         return 0;
442 }
443
444 static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
445 {
446         addr_t mask;
447
448         /*
449          * Stupid gdb peeks/pokes the access registers in 64 bit with
450          * an alignment of 4. Programmers from hell indeed...
451          */
452         mask = __ADDR_MASK;
453         if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
454             addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
455                 mask = 3;
456         if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
457                 return -EIO;
458
459         return __poke_user(child, addr, data);
460 }
461
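/*
 * Handle the s390 specific ptrace requests, everything else is passed on
 * to the generic ptrace_request().
 *
 * Illustrative (hypothetical) tracer use of PTRACE_PEEKUSR_AREA, copying
 * gprs 0-7 of the stopped tracee into a local buffer of the tracer:
 *
 *	unsigned long buf[8];
 *	ptrace_area parea = {
 *		.len          = sizeof(buf),
 *		.kernel_addr  = offsetof(struct user, regs.gprs[0]),
 *		.process_addr = (unsigned long) buf,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
 */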
462 long arch_ptrace(struct task_struct *child, long request,
463                  unsigned long addr, unsigned long data)
464 {
465         ptrace_area parea; 
466         int copied, ret;
467
468         switch (request) {
469         case PTRACE_PEEKUSR:
470                 /* read the word at location addr in the USER area. */
471                 return peek_user(child, addr, data);
472
473         case PTRACE_POKEUSR:
474                 /* write the word at location addr in the USER area */
475                 return poke_user(child, addr, data);
476
477         case PTRACE_PEEKUSR_AREA:
478         case PTRACE_POKEUSR_AREA:
479                 if (copy_from_user(&parea, (void __force __user *) addr,
480                                                         sizeof(parea)))
481                         return -EFAULT;
482                 addr = parea.kernel_addr;
483                 data = parea.process_addr;
484                 copied = 0;
485                 while (copied < parea.len) {
486                         if (request == PTRACE_PEEKUSR_AREA)
487                                 ret = peek_user(child, addr, data);
488                         else {
489                                 addr_t utmp;
490                                 if (get_user(utmp,
491                                              (addr_t __force __user *) data))
492                                         return -EFAULT;
493                                 ret = poke_user(child, addr, utmp);
494                         }
495                         if (ret)
496                                 return ret;
497                         addr += sizeof(unsigned long);
498                         data += sizeof(unsigned long);
499                         copied += sizeof(unsigned long);
500                 }
501                 return 0;
502         case PTRACE_GET_LAST_BREAK:
503                 put_user(child->thread.last_break,
504                          (unsigned long __user *) data);
505                 return 0;
506         case PTRACE_ENABLE_TE:
507                 if (!MACHINE_HAS_TE)
508                         return -EIO;
509                 child->thread.per_flags &= ~PER_FLAG_NO_TE;
510                 return 0;
511         case PTRACE_DISABLE_TE:
512                 if (!MACHINE_HAS_TE)
513                         return -EIO;
514                 child->thread.per_flags |= PER_FLAG_NO_TE;
515                 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
516                 return 0;
517         case PTRACE_TE_ABORT_RAND:
518                 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
519                         return -EIO;
520                 switch (data) {
521                 case 0UL:
522                         child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
523                         break;
524                 case 1UL:
525                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
526                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
527                         break;
528                 case 2UL:
529                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
530                         child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
531                         break;
532                 default:
533                         return -EINVAL;
534                 }
535                 return 0;
536         default:
537                 return ptrace_request(child, request, addr, data);
538         }
539 }
540
541 #ifdef CONFIG_COMPAT
542 /*
543  * Now the fun part starts... a 31 bit program running in the
544  * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
545  * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
546  * to handle, the difference to the 64 bit versions of the requests
547  * is that the access is done in multiples of 4 bytes instead of
548  * 8 bytes (sizeof(unsigned long) on 31/64 bit).
549  * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
550  * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
551  * is a 31 bit program too, the content of struct user can be
552  * emulated. A 31 bit program peeking into the struct user of
553  * a 64 bit program is a no-no.
554  */
555
556 /*
557  * Same as peek_user_per but for a 31 bit program.
558  */
559 static inline __u32 __peek_user_per_compat(struct task_struct *child,
560                                            addr_t addr)
561 {
562         struct compat_per_struct_kernel *dummy32 = NULL;
563
564         if (addr == (addr_t) &dummy32->cr9)
565                 /* Control bits of the active per set. */
566                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
567                         PER_EVENT_IFETCH : child->thread.per_user.control;
568         else if (addr == (addr_t) &dummy32->cr10)
569                 /* Start address of the active per set. */
570                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
571                         0 : child->thread.per_user.start;
572         else if (addr == (addr_t) &dummy32->cr11)
573                 /* End address of the active per set. */
574                 return test_thread_flag(TIF_SINGLE_STEP) ?
575                         PSW32_ADDR_INSN : child->thread.per_user.end;
576         else if (addr == (addr_t) &dummy32->bits)
577                 /* Single-step bit. */
578                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
579                         0x80000000 : 0;
580         else if (addr == (addr_t) &dummy32->starting_addr)
581                 /* Start address of the user specified per set. */
582                 return (__u32) child->thread.per_user.start;
583         else if (addr == (addr_t) &dummy32->ending_addr)
584                 /* End address of the user specified per set. */
585                 return (__u32) child->thread.per_user.end;
586         else if (addr == (addr_t) &dummy32->perc_atmid)
587                 /* PER code, ATMID and AI of the last PER trap */
588                 return (__u32) child->thread.per_event.cause << 16;
589         else if (addr == (addr_t) &dummy32->address)
590                 /* Address of the last PER trap */
591                 return (__u32) child->thread.per_event.address;
592         else if (addr == (addr_t) &dummy32->access_id)
593                 /* Access id of the last PER trap */
594                 return (__u32) child->thread.per_event.paid << 24;
595         return 0;
596 }
597
598 /*
599  * Same as peek_user but for a 31 bit program.
600  */
601 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
602 {
603         struct compat_user *dummy32 = NULL;
604         addr_t offset;
605         __u32 tmp;
606
607         if (addr < (addr_t) &dummy32->regs.acrs) {
608                 struct pt_regs *regs = task_pt_regs(child);
609                 /*
610                  * psw and gprs are stored on the stack
611                  */
612                 if (addr == (addr_t) &dummy32->regs.psw.mask) {
613                         /* Fake a 31 bit psw mask. */
614                         tmp = (__u32)(regs->psw.mask >> 32);
615                         tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
616                         tmp |= PSW32_USER_BITS;
617                 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
618                         /* Fake a 31 bit psw address. */
619                         tmp = (__u32) regs->psw.addr |
620                                 (__u32)(regs->psw.mask & PSW_MASK_BA);
621                 } else {
622                         /* gpr 0-15 */
623                         tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
624                 }
625         } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
626                 /*
627                  * access registers are stored in the thread structure
628                  */
629                 offset = addr - (addr_t) &dummy32->regs.acrs;
630                 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
631
632         } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
633                 /*
634                  * orig_gpr2 is stored on the kernel stack
635                  */
636                 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
637
638         } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
639                 /*
640                  * prevent reads of padding hole between
641                  * orig_gpr2 and fp_regs on s390.
642                  */
643                 tmp = 0;
644
645         } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
646                 /*
647                  * floating point control reg. is in the thread structure
648                  */
649                 tmp = child->thread.fpu.fpc;
650
651         } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
652                 /*
653                  * floating point regs. are either in child->thread.fpu
654                  * or the child->thread.fpu.vxrs array
655                  */
656                 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
657                 if (MACHINE_HAS_VX)
658                         tmp = *(__u32 *)
659                                ((addr_t) child->thread.fpu.vxrs + 2*offset);
660                 else
661                         tmp = *(__u32 *)
662                                ((addr_t) child->thread.fpu.fprs + offset);
663
664         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
665                 /*
666                  * Handle access to the per_info structure.
667                  */
668                 addr -= (addr_t) &dummy32->regs.per_info;
669                 tmp = __peek_user_per_compat(child, addr);
670
671         } else
672                 tmp = 0;
673
674         return tmp;
675 }
676
677 static int peek_user_compat(struct task_struct *child,
678                             addr_t addr, addr_t data)
679 {
680         __u32 tmp;
681
682         if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
683                 return -EIO;
684
685         tmp = __peek_user_compat(child, addr);
686         return put_user(tmp, (__u32 __user *) data);
687 }
688
689 /*
690  * Same as poke_user_per but for a 31 bit program.
691  */
692 static inline void __poke_user_per_compat(struct task_struct *child,
693                                           addr_t addr, __u32 data)
694 {
695         struct compat_per_struct_kernel *dummy32 = NULL;
696
697         if (addr == (addr_t) &dummy32->cr9)
698                 /* PER event mask of the user specified per set. */
699                 child->thread.per_user.control =
700                         data & (PER_EVENT_MASK | PER_CONTROL_MASK);
701         else if (addr == (addr_t) &dummy32->starting_addr)
702                 /* Starting address of the user specified per set. */
703                 child->thread.per_user.start = data;
704         else if (addr == (addr_t) &dummy32->ending_addr)
705                 /* Ending address of the user specified per set. */
706                 child->thread.per_user.end = data;
707 }
708
709 /*
710  * Same as poke_user but for a 31 bit program.
711  */
712 static int __poke_user_compat(struct task_struct *child,
713                               addr_t addr, addr_t data)
714 {
715         struct compat_user *dummy32 = NULL;
716         __u32 tmp = (__u32) data;
717         addr_t offset;
718
719         if (addr < (addr_t) &dummy32->regs.acrs) {
720                 struct pt_regs *regs = task_pt_regs(child);
721                 /*
722                  * psw and gprs are stored on the stack
723                  */
724                 if (addr == (addr_t) &dummy32->regs.psw.mask) {
725                         __u32 mask = PSW32_MASK_USER;
726
727                         mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
728                         /* Build a 64 bit psw mask from 31 bit mask. */
729                         if ((tmp ^ PSW32_USER_BITS) & ~mask)
730                                 /* Invalid psw mask. */
731                                 return -EINVAL;
732                         if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
733                                 /* Invalid address-space-control bits */
734                                 return -EINVAL;
735                         regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
736                                 (regs->psw.mask & PSW_MASK_BA) |
737                                 (__u64)(tmp & mask) << 32;
738                 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
739                         /* Build a 64 bit psw address from 31 bit address. */
740                         regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
741                         /* Transfer 31 bit amode bit to psw mask. */
742                         regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
743                                 (__u64)(tmp & PSW32_ADDR_AMODE);
744                 } else {
745
746                         if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
747                                 addr == offsetof(struct compat_user, regs.gprs[2]))
748                                 fixup_int_code(child, data);
749                         /* gpr 0-15 */
750                         *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
751                 }
752         } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
753                 /*
754                  * access registers are stored in the thread structure
755                  */
756                 offset = addr - (addr_t) &dummy32->regs.acrs;
757                 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
758
759         } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
760                 /*
761                  * orig_gpr2 is stored on the kernel stack
762                  */
763                 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
764
765         } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
766                 /*
767                  * prevent writes of padding hole between
768                  * orig_gpr2 and fp_regs on s390.
769                  */
770                 return 0;
771
772         } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
773                 /*
774                  * floating point control reg. is in the thread structure
775                  */
776                 if (test_fp_ctl(tmp))
777                         return -EINVAL;
778                 child->thread.fpu.fpc = data;
779
780         } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
781                 /*
782                  * floating point regs. are either in child->thread.fpu
783                  * or the child->thread.fpu.vxrs array
784                  */
785                 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
786                 if (MACHINE_HAS_VX)
787                         *(__u32 *)((addr_t)
788                                 child->thread.fpu.vxrs + 2*offset) = tmp;
789                 else
790                         *(__u32 *)((addr_t)
791                                 child->thread.fpu.fprs + offset) = tmp;
792
793         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
794                 /*
795                  * Handle access to the per_info structure.
796                  */
797                 addr -= (addr_t) &dummy32->regs.per_info;
798                 __poke_user_per_compat(child, addr, data);
799         }
800
801         return 0;
802 }
803
804 static int poke_user_compat(struct task_struct *child,
805                             addr_t addr, addr_t data)
806 {
807         if (!is_compat_task() || (addr & 3) ||
808             addr > sizeof(struct compat_user) - 3)
809                 return -EIO;
810
811         return __poke_user_compat(child, addr, data);
812 }
813
814 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
815                         compat_ulong_t caddr, compat_ulong_t cdata)
816 {
817         unsigned long addr = caddr;
818         unsigned long data = cdata;
819         compat_ptrace_area parea;
820         int copied, ret;
821
822         switch (request) {
823         case PTRACE_PEEKUSR:
824                 /* read the word at location addr in the USER area. */
825                 return peek_user_compat(child, addr, data);
826
827         case PTRACE_POKEUSR:
828                 /* write the word at location addr in the USER area */
829                 return poke_user_compat(child, addr, data);
830
831         case PTRACE_PEEKUSR_AREA:
832         case PTRACE_POKEUSR_AREA:
833                 if (copy_from_user(&parea, (void __force __user *) addr,
834                                                         sizeof(parea)))
835                         return -EFAULT;
836                 addr = parea.kernel_addr;
837                 data = parea.process_addr;
838                 copied = 0;
839                 while (copied < parea.len) {
840                         if (request == PTRACE_PEEKUSR_AREA)
841                                 ret = peek_user_compat(child, addr, data);
842                         else {
843                                 __u32 utmp;
844                                 if (get_user(utmp,
845                                              (__u32 __force __user *) data))
846                                         return -EFAULT;
847                                 ret = poke_user_compat(child, addr, utmp);
848                         }
849                         if (ret)
850                                 return ret;
851                         addr += sizeof(unsigned int);
852                         data += sizeof(unsigned int);
853                         copied += sizeof(unsigned int);
854                 }
855                 return 0;
856         case PTRACE_GET_LAST_BREAK:
857                 put_user(child->thread.last_break,
858                          (unsigned int __user *) data);
859                 return 0;
860         }
861         return compat_ptrace_request(child, request, addr, data);
862 }
863 #endif
864
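/*
 * Called on system call entry. Returns the (possibly changed) system
 * call number from gpr 2, or -1 if the system call is to be skipped.
 */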
865 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
866 {
867         unsigned long mask = -1UL;
868
869         /*
870          * The sysc_tracesys code in entry.S stored the system
871          * The sysc_tracesys code in entry.S stored the system
872          * call number in gprs[2].
873         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
874             (tracehook_report_syscall_entry(regs) ||
875              regs->gprs[2] >= NR_syscalls)) {
876                 /*
877                  * Tracing decided this syscall should not happen or the
878                  * debugger stored an invalid system call number. Skip
879                  * the system call and the system call restart handling.
880                  */
881                 clear_pt_regs_flag(regs, PIF_SYSCALL);
882                 return -1;
883         }
884
885         /* Do the secure computing check after ptrace. */
886         if (secure_computing(NULL)) {
887                 /* seccomp failures shouldn't expose any additional code. */
888                 return -1;
889         }
890
891         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
892                 trace_sys_enter(regs, regs->gprs[2]);
893
894         if (is_compat_task())
895                 mask = 0xffffffff;
896
897         audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
898                             regs->gprs[3] & mask, regs->gprs[4] & mask,
899                             regs->gprs[5] & mask);
900
901         return regs->gprs[2];
902 }
903
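/*
 * Called on exit from a system call for auditing, the syscall exit
 * tracepoint and the ptrace/tracehook report.
 */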
904 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
905 {
906         audit_syscall_exit(regs);
907
908         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
909                 trace_sys_exit(regs, regs->gprs[2]);
910
911         if (test_thread_flag(TIF_SYSCALL_TRACE))
912                 tracehook_report_syscall_exit(regs, 0);
913 }
914
915 /*
916  * user_regset definitions.
917  */
918
919 static int s390_regs_get(struct task_struct *target,
920                          const struct user_regset *regset,
921                          unsigned int pos, unsigned int count,
922                          void *kbuf, void __user *ubuf)
923 {
924         if (target == current)
925                 save_access_regs(target->thread.acrs);
926
927         if (kbuf) {
928                 unsigned long *k = kbuf;
929                 while (count > 0) {
930                         *k++ = __peek_user(target, pos);
931                         count -= sizeof(*k);
932                         pos += sizeof(*k);
933                 }
934         } else {
935                 unsigned long __user *u = ubuf;
936                 while (count > 0) {
937                         if (__put_user(__peek_user(target, pos), u++))
938                                 return -EFAULT;
939                         count -= sizeof(*u);
940                         pos += sizeof(*u);
941                 }
942         }
943         return 0;
944 }
945
946 static int s390_regs_set(struct task_struct *target,
947                          const struct user_regset *regset,
948                          unsigned int pos, unsigned int count,
949                          const void *kbuf, const void __user *ubuf)
950 {
951         int rc = 0;
952
953         if (target == current)
954                 save_access_regs(target->thread.acrs);
955
956         if (kbuf) {
957                 const unsigned long *k = kbuf;
958                 while (count > 0 && !rc) {
959                         rc = __poke_user(target, pos, *k++);
960                         count -= sizeof(*k);
961                         pos += sizeof(*k);
962                 }
963         } else {
964                 const unsigned long  __user *u = ubuf;
965                 while (count > 0 && !rc) {
966                         unsigned long word;
967                         rc = __get_user(word, u++);
968                         if (rc)
969                                 break;
970                         rc = __poke_user(target, pos, word);
971                         count -= sizeof(*u);
972                         pos += sizeof(*u);
973                 }
974         }
975
976         if (rc == 0 && target == current)
977                 restore_access_regs(target->thread.acrs);
978
979         return rc;
980 }
981
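/*
 * Floating point regset: the FPU state is collected into a temporary
 * _s390_fp_regs via fpregs_store() before it is copied out, since the
 * fprs may be embedded in the vector registers.
 */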
982 static int s390_fpregs_get(struct task_struct *target,
983                            const struct user_regset *regset, unsigned int pos,
984                            unsigned int count, void *kbuf, void __user *ubuf)
985 {
986         _s390_fp_regs fp_regs;
987
988         if (target == current)
989                 save_fpu_regs();
990
991         fp_regs.fpc = target->thread.fpu.fpc;
992         fpregs_store(&fp_regs, &target->thread.fpu);
993
994         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
995                                    &fp_regs, 0, -1);
996 }
997
998 static int s390_fpregs_set(struct task_struct *target,
999                            const struct user_regset *regset, unsigned int pos,
1000                            unsigned int count, const void *kbuf,
1001                            const void __user *ubuf)
1002 {
1003         int rc = 0;
1004         freg_t fprs[__NUM_FPRS];
1005
1006         if (target == current)
1007                 save_fpu_regs();
1008
1009         if (MACHINE_HAS_VX)
1010                 convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
1011         else
1012                 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
1013
1014         /* If setting FPC, must validate it first. */
1015         if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
1016                 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
1017                 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
1018                                         0, offsetof(s390_fp_regs, fprs));
1019                 if (rc)
1020                         return rc;
1021                 if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
1022                         return -EINVAL;
1023                 target->thread.fpu.fpc = ufpc[0];
1024         }
1025
1026         if (rc == 0 && count > 0)
1027                 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1028                                         fprs, offsetof(s390_fp_regs, fprs), -1);
1029         if (rc)
1030                 return rc;
1031
1032         if (MACHINE_HAS_VX)
1033                 convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
1034         else
1035                 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
1036
1037         return rc;
1038 }
1039
1040 static int s390_last_break_get(struct task_struct *target,
1041                                const struct user_regset *regset,
1042                                unsigned int pos, unsigned int count,
1043                                void *kbuf, void __user *ubuf)
1044 {
1045         if (count > 0) {
1046                 if (kbuf) {
1047                         unsigned long *k = kbuf;
1048                         *k = target->thread.last_break;
1049                 } else {
1050                         unsigned long  __user *u = ubuf;
1051                         if (__put_user(target->thread.last_break, u))
1052                                 return -EFAULT;
1053                 }
1054         }
1055         return 0;
1056 }
1057
1058 static int s390_last_break_set(struct task_struct *target,
1059                                const struct user_regset *regset,
1060                                unsigned int pos, unsigned int count,
1061                                const void *kbuf, const void __user *ubuf)
1062 {
1063         return 0;
1064 }
1065
1066 static int s390_tdb_get(struct task_struct *target,
1067                         const struct user_regset *regset,
1068                         unsigned int pos, unsigned int count,
1069                         void *kbuf, void __user *ubuf)
1070 {
1071         struct pt_regs *regs = task_pt_regs(target);
1072         unsigned char *data;
1073
1074         if (!(regs->int_code & 0x200))
1075                 return -ENODATA;
1076         data = target->thread.trap_tdb;
1077         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
1078 }
1079
1080 static int s390_tdb_set(struct task_struct *target,
1081                         const struct user_regset *regset,
1082                         unsigned int pos, unsigned int count,
1083                         const void *kbuf, const void __user *ubuf)
1084 {
1085         return 0;
1086 }
1087
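/*
 * NT_S390_VXRS_LOW transfers the rightmost eight bytes of vector
 * registers 0-15; the leftmost halves are visible as the fprs.
 */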
1088 static int s390_vxrs_low_get(struct task_struct *target,
1089                              const struct user_regset *regset,
1090                              unsigned int pos, unsigned int count,
1091                              void *kbuf, void __user *ubuf)
1092 {
1093         __u64 vxrs[__NUM_VXRS_LOW];
1094         int i;
1095
1096         if (!MACHINE_HAS_VX)
1097                 return -ENODEV;
1098         if (target == current)
1099                 save_fpu_regs();
1100         for (i = 0; i < __NUM_VXRS_LOW; i++)
1101                 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1102         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1103 }
1104
1105 static int s390_vxrs_low_set(struct task_struct *target,
1106                              const struct user_regset *regset,
1107                              unsigned int pos, unsigned int count,
1108                              const void *kbuf, const void __user *ubuf)
1109 {
1110         __u64 vxrs[__NUM_VXRS_LOW];
1111         int i, rc;
1112
1113         if (!MACHINE_HAS_VX)
1114                 return -ENODEV;
1115         if (target == current)
1116                 save_fpu_regs();
1117
1118         for (i = 0; i < __NUM_VXRS_LOW; i++)
1119                 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1120
1121         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1122         if (rc == 0)
1123                 for (i = 0; i < __NUM_VXRS_LOW; i++)
1124                         *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
1125
1126         return rc;
1127 }
1128
1129 static int s390_vxrs_high_get(struct task_struct *target,
1130                               const struct user_regset *regset,
1131                               unsigned int pos, unsigned int count,
1132                               void *kbuf, void __user *ubuf)
1133 {
1134         __vector128 vxrs[__NUM_VXRS_HIGH];
1135
1136         if (!MACHINE_HAS_VX)
1137                 return -ENODEV;
1138         if (target == current)
1139                 save_fpu_regs();
1140         memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1141
1142         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1143 }
1144
1145 static int s390_vxrs_high_set(struct task_struct *target,
1146                               const struct user_regset *regset,
1147                               unsigned int pos, unsigned int count,
1148                               const void *kbuf, const void __user *ubuf)
1149 {
1150         int rc;
1151
1152         if (!MACHINE_HAS_VX)
1153                 return -ENODEV;
1154         if (target == current)
1155                 save_fpu_regs();
1156
1157         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1158                                 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1159         return rc;
1160 }
1161
1162 static int s390_system_call_get(struct task_struct *target,
1163                                 const struct user_regset *regset,
1164                                 unsigned int pos, unsigned int count,
1165                                 void *kbuf, void __user *ubuf)
1166 {
1167         unsigned int *data = &target->thread.system_call;
1168         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1169                                    data, 0, sizeof(unsigned int));
1170 }
1171
1172 static int s390_system_call_set(struct task_struct *target,
1173                                 const struct user_regset *regset,
1174                                 unsigned int pos, unsigned int count,
1175                                 const void *kbuf, const void __user *ubuf)
1176 {
1177         unsigned int *data = &target->thread.system_call;
1178         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1179                                   data, 0, sizeof(unsigned int));
1180 }
1181
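/*
 * Guarded storage control block regset. Returns -ENODEV if the machine
 * has no guarded storage facility and -ENODATA if the task never enabled
 * guarded storage.
 */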
1182 static int s390_gs_cb_get(struct task_struct *target,
1183                           const struct user_regset *regset,
1184                           unsigned int pos, unsigned int count,
1185                           void *kbuf, void __user *ubuf)
1186 {
1187         struct gs_cb *data = target->thread.gs_cb;
1188
1189         if (!MACHINE_HAS_GS)
1190                 return -ENODEV;
1191         if (!data)
1192                 return -ENODATA;
1193         if (target == current)
1194                 save_gs_cb(data);
1195         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1196                                    data, 0, sizeof(struct gs_cb));
1197 }
1198
1199 static int s390_gs_cb_set(struct task_struct *target,
1200                           const struct user_regset *regset,
1201                           unsigned int pos, unsigned int count,
1202                           const void *kbuf, const void __user *ubuf)
1203 {
1204         struct gs_cb gs_cb = { }, *data = NULL;
1205         int rc;
1206
1207         if (!MACHINE_HAS_GS)
1208                 return -ENODEV;
1209         if (!target->thread.gs_cb) {
1210                 data = kzalloc(sizeof(*data), GFP_KERNEL);
1211                 if (!data)
1212                         return -ENOMEM;
1213         }
1214         if (!target->thread.gs_cb)
1215                 gs_cb.gsd = 25;
1216         else if (target == current)
1217                 save_gs_cb(&gs_cb);
1218         else
1219                 gs_cb = *target->thread.gs_cb;
1220         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1221                                 &gs_cb, 0, sizeof(gs_cb));
1222         if (rc) {
1223                 kfree(data);
1224                 return -EFAULT;
1225         }
1226         preempt_disable();
1227         if (!target->thread.gs_cb)
1228                 target->thread.gs_cb = data;
1229         *target->thread.gs_cb = gs_cb;
1230         if (target == current) {
1231                 __ctl_set_bit(2, 4);
1232                 restore_gs_cb(target->thread.gs_cb);
1233         }
1234         preempt_enable();
1235         return rc;
1236 }
1237
1238 static int s390_gs_bc_get(struct task_struct *target,
1239                           const struct user_regset *regset,
1240                           unsigned int pos, unsigned int count,
1241                           void *kbuf, void __user *ubuf)
1242 {
1243         struct gs_cb *data = target->thread.gs_bc_cb;
1244
1245         if (!MACHINE_HAS_GS)
1246                 return -ENODEV;
1247         if (!data)
1248                 return -ENODATA;
1249         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1250                                    data, 0, sizeof(struct gs_cb));
1251 }
1252
1253 static int s390_gs_bc_set(struct task_struct *target,
1254                           const struct user_regset *regset,
1255                           unsigned int pos, unsigned int count,
1256                           const void *kbuf, const void __user *ubuf)
1257 {
1258         struct gs_cb *data = target->thread.gs_bc_cb;
1259
1260         if (!MACHINE_HAS_GS)
1261                 return -ENODEV;
1262         if (!data) {
1263                 data = kzalloc(sizeof(*data), GFP_KERNEL);
1264                 if (!data)
1265                         return -ENOMEM;
1266                 target->thread.gs_bc_cb = data;
1267         }
1268         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1269                                   data, 0, sizeof(struct gs_cb));
1270 }
1271
1272 static const struct user_regset s390_regsets[] = {
1273         {
1274                 .core_note_type = NT_PRSTATUS,
1275                 .n = sizeof(s390_regs) / sizeof(long),
1276                 .size = sizeof(long),
1277                 .align = sizeof(long),
1278                 .get = s390_regs_get,
1279                 .set = s390_regs_set,
1280         },
1281         {
1282                 .core_note_type = NT_PRFPREG,
1283                 .n = sizeof(s390_fp_regs) / sizeof(long),
1284                 .size = sizeof(long),
1285                 .align = sizeof(long),
1286                 .get = s390_fpregs_get,
1287                 .set = s390_fpregs_set,
1288         },
1289         {
1290                 .core_note_type = NT_S390_SYSTEM_CALL,
1291                 .n = 1,
1292                 .size = sizeof(unsigned int),
1293                 .align = sizeof(unsigned int),
1294                 .get = s390_system_call_get,
1295                 .set = s390_system_call_set,
1296         },
1297         {
1298                 .core_note_type = NT_S390_LAST_BREAK,
1299                 .n = 1,
1300                 .size = sizeof(long),
1301                 .align = sizeof(long),
1302                 .get = s390_last_break_get,
1303                 .set = s390_last_break_set,
1304         },
1305         {
1306                 .core_note_type = NT_S390_TDB,
1307                 .n = 1,
1308                 .size = 256,
1309                 .align = 1,
1310                 .get = s390_tdb_get,
1311                 .set = s390_tdb_set,
1312         },
1313         {
1314                 .core_note_type = NT_S390_VXRS_LOW,
1315                 .n = __NUM_VXRS_LOW,
1316                 .size = sizeof(__u64),
1317                 .align = sizeof(__u64),
1318                 .get = s390_vxrs_low_get,
1319                 .set = s390_vxrs_low_set,
1320         },
1321         {
1322                 .core_note_type = NT_S390_VXRS_HIGH,
1323                 .n = __NUM_VXRS_HIGH,
1324                 .size = sizeof(__vector128),
1325                 .align = sizeof(__vector128),
1326                 .get = s390_vxrs_high_get,
1327                 .set = s390_vxrs_high_set,
1328         },
1329         {
1330                 .core_note_type = NT_S390_GS_CB,
1331                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1332                 .size = sizeof(__u64),
1333                 .align = sizeof(__u64),
1334                 .get = s390_gs_cb_get,
1335                 .set = s390_gs_cb_set,
1336         },
1337         {
1338                 .core_note_type = NT_S390_GS_BC,
1339                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1340                 .size = sizeof(__u64),
1341                 .align = sizeof(__u64),
1342                 .get = s390_gs_bc_get,
1343                 .set = s390_gs_bc_set,
1344         },
1345 };
1346
1347 static const struct user_regset_view user_s390_view = {
1348         .name = UTS_MACHINE,
1349         .e_machine = EM_S390,
1350         .regsets = s390_regsets,
1351         .n = ARRAY_SIZE(s390_regsets)
1352 };
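
/*
 * Illustrative sketch (editorial addition, not part of the kernel sources):
 * a tracer reads one of the regsets above with PTRACE_GETREGSET, passing the
 * core note type and an iovec that describes its buffer, as documented in
 * ptrace(2).  The pid variable and the error handling shown here are
 * hypothetical; s390_regs is the uapi structure whose size defines the
 * NT_PRSTATUS regset above.
 *
 *	#include <elf.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <asm/ptrace.h>		// declares s390_regs
 *
 *	s390_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == -1)
 *		perror("PTRACE_GETREGSET");
 */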
1353
1354 #ifdef CONFIG_COMPAT
1355 static int s390_compat_regs_get(struct task_struct *target,
1356                                 const struct user_regset *regset,
1357                                 unsigned int pos, unsigned int count,
1358                                 void *kbuf, void __user *ubuf)
1359 {
1360         if (target == current)
1361                 save_access_regs(target->thread.acrs);
1362
1363         if (kbuf) {
1364                 compat_ulong_t *k = kbuf;
1365                 while (count > 0) {
1366                         *k++ = __peek_user_compat(target, pos);
1367                         count -= sizeof(*k);
1368                         pos += sizeof(*k);
1369                 }
1370         } else {
1371                 compat_ulong_t __user *u = ubuf;
1372                 while (count > 0) {
1373                         if (__put_user(__peek_user_compat(target, pos), u++))
1374                                 return -EFAULT;
1375                         count -= sizeof(*u);
1376                         pos += sizeof(*u);
1377                 }
1378         }
1379         return 0;
1380 }
1381
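/*
 * While a task runs, its access registers live in the CPU.  For the current
 * task they are therefore saved to thread.acrs before the copy and reloaded
 * afterwards so that an updated value actually takes effect.
 */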
1382 static int s390_compat_regs_set(struct task_struct *target,
1383                                 const struct user_regset *regset,
1384                                 unsigned int pos, unsigned int count,
1385                                 const void *kbuf, const void __user *ubuf)
1386 {
1387         int rc = 0;
1388
1389         if (target == current)
1390                 save_access_regs(target->thread.acrs);
1391
1392         if (kbuf) {
1393                 const compat_ulong_t *k = kbuf;
1394                 while (count > 0 && !rc) {
1395                         rc = __poke_user_compat(target, pos, *k++);
1396                         count -= sizeof(*k);
1397                         pos += sizeof(*k);
1398                 }
1399         } else {
1400                 const compat_ulong_t  __user *u = ubuf;
1401                 while (count > 0 && !rc) {
1402                         compat_ulong_t word;
1403                         rc = __get_user(word, u++);
1404                         if (rc)
1405                                 break;
1406                         rc = __poke_user_compat(target, pos, word);
1407                         count -= sizeof(*u);
1408                         pos += sizeof(*u);
1409                 }
1410         }
1411
1412         if (rc == 0 && target == current)
1413                 restore_access_regs(target->thread.acrs);
1414
1415         return rc;
1416 }
1417
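/*
 * For a 31-bit (compat) task the user visible 32-bit register value sits in
 * the lower half of each 64-bit gpr; the upper halves are exported as the
 * separate NT_S390_HIGH_GPRS regset.  Since s390 is big-endian, the upper
 * 32 bits of gprs[n] are found at compat_ulong_t index 2 * n, which is why
 * the two helpers below walk the array with a stride of two.
 */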
1418 static int s390_compat_regs_high_get(struct task_struct *target,
1419                                      const struct user_regset *regset,
1420                                      unsigned int pos, unsigned int count,
1421                                      void *kbuf, void __user *ubuf)
1422 {
1423         compat_ulong_t *gprs_high;
1424
1425         gprs_high = (compat_ulong_t *)
1426                 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1427         if (kbuf) {
1428                 compat_ulong_t *k = kbuf;
1429                 while (count > 0) {
1430                         *k++ = *gprs_high;
1431                         gprs_high += 2;
1432                         count -= sizeof(*k);
1433                 }
1434         } else {
1435                 compat_ulong_t __user *u = ubuf;
1436                 while (count > 0) {
1437                         if (__put_user(*gprs_high, u++))
1438                                 return -EFAULT;
1439                         gprs_high += 2;
1440                         count -= sizeof(*u);
1441                 }
1442         }
1443         return 0;
1444 }
1445
1446 static int s390_compat_regs_high_set(struct task_struct *target,
1447                                      const struct user_regset *regset,
1448                                      unsigned int pos, unsigned int count,
1449                                      const void *kbuf, const void __user *ubuf)
1450 {
1451         compat_ulong_t *gprs_high;
1452         int rc = 0;
1453
1454         gprs_high = (compat_ulong_t *)
1455                 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1456         if (kbuf) {
1457                 const compat_ulong_t *k = kbuf;
1458                 while (count > 0) {
1459                         *gprs_high = *k++;
1460                         gprs_high += 2;
1461                         count -= sizeof(*k);
1462                 }
1463         } else {
1464                 const compat_ulong_t  __user *u = ubuf;
1465                 while (count > 0 && !rc) {
1466                         unsigned long word;
1467                         rc = __get_user(word, u++);
1468                         if (rc)
1469                                 break;
1470                         *gprs_high = word;
1471                         gprs_high += 2;
1472                         count -= sizeof(*u);
1473                 }
1474         }
1475
1476         return rc;
1477 }
1478
1479 static int s390_compat_last_break_get(struct task_struct *target,
1480                                       const struct user_regset *regset,
1481                                       unsigned int pos, unsigned int count,
1482                                       void *kbuf, void __user *ubuf)
1483 {
1484         compat_ulong_t last_break;
1485
1486         if (count > 0) {
1487                 last_break = target->thread.last_break;
1488                 if (kbuf) {
1489                         unsigned long *k = kbuf;
1490                         *k = last_break;
1491                 } else {
1492                         unsigned long  __user *u = ubuf;
1493                         if (__put_user(last_break, u))
1494                                 return -EFAULT;
1495                 }
1496         }
1497         return 0;
1498 }
1499
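/*
 * The breaking-event address is recorded by the hardware and cannot be set
 * from user space.  Writes to this regset are accepted and discarded (the
 * handler always returns 0), so writing back a complete register dump does
 * not fail on this read-only note.
 */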
1500 static int s390_compat_last_break_set(struct task_struct *target,
1501                                       const struct user_regset *regset,
1502                                       unsigned int pos, unsigned int count,
1503                                       const void *kbuf, const void __user *ubuf)
1504 {
1505         return 0;
1506 }
1507
1508 static const struct user_regset s390_compat_regsets[] = {
1509         {
1510                 .core_note_type = NT_PRSTATUS,
1511                 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
1512                 .size = sizeof(compat_long_t),
1513                 .align = sizeof(compat_long_t),
1514                 .get = s390_compat_regs_get,
1515                 .set = s390_compat_regs_set,
1516         },
1517         {
1518                 .core_note_type = NT_PRFPREG,
1519                 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
1520                 .size = sizeof(compat_long_t),
1521                 .align = sizeof(compat_long_t),
1522                 .get = s390_fpregs_get,
1523                 .set = s390_fpregs_set,
1524         },
1525         {
1526                 .core_note_type = NT_S390_SYSTEM_CALL,
1527                 .n = 1,
1528                 .size = sizeof(compat_uint_t),
1529                 .align = sizeof(compat_uint_t),
1530                 .get = s390_system_call_get,
1531                 .set = s390_system_call_set,
1532         },
1533         {
1534                 .core_note_type = NT_S390_LAST_BREAK,
1535                 .n = 1,
1536                 .size = sizeof(long),
1537                 .align = sizeof(long),
1538                 .get = s390_compat_last_break_get,
1539                 .set = s390_compat_last_break_set,
1540         },
1541         {
1542                 .core_note_type = NT_S390_TDB,
1543                 .n = 1,
1544                 .size = 256,
1545                 .align = 1,
1546                 .get = s390_tdb_get,
1547                 .set = s390_tdb_set,
1548         },
1549         {
1550                 .core_note_type = NT_S390_VXRS_LOW,
1551                 .n = __NUM_VXRS_LOW,
1552                 .size = sizeof(__u64),
1553                 .align = sizeof(__u64),
1554                 .get = s390_vxrs_low_get,
1555                 .set = s390_vxrs_low_set,
1556         },
1557         {
1558                 .core_note_type = NT_S390_VXRS_HIGH,
1559                 .n = __NUM_VXRS_HIGH,
1560                 .size = sizeof(__vector128),
1561                 .align = sizeof(__vector128),
1562                 .get = s390_vxrs_high_get,
1563                 .set = s390_vxrs_high_set,
1564         },
1565         {
1566                 .core_note_type = NT_S390_HIGH_GPRS,
1567                 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
1568                 .size = sizeof(compat_long_t),
1569                 .align = sizeof(compat_long_t),
1570                 .get = s390_compat_regs_high_get,
1571                 .set = s390_compat_regs_high_set,
1572         },
1573         {
1574                 .core_note_type = NT_S390_GS_CB,
1575                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1576                 .size = sizeof(__u64),
1577                 .align = sizeof(__u64),
1578                 .get = s390_gs_cb_get,
1579                 .set = s390_gs_cb_set,
1580         },
1581 };
1582
1583 static const struct user_regset_view user_s390_compat_view = {
1584         .name = "s390",
1585         .e_machine = EM_S390,
1586         .regsets = s390_compat_regsets,
1587         .n = ARRAY_SIZE(s390_compat_regsets)
1588 };
1589 #endif
1590
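/*
 * Pick the regset view that matches the tracee: 31-bit (compat) tasks are
 * described by the compat view, all other tasks by the native 64-bit view.
 */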
1591 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1592 {
1593 #ifdef CONFIG_COMPAT
1594         if (test_tsk_thread_flag(task, TIF_31BIT))
1595                 return &user_s390_compat_view;
1596 #endif
1597         return &user_s390_view;
1598 }
1599
1600 static const char *gpr_names[NUM_GPRS] = {
1601         "r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
1602         "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1603 };
1604
1605 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1606 {
1607         if (offset >= NUM_GPRS)
1608                 return 0;
1609         return regs->gprs[offset];
1610 }
1611
1612 int regs_query_register_offset(const char *name)
1613 {
1614         unsigned long offset;
1615
1616         if (!name || *name != 'r')
1617                 return -EINVAL;
1618         if (kstrtoul(name + 1, 10, &offset))
1619                 return -EINVAL;
1620         if (offset >= NUM_GPRS)
1621                 return -EINVAL;
1622         return offset;
1623 }
1624
1625 const char *regs_query_register_name(unsigned int offset)
1626 {
1627         if (offset >= NUM_GPRS)
1628                 return NULL;
1629         return gpr_names[offset];
1630 }
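
/*
 * Example (editorial note): for valid general purpose registers the two
 * lookup helpers above are inverses of each other, e.g.
 *
 *	regs_query_register_offset("r11")	-> 11
 *	regs_query_register_name(11)		-> "r11"
 *	regs_query_register_offset("acr0")	-> -EINVAL
 */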
1631
1632 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1633 {
1634         unsigned long ksp = kernel_stack_pointer(regs);
1635
1636         return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1637 }
1638
1639 /**
1640  * regs_get_kernel_stack_nth() - get Nth entry of the stack
1641  * @regs: pt_regs which contains the kernel stack pointer.
1642  * @n: stack entry number.
1643  *
1644  * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
1645  * which is specified by @regs. If the @n th entry is NOT in the kernel
1646  * stack, this returns 0.
1647  */
1648 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1649 {
1650         unsigned long addr;
1651
1652         addr = kernel_stack_pointer(regs) + n * sizeof(long);
1653         if (!regs_within_kernel_stack(regs, addr))
1654                 return 0;
1655         return *(unsigned long *)addr;
1656 }
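
/*
 * Illustrative sketch (editorial addition, not part of the kernel sources):
 * the regs_* accessors above back features such as kprobe based event
 * tracing.  A kprobes pre-handler could use them like this; the handler name
 * is hypothetical.
 *
 *	#include <linux/kprobes.h>
 *
 *	static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		unsigned long slot0 = regs_get_kernel_stack_nth(regs, 0);
 *		unsigned long r15 = regs_get_register(regs, 15);
 *
 *		pr_debug("stack[0]=%lx r15=%lx\n", slot0, r15);
 *		return 0;
 *	}
 */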