/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
#include <asm/pte-walk.h>

/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return 0;		/* must be 16-byte aligned */
	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;
	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
		return 1;
	/*
	 * sp could decrease when we jump off an interrupt stack
	 * back to the regular process stack.
	 */
	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
		return 1;
	return 0;
}

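/*
 * Build the kernel part of the callchain: record the interrupted NIP,
 * then follow the back-chain of saved stack pointers, picking up the
 * saved LR from each frame.  When a frame looks like an exception frame
 * (marked with STACK_FRAME_REGS_MARKER), restart from the register state
 * saved in it.
 */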
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	unsigned long *fp;

	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, perf_instruction_pointer(regs));

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return;

	for (;;) {
		fp = (unsigned long *) sp;
		next_sp = fp[0];

		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
		    validate_sp(sp, current, STACK_INT_FRAME_SIZE) &&
		    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			/*
			 * This looks like an interrupt frame for an
			 * interrupt that occurred in the kernel
			 */
			regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
			next_ip = regs->nip;
			lr = regs->link;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
		} else {
			if (level == 0)
				next_ip = lr;
			else
				next_ip = fp[STACK_FRAME_LR_SAVE];

			/*
			 * We can't tell which of the first two addresses
			 * we get are valid, but we can filter out the
			 * obviously bogus ones here.  We replace them
			 * with 0 rather than removing them entirely so
			 * that userspace can tell which is which.
			 */
			if ((level == 1 && next_ip == lr) ||
			    (level <= 1 && !kernel_text_address(next_ip)))
				next_ip = 0;

			++level;
		}

		perf_callchain_store(entry, next_ip);
		if (!valid_next_sp(next_sp, sp))
			return;
		sp = next_sp;
	}
}

#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned int shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}

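/*
 * Read one word from a 64-bit user stack.  Try a normal inatomic user
 * access first; if that faults, fall back to walking the page tables
 * via read_user_stack_slow().
 */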
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 8);
}

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 4);
}

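/*
 * Check that a user stack pointer is sane: non-zero, 8-byte aligned and
 * low enough that a minimal stack frame still fits below the 32-bit or
 * 64-bit address-space limit.
 */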
static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
		return 0;
	return 1;
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

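/*
 * Does nip look like the 64-bit sigreturn trampoline for the signal
 * frame at fp: either the trampoline embedded in the frame itself or
 * the one provided by the vDSO?
 */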
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}

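/*
 * Build the user part of the callchain for a 64-bit process: follow the
 * user stack back-chain, recording the saved LR of each frame, and
 * restart from the saved register state whenever a frame looks like a
 * signal frame.
 */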
static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

static inline int current_is_64bit(void)
{
	/*
	 * We can't use test_thread_flag() here because we may be on an
	 * interrupt stack, and the thread flags don't get copied over
	 * from the thread_info on the main stack to the interrupt stack.
	 */
	return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}

#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	rc = __get_user_inatomic(*ret, ptr);
	pagefault_enable();

	return rc;
}

static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
					  struct pt_regs *regs)
{
}

static inline int current_is_64bit(void)
{
	return 0;
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
		return 0;
	return 1;
}

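/*
 * On a 32-bit kernel the "compat" signal structures are simply the
 * native ones, so alias them here and share the 32-bit code below.
 */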
#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
#define sigcontext32		sigcontext
#define mcontext32		mcontext
#define ucontext32		ucontext
#define compat_siginfo_t	struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;
	struct mcontext32	mctx;
	int			abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;
	int			abigap[56];
};

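/*
 * Does nip look like a 32-bit non-RT sigreturn trampoline for the signal
 * frame at fp, either embedded in the frame or provided by the vDSO?
 */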
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
		return 1;
	if (vdso32_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
		return 1;
	return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
		return 1;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
		return 1;
	return 0;
}

static int sane_signal_32_frame(unsigned int sp)
{
	struct signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
	struct rt_signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->uc.uc_mcontext;
}

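/*
 * If the stack frame at sp looks like a 32-bit signal frame (RT or
 * non-RT), return a pointer to the general-purpose registers saved in
 * it; otherwise return NULL.
 */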
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
				unsigned int next_sp, unsigned int next_ip)
{
	struct mcontext32 __user *mctx = NULL;
	struct signal_frame_32 __user *sf;
	struct rt_signal_frame_32 __user *rt_sf;

	/*
	 * Note: the next_sp - sp >= signal frame size check
	 * is true when next_sp < sp, for example, when
	 * transitioning from an alternate signal stack to the
	 * normal stack.
	 */
	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
	    is_sigreturn_32_address(next_ip, sp) &&
	    sane_signal_32_frame(sp)) {
		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
		mctx = &sf->mctx;
	}

	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
	    is_rt_sigreturn_32_address(next_ip, sp) &&
	    sane_rt_signal_32_frame(sp)) {
		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
		mctx = &rt_sf->uc.uc_mcontext;
	}

	if (!mctx)
		return NULL;
	return mctx->mc_gregs;
}

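/*
 * Build the user part of the callchain for a 32-bit process, following
 * the 32-bit back-chain and restarting at signal frames, as in the
 * 64-bit case above.
 */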
static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like a signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

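/*
 * Entry point from the core perf code: dispatch to the 32-bit or 64-bit
 * unwinder depending on the bitness of the current task.
 */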
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	if (current_is_64bit())
		perf_callchain_user_64(entry, regs);
	else
		perf_callchain_user_32(entry, regs);
}