/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality.
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long*) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
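
/*
 * Example (illustrative, not from the original file): the table above is
 * consulted bit-by-bit.  For RDTSC (0x0f 0x31), the second opcode byte is
 * 0x31; row 0x30 has bit 1 set, so a lookup like the one in can_boost()
 * below reports it as boostable:
 *
 *	if (insn->opcode.nbytes == 2)
 *		return test_bit(insn->opcode.bytes[1],
 *				(unsigned long *)twobyte_is_boostable);
 */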
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function only switches the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}
/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'. */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
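
/*
 * Example (illustrative addresses): both helpers emit a five-byte
 * instruction, one opcode byte followed by a little-endian s32
 * displacement relative to the end of the instruction.  A jump placed at
 * 0xffffffff81000000 targeting 0xffffffff81000020 is encoded as:
 *
 *	raddr = 0xffffffff81000020 - (0xffffffff81000000 + 5) = 0x1b
 *	bytes: e9 1b 00 00 00
 */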
/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);
/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;
	insn_byte_t prefix;
	int i;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	for_each_insn_prefix(insn, i, prefix) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(prefix);
		/* Can't boost address-size override prefix and CS override prefix */
		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
			return 0;
	}

	opcode = insn->opcode.bytes[0];

	switch (opcode & 0xf0) {
	case 0x60:
		/* can't boost "bound" */
		return (opcode != 0x62);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0x90:
		return opcode != 0x9a;	/* can't boost call far */
	case 0xc0:
		/* can't boost software interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* call is not boostable */
		return opcode != 0x9a;
	}
}
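
/*
 * Example (illustrative): int3 (0xcc) lands in the 0xc0 case above and
 * yields false (0xcc is neither within (0xc1, 0xcc) nor equal to 0xcf),
 * so it is never boosted, while ret (0xc3) yields true and may be.
 */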
static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by kprobes
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal kprobe, kp->opcode has
	 * a copy of the first byte of the probed instruction, which is
	 * overwritten by int3. And since the instruction at kp->addr is not
	 * modified by kprobes except for the first byte, we can recover the
	 * original instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent state.
	 * Fortunately, we know that the original code is the ideal 5-byte
	 * NOP.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}
/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must lock kprobes by kprobe_mutex, or disable preemption,
 * to prevent the referenced kprobes from being released.
 * Returns zero if the instruction cannot be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}
/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump. Since the relative jump itself is a
		 * normal instruction, we just go through if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}
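
/*
 * Example (illustrative): if paddr points at the second byte of a
 * multi-byte instruction, decoding forward from the symbol start steps
 * over that whole instruction, addr overshoots paddr, and the final
 * (addr == paddr) test above rejects the probe.
 */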
/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}
/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted based on @real,
 * not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address is not recovered */
	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
		return 0;

	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
	insn_get_length(insn);

	/* Another subsystem put a breakpoint there; we failed to recover it */
	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
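
/*
 * Worked example (illustrative addresses): for an instruction copied
 * from src = 0xffffffff81000100 into a slot at real = 0xffffffffa0002000
 * with an original displacement of 0x200340, the instruction lengths
 * cancel out and:
 *
 *	newdisp = 0xffffffff81000100 + 0x200340 - 0xffffffffa0002000
 *	        = -0x1ee01bc0
 *
 * which fits in an s32, so the copy refers to the same target as the
 * original instruction.
 */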
/* Prepare a reljump right after the instruction to boost it */
static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
			 struct insn *insn)
{
	int len = insn->length;

	if (can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
		/*
		 * This instruction can be executed directly if it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += RELATIVEJUMP_SIZE;
		p->ainsn.boostable = true;
	} else {
		p->ainsn.boostable = false;
	}

	return len;
}
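
/*
 * Example (illustrative): for a boosted probe the slot laid out above
 * looks like this, so execution falls off the copied instruction and
 * straight back into kernel text without a second trap:
 *
 *	p->ainsn.insn:	[copied instruction        ]  len bytes
 *			[jmp p->addr + insn->length]  RELATIVEJUMP_SIZE bytes
 */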
/* Make the page read-only when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	/*
	 * First make the page read-only, and only then make it executable to
	 * prevent it from being W+X in between.
	 */
	set_memory_ro((unsigned long)page, 1);

	/*
	 * TODO: Once additional kernel code protection mechanisms are set, ensure
	 * that the page was not maliciously altered and it is still zeroed.
	 */
	set_memory_x((unsigned long)page, 1);

	return page;
}
/* Restore the page to RW mode before releasing it */
void free_insn_page(void *page)
{
	/*
	 * First make the page non-executable, and only then make it writable to
	 * prevent it from being W+X in between.
	 */
	set_memory_nx((unsigned long)page, 1);
	set_memory_rw((unsigned long)page, 1);
	module_memfree(page);
}
static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int len;

	/* Copy the instruction, recovering it if another optprobe modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but it doesn't affect the boostable check.
	 */
	len = prepare_boost(buf, p, &insn);

	/* Check whether the instruction modifies the interrupt flag or not */
	p->ainsn.if_modifier = is_IF_modifier(buf);

	/* Also, a displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}
int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}
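
/*
 * Usage sketch (illustrative, modelled on samples/kprobes; the target
 * symbol and handler names below are placeholders, not part of this file):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS, ip=%lx\n", p->addr, regs->ip);
 *		return 0;	zero: continue with single-stepping
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "_do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 * register_kprobe(&my_kp) ends up in arch_prepare_kprobe() and
 * arch_arm_kprobe() above; unregister_kprobe(&my_kp) disarms the probe
 * and eventually calls arch_remove_kprobe().
 */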
static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;
	ri->fp = sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
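
/*
 * Usage sketch (illustrative; names are placeholders): a kretprobe's
 * entry path reaches arch_prepare_kretprobe() via pre_handler_kretprobe(),
 * which allocates the kretprobe_instance passed in as @ri:
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("returned %ld\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "_do_fork",
 *		.handler	= my_ret,
 *	};
 *
 *	register_kretprobe(&my_rp);
 */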
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable && !p->post_handler) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually overflow the stack.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since int3 and the debug trap disable irqs, and we
	 * clear IF while single-stepping, we cannot be preempted here.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up registers to exit to another
			 * instruction, so we must skip the single stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 152(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Move flags to cs */
	"	movl 56(%esp), %edx\n"
	"	movl %edx, 52(%esp)\n"
	/* Replace saved flags with true return address. */
	"	movl %eax, 56(%esp)\n"
	RESTORE_REGS_STRING
	"	popf\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
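
/*
 * Note on the magic offsets above (assuming the pt_regs layout of this
 * kernel generation): on x86-64, 152(%rsp) is offsetof(struct pt_regs, sp),
 * i.e. the slot trampoline_handler() uses as the return-address holder;
 * on x86-32, 52(%esp) and 56(%esp) are offsetof(struct pt_regs, cs) and
 * offsetof(struct pt_regs, flags) respectively.
 */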
/*
 * Called from kretprobe_trampoline
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;
	void *frame_pointer;
	bool skipped = false;

	/*
	 * Set a dummy kprobe to avoid kretprobe recursion.
	 * Since a kretprobe never runs inside a kprobe handler, no kprobe
	 * must be running at this point.
	 */
	kprobe_busy_begin();

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
	/* On x86-64, we use pt_regs->sp for the return address holder. */
	frame_pointer = &regs->sp;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
	/* On x86-32, we use pt_regs->flags for the return address holder. */
	frame_pointer = &regs->flags;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry(ri, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;
		/*
		 * Return probes must be pushed on this hash list in the
		 * correct order (same as return order) so that they can be
		 * popped correctly. However, if we find them pushed in the
		 * wrong order, it means we found a function which should not
		 * be probed, because the wrong-order entry was pushed on the
		 * path of processing another kretprobe itself.
		 */
		if (ri->fp != frame_pointer) {
			if (!skipped)
				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
			skipped = true;
			continue;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (skipped)
			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
				ri->rp->kp.addr);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;
		if (ri->fp != frame_pointer)
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, &kprobe_busy);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	kprobe_busy_end();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = true;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = true;
			goto no_change;
		}
	default:
		break;
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);
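
/*
 * Worked example (illustrative addresses) for the "call relative" fixup
 * above: if a 5-byte call at orig_ip = 0xffffffff81000000 was
 * single-stepped at copy_ip = 0xffffffffa0001000, the stepped copy pushed
 * *tos = copy_ip + 5.  The fixup
 *
 *	*tos = orig_ip + (*tos - copy_ip) = orig_ip + 5
 *
 * makes the probed function return to the instruction following the
 * original call site.
 */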
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		/*
		 * The trap flag (TF) has been set here because this fault
		 * happened where the single stepping will be done.
		 * So clear it by resetting the current kprobe:
		 */
		regs->flags &= ~X86_EFLAGS_TF;
		/*
		 * Since the single step (trap) has been cancelled,
		 * we need to restore BTF here.
		 */
		restore_btf();

		/*
		 * If the TF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs, trapnr))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
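
/*
 * Usage sketch (illustrative; handler and names are placeholders): the
 * user-specified fault handler consulted above is installed like this:
 *
 *	static int my_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
 *	{
 *		pr_info("fault %d while handling probe at %p\n", trapnr, p->addr);
 *		return 0;	zero: fall through to fixup_exception()
 *	}
 *
 *	my_kp.fault_handler = my_fault;
 */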
/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	bool is_in_entry_trampoline_section = false;

#ifdef CONFIG_X86_64
	is_in_entry_trampoline_section =
		(addr >= (unsigned long)__entry_trampoline_start &&
		 addr < (unsigned long)__entry_trampoline_end);
#endif
	return (addr >= (unsigned long)__kprobes_text_start &&
		addr < (unsigned long)__kprobes_text_end) ||
	       (addr >= (unsigned long)__entry_text_start &&
		addr < (unsigned long)__entry_text_end) ||
	       is_in_entry_trampoline_section;
}

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}