2 * Split from ftrace_64.S
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
10 #include <linux/magic.h>
11 #include <asm/ppc_asm.h>
12 #include <asm/asm-offsets.h>
13 #include <asm/ftrace.h>
14 #include <asm/ppc-opcode.h>
15 #include <asm/export.h>
16 #include <asm/thread_info.h>
18 #include <asm/ptrace.h>
20 #ifdef CONFIG_DYNAMIC_FTRACE
23 * ftrace_caller() is the function that replaces _mcount() when ftrace is
26 * We arrive here after a function A calls function B, and we are the trace
27 * function for B. When we enter r1 points to A's stack frame, B has not yet
28 had a chance to allocate one.
30 * Additionally r2 may point either to the TOC for A, or B, depending on
31 * whether B did a TOC setup sequence before calling us.
33 * On entry the LR points back to the _mcount() call site, and r0 holds the
34 * saved LR as it was on entry to B, ie. the original return address at the
37 * Our job is to save the register state into a struct pt_regs (on the stack)
38 * and then arrange for the ftrace function to be called.
/*
 * NOTE(review): this listing is a partial extract -- the stray numbers at
 * the start of each line are line numbers from the original file, and many
 * original lines (register saves, labels, #endif closers) are missing.
 * Confirm against the full ftrace_64_mprofile.S before acting on it.
 */
40 _GLOBAL(ftrace_caller)
41 /* Save the original return address in A's stack frame */
44 /* Create our stack frame + pt_regs */
45 stdu r1,-SWITCH_FRAME_SIZE(r1)	/* store-with-update: saves old r1 as back chain and allocates the frame atomically */
47 /* Save all gprs to pt_regs */
53 /* Save previous stack pointer (r1) */
54 addi r8, r1, SWITCH_FRAME_SIZE	/* r8 = caller's r1 (stack pointer before our frame) */
57 /* Load special regs for save below */
63 /* Get the _mcount() call site out of LR */
65 /* Save it as pt_regs->nip */
67 /* Save the read LR in pt_regs->link */
70 /* Save callee's TOC in the ABI compliant location */
72 ld r2,PACATOC(r13) /* get kernel TOC in r2 */
/* r3 = &function_trace_op, built TOC-relative (high-adjusted + low parts) */
74 addis r3,r2,function_trace_op@toc@ha
75 addi r3,r3,function_trace_op@toc@l
78 #ifdef CONFIG_LIVEPATCH
79 mr r14,r7 /* remember old NIP */
/* NOTE(review): r7 presumably holds the saved NIP, loaded by the missing
 * "Load special regs" instructions above -- confirm in the full source. */
81 /* Calculate ip from nip-4 into r3 for call below */
82 subi r3, r7, MCOUNT_INSN_SIZE	/* back up to the actual _mcount call instruction */
84 /* Put the original return address in r4 as parent_ip */
87 /* Save special regs */
93 /* Load &pt_regs in r6 for call below */
94 addi r6, r1 ,STACK_FRAME_OVERHEAD	/* pt_regs sits just above the ABI stack-frame header */
96 /* ftrace_call(r3, r4, r5, r6) */
102 /* Load the possibly modified NIP */
105 #ifdef CONFIG_LIVEPATCH
/* NOTE(review): the matching #endif for this #ifdef is not visible in this
 * extract; it exists in the missing lines of the original file. */
106 cmpd r14, r15 /* has NIP been altered? */
/* r14 = NIP saved before the tracer ran; r15 presumably holds the NIP
 * reloaded from pt_regs by the missing load above -- confirm. */
109 #if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
110 /* NIP has not been altered, skip over further checks */
113 /* Check if there is an active kprobe on us */
115 bl is_current_kprobe_addr	/* returns r3 = 1 when a kprobe is active here (see below) */
119 * If r3 == 1, then this is a kprobe/jprobe.
120 * else, this is livepatched function.
122 * The conditional branch for livepatch_handler below will use the
123 * result of this comparison. For kprobe/jprobe, we just need to branch to
124 * the new NIP, not call livepatch_handler. The branch below is bne, so we
125 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
126 * CR0[EQ] = (r3 == 1).
132 /* Load CTR with the possibly modified NIP */
141 /* Restore possibly modified LR */
145 /* Restore callee's TOC */
148 /* Pop our stack frame */
149 addi r1, r1, SWITCH_FRAME_SIZE	/* deallocate the frame created by the stdu above */
151 #ifdef CONFIG_LIVEPATCH
153 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
154 * not on a kprobe/jprobe, then handle livepatch.
156 bne- livepatch_handler	/* "-" hint: livepatching is the unlikely path */
159 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
160 .globl ftrace_graph_call
163 _GLOBAL(ftrace_graph_stub)
166 bctr /* jump after _mcount site */
171 #ifdef CONFIG_LIVEPATCH
173 * This function runs in the mcount context, between two functions. As
174 * such it can only clobber registers which are volatile and used in
177 * We get here when a function A, calls another function B, but B has
178 * been live patched with a new function C.
181 * - we have no stack frame and can not allocate one
182 * - LR points back to the original caller (in A)
183 * - CTR holds the new NIP in C
184 * - r0, r11 & r12 are free
/* NOTE(review): the livepatch_handler: label itself is among the lines
 * missing from this extract; the code below is its body. */
187 CURRENT_THREAD_INFO(r12, r1)	/* r12 = thread_info of the current task */
189 /* Allocate 3 x 8 bytes */
190 ld r11, TI_livepatch_sp(r12)	/* r11 = per-thread livepatch stack pointer */
192 std r11, TI_livepatch_sp(r12)	/* publish the (presumably bumped) livepatch SP -- bump insn not visible here */
194 /* Save toc & real LR on livepatch stack */
199 /* Store stack end marker */
/* Build the 32-bit STACK_END_MAGIC constant in r12 (high half, then low) */
200 lis r12, STACK_END_MAGIC@h
201 ori r12, r12, STACK_END_MAGIC@l
204 /* Put ctr in r12 for global entry and branch there */
209 * Now we are returning from the patched function to the original
210 * caller A. We are free to use r11, r12 and we can use r2 until we
214 CURRENT_THREAD_INFO(r12, r1)
216 ld r11, TI_livepatch_sp(r12)	/* reload livepatch SP to unwind our entry */
218 /* Check stack marker hasn't been trashed */
/* Rebuild STACK_END_MAGIC in r2 for comparison against the stored marker */
219 lis r2, STACK_END_MAGIC@h
220 ori r2, r2, STACK_END_MAGIC@l
223 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0	/* BUG if the marker check (at the missing 1: label) failed */
225 /* Restore LR & toc from livepatch stack */
230 /* Pop livepatch stack frame */
231 CURRENT_THREAD_INFO(r12, r1)
233 std r11, TI_livepatch_sp(r12)	/* write back the unwound livepatch SP */
235 /* Return to original caller of live patched function */
237 #endif /* CONFIG_LIVEPATCH */
239 #endif /* CONFIG_DYNAMIC_FTRACE */
241 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
242 _GLOBAL(ftrace_graph_caller)
244 /* with -mprofile-kernel, parameter regs are still alive at _mcount */
254 /* Save callee's TOC in the ABI compliant location */
256 ld r2, PACATOC(r13) /* get kernel TOC in r2 */
258 mfctr r4 /* ftrace_caller has moved local addr here */
260 mflr r3 /* ftrace_caller has restored LR from stack */
261 subi r4, r4, MCOUNT_INSN_SIZE	/* r4 = address of the _mcount call site itself */
263 bl prepare_ftrace_return	/* args: r3 = parent return addr, r4 = call-site ip -- presumably returns the divert address in r3; confirm against prototype */
267 * prepare_ftrace_return gives us the address we divert to.
268 * Change the LR to this.
283 /* Restore callee's TOC */
290 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */