/*
 * arch/x86/kernel/stacktrace.c (GNU Linux-libre 4.14.266-gnu1)
 *
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

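/*
 * Record a single return address into the trace buffer, honoring the
 * caller's skip count and, if @nosched is set, filtering out addresses
 * inside scheduler functions.  Returns -1 once the buffer is full so
 * the unwind loop can stop early.
 */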
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

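/*
 * Core unwind loop shared by the save_stack_trace*() variants: walk the
 * stack with the configured unwinder and record each return address.
 * If @regs is supplied (e.g. from an interrupt), regs->ip is recorded
 * first so the trace starts at the interrupted instruction.
 */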
static void __save_stack_trace(struct stack_trace *trace,
			       struct task_struct *task, struct pt_regs *regs,
			       bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
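
/*
 * Example (illustrative sketch, not part of the original file): a
 * typical caller supplies its own entry buffer and skip count.
 * print_stack_trace() is the generic helper from kernel/stacktrace.c.
 */
static void __maybe_unused example_dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,	/* frames to drop from the top, if any */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}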

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

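/*
 * Save the stack trace of another (blocked) task.  try_get_task_stack()
 * pins the task's stack so it can't be freed mid-unwind, and nosched=true
 * filters out scheduler internals so the trace shows where the task
 * blocked rather than the scheduler's own frames.
 */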
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

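/*
 * Warn and dump the stack the first time a given call site trips over
 * an unreliable condition (the static flag is per expansion site), so
 * the log isn't flooded on repeats.
 */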
#define STACKTRACE_DUMP_ONCE(task) ({				\
	static bool __section(.data.unlikely) __dumped;		\
								\
	if (!__dumped) {					\
		__dumped = true;				\
		WARN_ON(1);					\
		show_stack(task, NULL);				\
	}							\
})

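/*
 * Unwind @task's stack, failing with -EINVAL at the first sign the
 * trace can't be trusted: an in-kernel pt_regs frame, an unknown return
 * address, a full entry buffer, or an unwinder-reported error.
 */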
static int __save_stack_trace_reliable(struct stack_trace *trace,
				       struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (!user_mode(regs))
				return -EINVAL;

			/*
			 * The last frame contains the user mode syscall
			 * pt_regs.  Skip it and finish the unwind.
			 */
			unwind_next_frame(&state);
			if (!unwind_done(&state)) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}
			break;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
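
/*
 * Example (illustrative sketch, not part of the original file): how a
 * consumer such as the livepatch core might use the reliable variant.
 * The buffer size and function name here are hypothetical.
 */
#define EXAMPLE_MAX_ENTRIES 64

static int __maybe_unused example_task_stack_is_safe(struct task_struct *task)
{
	/* static buffer: assumes callers are serialized by a lock */
	static unsigned long entries[EXAMPLE_MAX_ENTRIES];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};
	int ret;

	/* the caller must ensure 'task' is inactive (see comment above) */
	ret = save_stack_trace_tsk_reliable(task, &trace);
	if (ret)
		return ret;	/* unreliable: retry this task later */

	/* ... inspect trace.entries[0 .. trace.nr_entries - 1] ... */
	return 0;
}
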
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

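/*
 * Layout of one userspace frame-pointer frame: the saved frame pointer
 * followed by the return address, as emitted by compilers building with
 * frame pointers enabled.
 */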
struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

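/*
 * Safely read one stack frame from user memory.  Page faults are
 * disabled so an unmapped user page makes the copy fail fast instead of
 * sleeping; returns 1 on success, 0 on failure.
 */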
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

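/*
 * Walk the user stack by chasing frame pointers from the saved user
 * registers.  The regs->sp bound and the fp == next_fp check guard
 * against corrupt user stacks and self-referencing frames.
 */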
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace the user stack if we are not a kernel thread.
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
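
/*
 * Example (illustrative sketch, not part of the original file): a
 * profiler-style caller.  save_stack_trace_user() only works for the
 * current task, since it reads the user stack through current's page
 * tables.
 */
static void __maybe_unused example_sample_user_stack(void)
{
	unsigned long entries[32];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};

	save_stack_trace_user(&trace);
	/* ... hand trace.entries off to a perf-style ring buffer ... */
}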