GNU Linux-libre 4.19.286-gnu1
arch/sparc/mm/fault_32.c
// SPDX-License-Identifier: GPL-2.0
/*
 * fault_32.c:  Page fault handlers for the 32-bit Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

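/*
 * Added annotation, not in the upstream source: lookup_fault() is
 * asmlinkage, so it appears to be entered from the low-level trap code
 * when a fault hits one of the hand-coded memory routines.
 * search_extables_range() classifies the faulting range and the return
 * value (1, 2 or 3) tells the caller how the fixup applies; falling
 * through reports an unhandled fault.  "(insn >> 21) & 1" extracts the
 * store bit of the op3 field (bits 19..24), distinguishing loads from
 * stores; op3 == 15 appears to be the swap instruction, which both
 * loads and stores.
 */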
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
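	/*
	 * Added annotation, not in the upstream source: build a minimal
	 * pt_regs for the unhandled-fault report; only pc/npc and a
	 * freshly read %psr are filled in.  The trailing nops presumably
	 * just pad out the rd-%psr read.
	 */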
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

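/*
 * Added annotation, not in the upstream source: print a rate-limited
 * one-line report for unhandled user-space faults, gated on both
 * unhandled_signal() and printk_ratelimit().
 */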
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr, 0, current);
}

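/*
 * Added annotation, not in the upstream source: work out the address to
 * report in si_addr.  For a text fault it is simply the faulting pc;
 * for a data fault the faulting instruction is fetched (directly when
 * the fault was in kernel mode, via __get_user() for user mode) and its
 * effective address is recomputed from the saved register state.
 */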
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

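/*
 * Added annotation, not in the upstream source: main MMU fault entry
 * point, called (asmlinkage) from the trap-level assembly.  @text_fault
 * flags an instruction-fetch fault, @write a store.  Faults at or above
 * TASK_SIZE are kernel vmalloc-area misses and are resolved by copying
 * the relevant top-level entries from init_mm's reference page table.
 */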
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

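	/*
	 * Added annotation, not in the upstream source: fault-in loop.
	 * On VM_FAULT_RETRY the core VM has already dropped mmap_sem,
	 * so we come back here to re-take it and retry once with
	 * FAULT_FLAG_TRIED set instead of FAULT_FLAG_ALLOW_RETRY.
	 */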
retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
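	/*
	 * Added annotation, not in the upstream source: kernel-mode
	 * fault, so look for an exception-table fixup.  Small fixup
	 * values (<= 10) are reserved return codes (see lookup_fault());
	 * anything larger is the address to continue execution at.
	 * Faults inside the memset/csum_partial_copy helpers also get
	 * the fault address and faulting pc passed back in %i4/%i5.
	 */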
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];
			extern const unsigned int __csum_partial_copy_start[];
			extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

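	/*
	 * Added annotation, not in the upstream source: no mmap_sem is
	 * held here; the fault is in kernel space and is fixed purely by
	 * copying top-level entries from the init_mm reference tables,
	 * per the NOTE at the top of this function.
	 */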
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

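/*
 * Added annotation, not in the upstream source: helper for the
 * register-window fault handlers below.  It faults in a user stack page
 * "by hand" and, on failure, delivers the signal against the saved user
 * register state (tsk->thread.kregs), since the window trap was taken
 * from kernel mode.
 */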
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

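/*
 * Added annotation, not in the upstream source: the SPARC ABI requires
 * the stack pointer to be doubleword (8-byte) aligned; a misaligned
 * window save area is punished with SIGILL.
 */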
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

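/*
 * Added annotation, not in the upstream source: register-window
 * spill/fill fault handlers.  A window save area holds 16 registers
 * (64 bytes); sp + 0x38 is its last doubleword, so when that offset
 * lands on a different page than sp itself, both pages must be faulted
 * in before the trap handler can retry the spill or fill.
 */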
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}