GNU Linux-libre 4.4.284-gnu1
[releases.git] / arch / arm64 / kernel / hw_breakpoint.c
1 /*
2  * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
3  * using the CPU's debug registers.
4  *
5  * Copyright (C) 2012 ARM Limited
6  * Author: Will Deacon <will.deacon@arm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #define pr_fmt(fmt) "hw-breakpoint: " fmt
22
23 #include <linux/compat.h>
24 #include <linux/cpu_pm.h>
25 #include <linux/errno.h>
26 #include <linux/hw_breakpoint.h>
27 #include <linux/perf_event.h>
28 #include <linux/ptrace.h>
29 #include <linux/smp.h>
30
31 #include <asm/compat.h>
32 #include <asm/current.h>
33 #include <asm/debug-monitors.h>
34 #include <asm/hw_breakpoint.h>
35 #include <asm/traps.h>
36 #include <asm/cputype.h>
37 #include <asm/system_misc.h>
38 #include <asm/uaccess.h>
39
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/*
 * Currently stepping a per-CPU kernel breakpoint. Holds one of the
 * ARM_KERNEL_STEP_{NONE,ACTIVE,SUSPEND} states consumed by the debug
 * exception handlers and reinstall_suspended_bps() below.
 */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU (set in arch_hw_breakpoint_init). */
static int core_num_brps;
static int core_num_wrps;
52
53 int hw_breakpoint_slots(int type)
54 {
55         /*
56          * We can be called early, so don't rely on
57          * our static variables being initialised.
58          */
59         switch (type) {
60         case TYPE_INST:
61                 return get_num_brps();
62         case TYPE_DATA:
63                 return get_num_wrps();
64         default:
65                 pr_warning("unknown slot type: %d\n", type);
66                 return 0;
67         }
68 }
69
/*
 * Accessors for the banked debug registers (DBGBVR<n>, DBGBCR<n>,
 * DBGWVR<n>, DBGWCR<n>). These registers cannot be addressed by index
 * at runtime, so expand one switch case per register number (0..15),
 * each performing a direct read/write of the named register.
 */
#define READ_WB_REG_CASE(OFF, N, REG, VAL)      \
        case (OFF + N):                         \
                AARCH64_DBG_READ(N, REG, VAL);  \
                break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)     \
        case (OFF + N):                         \
                AARCH64_DBG_WRITE(N, REG, VAL); \
                break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
        READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
        WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
115
116 static u64 read_wb_reg(int reg, int n)
117 {
118         u64 val = 0;
119
120         switch (reg + n) {
121         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
122         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
123         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
124         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
125         default:
126                 pr_warning("attempt to read from unknown breakpoint register %d\n", n);
127         }
128
129         return val;
130 }
131
132 static void write_wb_reg(int reg, int n, u64 val)
133 {
134         switch (reg + n) {
135         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
136         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
137         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
138         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
139         default:
140                 pr_warning("attempt to write to unknown breakpoint register %d\n", n);
141         }
142         isb();
143 }
144
145 /*
146  * Convert a breakpoint privilege level to the corresponding exception
147  * level.
148  */
149 static enum dbg_active_el debug_exception_level(int privilege)
150 {
151         switch (privilege) {
152         case AARCH64_BREAKPOINT_EL0:
153                 return DBG_ACTIVE_EL0;
154         case AARCH64_BREAKPOINT_EL1:
155                 return DBG_ACTIVE_EL1;
156         default:
157                 pr_warning("invalid breakpoint privilege level %d\n", privilege);
158                 return -EINVAL;
159         }
160 }
161
/* Slot operations understood by hw_breakpoint_slot_setup(). */
enum hw_breakpoint_ops {
        HW_BREAKPOINT_INSTALL,          /* claim the first free slot */
        HW_BREAKPOINT_UNINSTALL,        /* release the slot holding the bp */
        HW_BREAKPOINT_RESTORE           /* locate the slot already holding the bp */
};
167
/*
 * is_compat_bp - does @bp target an AArch32 (compat) task?
 *
 * Returns non-zero iff the event is bound to a task running in compat
 * mode.
 */
static int is_compat_bp(struct perf_event *bp)
{
        struct task_struct *tsk = bp->hw.target;

        /*
         * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
         * In this case, use the native interface, since we don't have
         * the notion of a "compat CPU" and could end up relying on
         * deprecated behaviour if we use unaligned watchpoints in
         * AArch64 state.
         */
        return tsk && is_compat_thread(task_thread_info(tsk));
}
181
182 /**
183  * hw_breakpoint_slot_setup - Find and setup a perf slot according to
184  *                            operations
185  *
186  * @slots: pointer to array of slots
187  * @max_slots: max number of slots
188  * @bp: perf_event to setup
189  * @ops: operation to be carried out on the slot
190  *
191  * Return:
192  *      slot index on success
193  *      -ENOSPC if no slot is available/matches
194  *      -EINVAL on wrong operations parameter
195  */
196 static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
197                                     struct perf_event *bp,
198                                     enum hw_breakpoint_ops ops)
199 {
200         int i;
201         struct perf_event **slot;
202
203         for (i = 0; i < max_slots; ++i) {
204                 slot = &slots[i];
205                 switch (ops) {
206                 case HW_BREAKPOINT_INSTALL:
207                         if (!*slot) {
208                                 *slot = bp;
209                                 return i;
210                         }
211                         break;
212                 case HW_BREAKPOINT_UNINSTALL:
213                         if (*slot == bp) {
214                                 *slot = NULL;
215                                 return i;
216                         }
217                         break;
218                 case HW_BREAKPOINT_RESTORE:
219                         if (*slot == bp)
220                                 return i;
221                         break;
222                 default:
223                         pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
224                         return -EINVAL;
225                 }
226         }
227         return -ENOSPC;
228 }
229
/*
 * hw_breakpoint_control - install, uninstall or restore @bp on this CPU.
 * @bp: perf event describing a hardware breakpoint or watchpoint.
 * @ops: one of HW_BREAKPOINT_{INSTALL,UNINSTALL,RESTORE}.
 *
 * Selects the BRP or WRP register bank from the breakpoint type, finds
 * the matching slot via hw_breakpoint_slot_setup() and programs the
 * value/control registers for that slot. Returns 0 on success or a
 * negative error from the slot lookup.
 */
static int hw_breakpoint_control(struct perf_event *bp,
                                 enum hw_breakpoint_ops ops)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slots;
        struct debug_info *debug_info = &current->thread.debug;
        int i, max_slots, ctrl_reg, val_reg, reg_enable;
        enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
        u32 ctrl;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_reg = AARCH64_DBG_REG_BCR;
                val_reg = AARCH64_DBG_REG_BVR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                /* Honour the disabled state used while single-stepping. */
                reg_enable = !debug_info->bps_disabled;
        } else {
                /* Watchpoint */
                ctrl_reg = AARCH64_DBG_REG_WCR;
                val_reg = AARCH64_DBG_REG_WVR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                reg_enable = !debug_info->wps_disabled;
        }

        i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

        if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
                return i;

        switch (ops) {
        case HW_BREAKPOINT_INSTALL:
                /*
                 * Ensure debug monitors are enabled at the correct exception
                 * level.
                 */
                enable_debug_monitors(dbg_el);
                /* Fall through */
        case HW_BREAKPOINT_RESTORE:
                /* Setup the address register. */
                write_wb_reg(val_reg, i, info->address);

                /* Setup the control register. */
                ctrl = encode_ctrl_reg(info->ctrl);
                write_wb_reg(ctrl_reg, i,
                             reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
                break;
        case HW_BREAKPOINT_UNINSTALL:
                /* Reset the control register. */
                write_wb_reg(ctrl_reg, i, 0);

                /*
                 * Release the debug monitors for the correct exception
                 * level.
                 */
                disable_debug_monitors(dbg_el);
                break;
        }

        return 0;
}
292
/*
 * Install a perf counter breakpoint.
 *
 * Thin wrapper over hw_breakpoint_control(); returns its 0/-errno
 * result.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}
300
/* Uninstall a perf counter breakpoint; the result is discarded. */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
305
306 static int get_hbp_len(u8 hbp_len)
307 {
308         unsigned int len_in_bytes = 0;
309
310         switch (hbp_len) {
311         case ARM_BREAKPOINT_LEN_1:
312                 len_in_bytes = 1;
313                 break;
314         case ARM_BREAKPOINT_LEN_2:
315                 len_in_bytes = 2;
316                 break;
317         case ARM_BREAKPOINT_LEN_4:
318                 len_in_bytes = 4;
319                 break;
320         case ARM_BREAKPOINT_LEN_8:
321                 len_in_bytes = 8;
322                 break;
323         }
324
325         return len_in_bytes;
326 }
327
/*
 * Check whether bp virtual address is in kernel space.
 *
 * Returns non-zero when both the start address and the last monitored
 * byte lie at or above TASK_SIZE.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = get_hbp_len(info->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
342
343 /*
344  * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
345  * Hopefully this will disappear when ptrace can bypass the conversion
346  * to generic breakpoint descriptions.
347  */
348 int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
349                            int *gen_len, int *gen_type)
350 {
351         /* Type */
352         switch (ctrl.type) {
353         case ARM_BREAKPOINT_EXECUTE:
354                 *gen_type = HW_BREAKPOINT_X;
355                 break;
356         case ARM_BREAKPOINT_LOAD:
357                 *gen_type = HW_BREAKPOINT_R;
358                 break;
359         case ARM_BREAKPOINT_STORE:
360                 *gen_type = HW_BREAKPOINT_W;
361                 break;
362         case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
363                 *gen_type = HW_BREAKPOINT_RW;
364                 break;
365         default:
366                 return -EINVAL;
367         }
368
369         /* Len */
370         switch (ctrl.len) {
371         case ARM_BREAKPOINT_LEN_1:
372                 *gen_len = HW_BREAKPOINT_LEN_1;
373                 break;
374         case ARM_BREAKPOINT_LEN_2:
375                 *gen_len = HW_BREAKPOINT_LEN_2;
376                 break;
377         case ARM_BREAKPOINT_LEN_4:
378                 *gen_len = HW_BREAKPOINT_LEN_4;
379                 break;
380         case ARM_BREAKPOINT_LEN_8:
381                 *gen_len = HW_BREAKPOINT_LEN_8;
382                 break;
383         default:
384                 return -EINVAL;
385         }
386
387         return 0;
388 }
389
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 *
 * Translates the generic bp_type/bp_len/bp_addr attributes into the
 * arch-specific control and address fields. Returns 0 on success or
 * -EINVAL for an unsupported type/length combination.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                info->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /*
         * On AArch64, we only permit breakpoints of length 4, whereas
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_bp(bp)) {
                        if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
                            info->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
                } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         *        that breakpoints should be sizeof(long). This
                         *        is nonsense. For now, we fix up the parameter
                         *        but we should probably return -EINVAL instead.
                         */
                        info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }

        /* Address */
        info->address = bp->attr.bp_addr;

        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
        if (arch_check_bp_in_kernelspace(bp))
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

        /* Enabled? */
        info->ctrl.enabled = !bp->attr.disabled;

        return 0;
}
472
/*
 * Validate the arch-specific HW Breakpoint register settings.
 *
 * Builds the arch_hw_breakpoint from @bp and enforces the alignment
 * rules, applying compat (AArch32) fixups where needed. Returns 0 on
 * success or -EINVAL.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret;
        u64 alignment_mask, offset;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        if (ret)
                return ret;

        /*
         * Check address alignment.
         * We don't do any clever alignment correction for watchpoints
         * because using 64-bit unaligned addresses is deprecated for
         * AArch64.
         *
         * AArch32 tasks expect some simple alignment fixups, so emulate
         * that here.
         */
        if (is_compat_bp(bp)) {
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
                offset = info->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
                        /* Fall through - a byte access may also match. */
                case 3:
                        /* Allow single byte watchpoint. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;
                        /* Fall through */
                default:
                        return -EINVAL;
                }

                /*
                 * Align the address down and shift the byte-select mask
                 * (ctrl.len) up to cover the originally requested bytes.
                 */
                info->address &= ~alignment_mask;
                info->ctrl.len <<= offset;
        } else {
                if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
                if (info->address & alignment_mask)
                        return -EINVAL;
        }

        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
        if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;

        return 0;
}
539
540 /*
541  * Enable/disable all of the breakpoints active at the specified
542  * exception level at the register level.
543  * This is used when single-stepping after a breakpoint exception.
544  */
545 static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
546 {
547         int i, max_slots, privilege;
548         u32 ctrl;
549         struct perf_event **slots;
550
551         switch (reg) {
552         case AARCH64_DBG_REG_BCR:
553                 slots = this_cpu_ptr(bp_on_reg);
554                 max_slots = core_num_brps;
555                 break;
556         case AARCH64_DBG_REG_WCR:
557                 slots = this_cpu_ptr(wp_on_reg);
558                 max_slots = core_num_wrps;
559                 break;
560         default:
561                 return;
562         }
563
564         for (i = 0; i < max_slots; ++i) {
565                 if (!slots[i])
566                         continue;
567
568                 privilege = counter_arch_bp(slots[i])->ctrl.privilege;
569                 if (debug_exception_level(privilege) != el)
570                         continue;
571
572                 ctrl = read_wb_reg(reg, i);
573                 if (enable)
574                         ctrl |= 0x1;
575                 else
576                         ctrl &= ~0x1;
577                 write_wb_reg(reg, i, ctrl);
578         }
579 }
580
/*
 * Debug exception handlers.
 */

/*
 * breakpoint_handler - hardware breakpoint debug exception handler.
 * @unused: fault address (not used for breakpoints).
 * @esr: exception syndrome.
 * @regs: exception register state.
 *
 * Matches the faulting PC against every programmed BRP slot, delivers
 * a perf event for each hit, then arranges single-stepping over the
 * breakpointed instruction where no custom overflow handler will do
 * it. Always returns 0 (exception handled).
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step;
        u32 ctrl_reg;
        u64 addr, val;
        struct perf_event *bp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(bp_on_reg);
        addr = instruction_pointer(regs);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
                if (val != (addr & ~0x3))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & 0x3)) & ctrl.len))
                        goto unlock;

                counter_arch_bp(bp)->trigger = addr;
                perf_bp_event(bp, regs);

                /* Do we need to handle the stepping? */
                if (!bp->overflow_handler)
                        step = 1;
unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        if (user_mode(regs)) {
                /* Disable EL0 breakpoints while we step over this one. */
                debug_info->bps_disabled = 1;
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

                /* If we're already stepping a watchpoint, just return. */
                if (debug_info->wps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                /* Kernel-mode hit: per-CPU step state instead of per-task. */
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
659
/*
 * watchpoint_handler - hardware watchpoint debug exception handler.
 * @addr: faulting data address.
 * @esr: exception syndrome (access type is decoded from it).
 * @regs: exception register state.
 *
 * Matches @addr against every programmed WRP slot, delivers a perf
 * event for each hit, then arranges single-stepping over the access
 * where no custom overflow handler will do it. Always returns 0.
 */
static int watchpoint_handler(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step, access;
        u32 ctrl_reg;
        u64 val, alignment_mask;
        struct perf_event *wp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(wp_on_reg);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
                if (is_compat_task()) {
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                                alignment_mask = 0x7;
                        else
                                alignment_mask = 0x3;
                } else {
                        alignment_mask = 0x7;
                }

                /* Check if the watchpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
                if (val != (untagged_addr(addr) & ~alignment_mask))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & alignment_mask)) & ctrl.len))
                        goto unlock;

                /*
                 * Check that the access type matches.
                 * 0 => load, otherwise => store
                 */
                access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
                         HW_BREAKPOINT_R;
                if (!(access & hw_breakpoint_type(wp)))
                        goto unlock;

                info->trigger = addr;
                perf_bp_event(wp, regs);

                /* Do we need to handle the stepping? */
                if (!wp->overflow_handler)
                        step = 1;

unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        /*
         * We always disable EL0 watchpoints because the kernel can
         * cause these to fire via an unprivileged access.
         */
        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

        if (user_mode(regs)) {
                debug_info->wps_disabled = 1;

                /* If we're already stepping a breakpoint, just return. */
                if (debug_info->bps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                /* Kernel-mode hit: per-CPU step state instead of per-task. */
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
761
/*
 * Handle single-step exception.
 *
 * Re-enables any breakpoint/watchpoint registers that were disabled
 * while stepping over a hit, for both the user (per-task) and kernel
 * (per-CPU) stepping states.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
        struct debug_info *debug_info = &current->thread.debug;
        int handled_exception = 0, *kernel_step;

        kernel_step = this_cpu_ptr(&stepping_kernel_bp);

        /*
         * Called from single-step exception handler.
         * Return 0 if execution can resume, 1 if a SIGTRAP should be
         * reported.
         */
        if (user_mode(regs)) {
                if (debug_info->bps_disabled) {
                        debug_info->bps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (debug_info->wps_disabled) {
                        debug_info->wps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (handled_exception) {
                        if (debug_info->suspended_step) {
                                /* Stepping was already active before the hit. */
                                debug_info->suspended_step = 0;
                                /* Allow exception handling to fall-through. */
                                handled_exception = 0;
                        } else {
                                user_disable_single_step(current);
                        }
                }
        } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

                if (!debug_info->wps_disabled)
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

                if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
                        kernel_disable_single_step();
                        handled_exception = 1;
                } else {
                        handled_exception = 0;
                }

                *kernel_step = ARM_KERNEL_STEP_NONE;
        }

        return !handled_exception;
}
818
819 /*
820  * Context-switcher for restoring suspended breakpoints.
821  */
822 void hw_breakpoint_thread_switch(struct task_struct *next)
823 {
824         /*
825          *           current        next
826          * disabled: 0              0     => The usual case, NOTIFY_DONE
827          *           0              1     => Disable the registers
828          *           1              0     => Enable the registers
829          *           1              1     => NOTIFY_DONE. per-task bps will
830          *                                   get taken care of by perf.
831          */
832
833         struct debug_info *current_debug_info, *next_debug_info;
834
835         current_debug_info = &current->thread.debug;
836         next_debug_info = &next->thread.debug;
837
838         /* Update breakpoints. */
839         if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
840                 toggle_bp_registers(AARCH64_DBG_REG_BCR,
841                                     DBG_ACTIVE_EL0,
842                                     !next_debug_info->bps_disabled);
843
844         /* Update watchpoints. */
845         if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
846                 toggle_bp_registers(AARCH64_DBG_REG_WCR,
847                                     DBG_ACTIVE_EL0,
848                                     !next_debug_info->wps_disabled);
849 }
850
/*
 * CPU initialisation.
 */

/*
 * hw_breakpoint_reset - restore or clear all debug slots on this CPU.
 * @unused: unused (signature required by smp_call_function and the
 *          CPU PM restore hook).
 */
static void hw_breakpoint_reset(void *unused)
{
        int i;
        struct perf_event **slots;
        /*
         * When a CPU goes through cold-boot, it does not have any installed
         * slot, so it is safe to share the same function for restoring and
         * resetting breakpoints; when a CPU is hotplugged in, it goes
         * through the slots, which are all empty, hence it just resets control
         * and value for debug registers.
         * When this function is triggered on warm-boot through a CPU PM
         * notifier some slots might be initialized; if so they are
         * reprogrammed according to the debug slots content.
         */
        for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
                }
        }

        for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
                }
        }
}
886
/*
 * Hotplug notifier: reset the debug registers on a CPU as it comes
 * online (runs hw_breakpoint_reset on that CPU via IPI).
 */
static int hw_breakpoint_reset_notify(struct notifier_block *self,
                                                unsigned long action,
                                                void *hcpu)
{
        int cpu = (long)hcpu;
        if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
                smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
        .notifier_call = hw_breakpoint_reset_notify,
};
900
#ifdef CONFIG_CPU_PM
/* Provided by the CPU PM code; registers a restore hook for suspend. */
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
/* No CPU PM support: registering a restore hook is a no-op. */
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif
908
/*
 * One-time initialisation.
 *
 * Probes the number of BRP/WRP registers, resets the debug state on
 * every CPU, installs the debug fault handlers and registers the
 * hotplug/suspend restore hooks.
 */
static int __init arch_hw_breakpoint_init(void)
{
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);

        cpu_notifier_register_begin();

        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
        smp_call_function(hw_breakpoint_reset, NULL, 1);
        hw_breakpoint_reset(NULL);

        /* Register debug fault handlers. */
        hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-breakpoint handler");
        hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-watchpoint handler");

        /* Register hotplug notifier. */
        __register_cpu_notifier(&hw_breakpoint_reset_nb);

        cpu_notifier_register_done();

        /* Register cpu_suspend hw breakpoint restore hook */
        cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

        return 0;
}
arch_initcall(arch_hw_breakpoint_init);
946
/* Empty stub: there is nothing to read back for a hw breakpoint event. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
950
/*
 * Dummy function to register with die_notifier.
 * Always passes the event on (NOTIFY_DONE).
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}