GNU Linux-libre 4.19.286-gnu1
arch/powerpc/perf/8xx-pmu.c
/*
 * Performance event support - PPC 8xx
 *
 * Copyright 2016 Christophe Leroy, CS Systemes d'Information
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>

#define PERF_8xx_ID_CPU_CYCLES          1
#define PERF_8xx_ID_HW_INSTRUCTIONS     2
#define PERF_8xx_ID_ITLB_LOAD_MISS      3
#define PERF_8xx_ID_DTLB_LOAD_MISS      4

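/*
 * HW_CACHE event configs follow the generic perf encoding
 * (cache_id | op << 8 | result << 16); only ITLB and DTLB read-miss
 * events are supported here.
 */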
#define C(x)    PERF_COUNT_HW_CACHE_##x
#define DTLB_LOAD_MISS  (C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
#define ITLB_LOAD_MISS  (C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))

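/*
 * The counters and branch patch sites below are defined in the 8xx
 * low-level TLB miss handlers (head_8xx.S); the *_exit_* symbols mark
 * the instructions that get patched to branch into the counting code.
 */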
extern unsigned long itlb_miss_counter, dtlb_miss_counter;
extern atomic_t instruction_counter;
extern unsigned int itlb_miss_perf, dtlb_miss_perf;
extern unsigned int itlb_miss_exit_1, itlb_miss_exit_2;
extern unsigned int dtlb_miss_exit_1, dtlb_miss_exit_2, dtlb_miss_exit_3;

static atomic_t insn_ctr_ref;
static atomic_t itlb_miss_ref;
static atomic_t dtlb_miss_ref;

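/*
 * The hardware instruction counter lives in the upper half of COUNTA and
 * counts down; instruction_counter extends it in software each time it
 * wraps (maintained in head_8xx.S). Combine both into one 48-bit
 * down-counting value, retrying if the software part changed while
 * COUNTA was being read.
 */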
static s64 get_insn_ctr(void)
{
        int ctr;
        unsigned long counta;

        do {
                ctr = atomic_read(&instruction_counter);
                counta = mfspr(SPRN_COUNTA);
        } while (ctr != atomic_read(&instruction_counter));

        return ((s64)ctr << 16) | (counta >> 16);
}

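/*
 * Map a generic perf event to one of the four events this driver can
 * emulate; -ENOENT lets other PMUs try the event, -EOPNOTSUPP rejects it.
 */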
static int event_type(struct perf_event *event)
{
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
                        return PERF_8xx_ID_CPU_CYCLES;
                if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
                        return PERF_8xx_ID_HW_INSTRUCTIONS;
                break;
        case PERF_TYPE_HW_CACHE:
                if (event->attr.config == ITLB_LOAD_MISS)
                        return PERF_8xx_ID_ITLB_LOAD_MISS;
                if (event->attr.config == DTLB_LOAD_MISS)
                        return PERF_8xx_ID_DTLB_LOAD_MISS;
                break;
        case PERF_TYPE_RAW:
                break;
        default:
                return -ENOENT;
        }
        return -EOPNOTSUPP;
}

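/* No per-event setup is needed; just reject events we cannot emulate. */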
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
        int type = event_type(event);

        if (type < 0)
                return type;
        return 0;
}

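/*
 * Start counting: the first user of an event type enables (or patches in)
 * the underlying counting machinery, then the current counter value is
 * snapshotted into prev_count so that read() can compute deltas.
 */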
static int mpc8xx_pmu_add(struct perf_event *event, int flags)
{
        int type = event_type(event);
        s64 val = 0;

        if (type < 0)
                return type;

        switch (type) {
        case PERF_8xx_ID_CPU_CYCLES:
                val = get_tb();
                break;
        case PERF_8xx_ID_HW_INSTRUCTIONS:
                if (atomic_inc_return(&insn_ctr_ref) == 1)
                        mtspr(SPRN_ICTRL, 0xc0080007);
                val = get_insn_ctr();
                break;
        case PERF_8xx_ID_ITLB_LOAD_MISS:
                if (atomic_inc_return(&itlb_miss_ref) == 1) {
                        unsigned long target = (unsigned long)&itlb_miss_perf;

                        patch_branch(&itlb_miss_exit_1, target, 0);
#ifndef CONFIG_PIN_TLB_TEXT
                        patch_branch(&itlb_miss_exit_2, target, 0);
#endif
                }
                val = itlb_miss_counter;
                break;
        case PERF_8xx_ID_DTLB_LOAD_MISS:
                if (atomic_inc_return(&dtlb_miss_ref) == 1) {
                        unsigned long target = (unsigned long)&dtlb_miss_perf;

                        patch_branch(&dtlb_miss_exit_1, target, 0);
                        patch_branch(&dtlb_miss_exit_2, target, 0);
                        patch_branch(&dtlb_miss_exit_3, target, 0);
                }
                val = dtlb_miss_counter;
                break;
        }
        local64_set(&event->hw.prev_count, val);
        return 0;
}

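/*
 * Update event->count from the current counter value. Cycles are derived
 * from the timebase, which this driver assumes advances once every 16 CPU
 * cycles (hence the * 16). The instruction counter counts down, so its
 * delta is prev - val, corrected for 48-bit wraparound. The TLB miss
 * counters are 32-bit up-counters, handled with wrap-safe 32-bit
 * subtraction.
 */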
static void mpc8xx_pmu_read(struct perf_event *event)
{
        int type = event_type(event);
        s64 prev, val = 0, delta = 0;

        if (type < 0)
                return;

        do {
                prev = local64_read(&event->hw.prev_count);
                switch (type) {
                case PERF_8xx_ID_CPU_CYCLES:
                        val = get_tb();
                        delta = 16 * (val - prev);
                        break;
                case PERF_8xx_ID_HW_INSTRUCTIONS:
                        val = get_insn_ctr();
                        delta = prev - val;
                        if (delta < 0)
                                delta += 0x1000000000000LL;
                        break;
                case PERF_8xx_ID_ITLB_LOAD_MISS:
                        val = itlb_miss_counter;
                        delta = (s64)((s32)val - (s32)prev);
                        break;
                case PERF_8xx_ID_DTLB_LOAD_MISS:
                        val = dtlb_miss_counter;
                        delta = (s64)((s32)val - (s32)prev);
                        break;
                }
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

        local64_add(delta, &event->count);
}

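/*
 * Remove an event: fold in its final count, then undo the setup done in
 * mpc8xx_pmu_add() once the last user of that event type goes away.
 */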
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
        /* mfspr r10, SPRN_SPRG_SCRATCH0 */
        unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
                            __PPC_SPR(SPRN_SPRG_SCRATCH0);

        mpc8xx_pmu_read(event);

        /* If it was the last user, stop counting to avoid useless overhead */
        switch (event_type(event)) {
        case PERF_8xx_ID_CPU_CYCLES:
                break;
        case PERF_8xx_ID_HW_INSTRUCTIONS:
                if (atomic_dec_return(&insn_ctr_ref) == 0)
                        mtspr(SPRN_ICTRL, 7);
                break;
        case PERF_8xx_ID_ITLB_LOAD_MISS:
                if (atomic_dec_return(&itlb_miss_ref) == 0) {
                        patch_instruction(&itlb_miss_exit_1, insn);
#ifndef CONFIG_PIN_TLB_TEXT
                        patch_instruction(&itlb_miss_exit_2, insn);
#endif
                }
                break;
        case PERF_8xx_ID_DTLB_LOAD_MISS:
                if (atomic_dec_return(&dtlb_miss_ref) == 0) {
                        patch_instruction(&dtlb_miss_exit_1, insn);
                        patch_instruction(&dtlb_miss_exit_2, insn);
                        patch_instruction(&dtlb_miss_exit_3, insn);
                }
                break;
        }
}

static struct pmu mpc8xx_pmu = {
        .event_init     = mpc8xx_pmu_event_init,
        .add            = mpc8xx_pmu_add,
        .del            = mpc8xx_pmu_del,
        .read           = mpc8xx_pmu_read,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT |
                          PERF_PMU_CAP_NO_NMI,
};

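/*
 * Boot-time setup: program ICTRL/CMPA/COUNTA to the idle values used when
 * no event is counting, then register the PMU under the name "cpu".
 */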
static int init_mpc8xx_pmu(void)
{
        mtspr(SPRN_ICTRL, 7);
        mtspr(SPRN_CMPA, 0);
        mtspr(SPRN_COUNTA, 0xffff);

        return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}

early_initcall(init_mpc8xx_pmu);