1 /* SandyBridge-EP/IvyTown uncore support */
2 #include "perf_event_intel_uncore.h"
5 /* SNB-EP Box level control */
6 #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
7 #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
8 #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
9 #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
10 #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
11 SNBEP_PMON_BOX_CTL_RST_CTRS | \
12 SNBEP_PMON_BOX_CTL_FRZ_EN)
13 /* SNB-EP event control */
14 #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
15 #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
16 #define SNBEP_PMON_CTL_RST (1 << 17)
17 #define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
18 #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
19 #define SNBEP_PMON_CTL_EN (1 << 22)
20 #define SNBEP_PMON_CTL_INVERT (1 << 23)
21 #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
22 #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
23 SNBEP_PMON_CTL_UMASK_MASK | \
24 SNBEP_PMON_CTL_EDGE_DET | \
25 SNBEP_PMON_CTL_INVERT | \
26 SNBEP_PMON_CTL_TRESH_MASK)
28 /* SNB-EP Ubox event control */
29 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
30 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
31 (SNBEP_PMON_CTL_EV_SEL_MASK | \
32 SNBEP_PMON_CTL_UMASK_MASK | \
33 SNBEP_PMON_CTL_EDGE_DET | \
34 SNBEP_PMON_CTL_INVERT | \
35 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
37 #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
38 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
39 SNBEP_CBO_PMON_CTL_TID_EN)
41 /* SNB-EP PCU event control */
42 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
43 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
44 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
/* Use an unsigned constant: 1 << 31 left-shifts into the sign bit (UB). */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1U << 31)
46 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
47 (SNBEP_PMON_CTL_EV_SEL_MASK | \
48 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
49 SNBEP_PMON_CTL_EDGE_DET | \
50 SNBEP_PMON_CTL_EV_SEL_EXT | \
51 SNBEP_PMON_CTL_INVERT | \
52 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
53 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
54 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
56 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
57 (SNBEP_PMON_RAW_EVENT_MASK | \
58 SNBEP_PMON_CTL_EV_SEL_EXT)
60 /* SNB-EP pci control register */
61 #define SNBEP_PCI_PMON_BOX_CTL 0xf4
62 #define SNBEP_PCI_PMON_CTL0 0xd8
63 /* SNB-EP pci counter register */
64 #define SNBEP_PCI_PMON_CTR0 0xa0
66 /* SNB-EP home agent register */
67 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
68 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
69 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
70 /* SNB-EP memory controller register */
71 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
72 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
73 /* SNB-EP QPI register */
74 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
75 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
76 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
79 /* SNB-EP Ubox register */
80 #define SNBEP_U_MSR_PMON_CTR0 0xc16
81 #define SNBEP_U_MSR_PMON_CTL0 0xc10
83 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
84 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
86 /* SNB-EP Cbo register */
87 #define SNBEP_C0_MSR_PMON_CTR0 0xd16
88 #define SNBEP_C0_MSR_PMON_CTL0 0xd10
89 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
90 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
91 #define SNBEP_CBO_MSR_OFFSET 0x20
93 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
94 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
95 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
98 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
100 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
101 .config_mask = (m), \
105 /* SNB-EP PCU register */
106 #define SNBEP_PCU_MSR_PMON_CTR0 0xc36
107 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30
108 #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
109 #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
110 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
111 #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
112 #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
114 /* IVBEP event control */
115 #define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
116 SNBEP_PMON_BOX_CTL_RST_CTRS)
117 #define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
118 SNBEP_PMON_CTL_UMASK_MASK | \
119 SNBEP_PMON_CTL_EDGE_DET | \
120 SNBEP_PMON_CTL_TRESH_MASK)
122 #define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
/*
 * Use an unsigned constant: (1 << 31) is a signed-overflow shift and would
 * sign-extend to 0xffffffff80000000 when widened to u64 for wrmsrl().
 */
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1U << 31)
124 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
126 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
127 (SNBEP_PMON_CTL_EV_SEL_MASK | \
128 SNBEP_PMON_CTL_UMASK_MASK | \
129 SNBEP_PMON_CTL_EDGE_DET | \
130 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
132 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
133 SNBEP_CBO_PMON_CTL_TID_EN)
135 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
136 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
137 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
144 /* IVBEP home agent */
145 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
146 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
147 (IVBEP_PMON_RAW_EVENT_MASK | \
148 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
150 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
151 (SNBEP_PMON_CTL_EV_SEL_MASK | \
152 SNBEP_PMON_CTL_EV_SEL_EXT | \
153 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
154 SNBEP_PMON_CTL_EDGE_DET | \
155 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
156 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
157 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
159 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
160 (IVBEP_PMON_RAW_EVENT_MASK | \
161 SNBEP_PMON_CTL_EV_SEL_EXT)
163 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
164 ((1ULL << (n)) - 1)))
166 /* Haswell-EP Ubox */
167 #define HSWEP_U_MSR_PMON_CTR0 0x709
168 #define HSWEP_U_MSR_PMON_CTL0 0x705
169 #define HSWEP_U_MSR_PMON_FILTER 0x707
171 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
172 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704
174 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
175 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
177 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
178 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181 #define HSWEP_C0_MSR_PMON_CTR0 0xe08
182 #define HSWEP_C0_MSR_PMON_CTL0 0xe01
183 #define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
184 #define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
185 #define HSWEP_CBO_MSR_OFFSET 0x10
188 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
189 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
198 /* Haswell-EP Sbox */
199 #define HSWEP_S0_MSR_PMON_CTR0 0x726
200 #define HSWEP_S0_MSR_PMON_CTL0 0x721
201 #define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
202 #define HSWEP_SBOX_MSR_OFFSET 0xa
203 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
204 SNBEP_CBO_PMON_CTL_TID_EN)
207 #define HSWEP_PCU_MSR_PMON_CTR0 0x717
208 #define HSWEP_PCU_MSR_PMON_CTL0 0x711
209 #define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
210 #define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
213 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
214 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
215 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
216 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
217 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
218 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
219 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
220 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
221 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
222 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
223 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
224 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
225 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
226 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
227 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
228 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
229 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
230 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
231 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
232 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
233 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
234 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
235 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
236 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
237 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
238 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
239 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
240 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
241 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
242 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
243 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
244 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
245 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
246 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
247 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
248 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
249 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
250 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
251 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
252 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
253 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
254 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
255 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
256 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
257 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
258 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
259 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
260 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
261 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
263 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
265 struct pci_dev *pdev = box->pci_dev;
266 int box_ctl = uncore_pci_box_ctl(box);
269 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
270 config |= SNBEP_PMON_BOX_CTL_FRZ;
271 pci_write_config_dword(pdev, box_ctl, config);
275 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
277 struct pci_dev *pdev = box->pci_dev;
278 int box_ctl = uncore_pci_box_ctl(box);
281 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
282 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
283 pci_write_config_dword(pdev, box_ctl, config);
287 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
289 struct pci_dev *pdev = box->pci_dev;
290 struct hw_perf_event *hwc = &event->hw;
292 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
295 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
297 struct pci_dev *pdev = box->pci_dev;
298 struct hw_perf_event *hwc = &event->hw;
300 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
303 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
305 struct pci_dev *pdev = box->pci_dev;
306 struct hw_perf_event *hwc = &event->hw;
309 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
310 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
315 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
317 struct pci_dev *pdev = box->pci_dev;
319 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
322 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
327 msr = uncore_msr_box_ctl(box);
330 config |= SNBEP_PMON_BOX_CTL_FRZ;
335 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
340 msr = uncore_msr_box_ctl(box);
343 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
348 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
350 struct hw_perf_event *hwc = &event->hw;
351 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
353 if (reg1->idx != EXTRA_REG_NONE)
354 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
356 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
359 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
360 struct perf_event *event)
362 struct hw_perf_event *hwc = &event->hw;
364 wrmsrl(hwc->config_base, hwc->config);
367 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
369 unsigned msr = uncore_msr_box_ctl(box);
372 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
375 static struct attribute *snbep_uncore_formats_attr[] = {
376 &format_attr_event.attr,
377 &format_attr_umask.attr,
378 &format_attr_edge.attr,
379 &format_attr_inv.attr,
380 &format_attr_thresh8.attr,
384 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
385 &format_attr_event.attr,
386 &format_attr_umask.attr,
387 &format_attr_edge.attr,
388 &format_attr_inv.attr,
389 &format_attr_thresh5.attr,
393 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
394 &format_attr_event.attr,
395 &format_attr_umask.attr,
396 &format_attr_edge.attr,
397 &format_attr_tid_en.attr,
398 &format_attr_inv.attr,
399 &format_attr_thresh8.attr,
400 &format_attr_filter_tid.attr,
401 &format_attr_filter_nid.attr,
402 &format_attr_filter_state.attr,
403 &format_attr_filter_opc.attr,
407 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
408 &format_attr_event_ext.attr,
409 &format_attr_occ_sel.attr,
410 &format_attr_edge.attr,
411 &format_attr_inv.attr,
412 &format_attr_thresh5.attr,
413 &format_attr_occ_invert.attr,
414 &format_attr_occ_edge.attr,
415 &format_attr_filter_band0.attr,
416 &format_attr_filter_band1.attr,
417 &format_attr_filter_band2.attr,
418 &format_attr_filter_band3.attr,
422 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
423 &format_attr_event_ext.attr,
424 &format_attr_umask.attr,
425 &format_attr_edge.attr,
426 &format_attr_inv.attr,
427 &format_attr_thresh8.attr,
428 &format_attr_match_rds.attr,
429 &format_attr_match_rnid30.attr,
430 &format_attr_match_rnid4.attr,
431 &format_attr_match_dnid.attr,
432 &format_attr_match_mc.attr,
433 &format_attr_match_opc.attr,
434 &format_attr_match_vnw.attr,
435 &format_attr_match0.attr,
436 &format_attr_match1.attr,
437 &format_attr_mask_rds.attr,
438 &format_attr_mask_rnid30.attr,
439 &format_attr_mask_rnid4.attr,
440 &format_attr_mask_dnid.attr,
441 &format_attr_mask_mc.attr,
442 &format_attr_mask_opc.attr,
443 &format_attr_mask_vnw.attr,
444 &format_attr_mask0.attr,
445 &format_attr_mask1.attr,
449 static struct uncore_event_desc snbep_uncore_imc_events[] = {
450 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
451 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
452 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
453 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
454 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
455 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
456 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
457 { /* end: all zeroes */ },
460 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
461 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
462 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
463 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
464 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
465 { /* end: all zeroes */ },
468 static struct attribute_group snbep_uncore_format_group = {
470 .attrs = snbep_uncore_formats_attr,
473 static struct attribute_group snbep_uncore_ubox_format_group = {
475 .attrs = snbep_uncore_ubox_formats_attr,
478 static struct attribute_group snbep_uncore_cbox_format_group = {
480 .attrs = snbep_uncore_cbox_formats_attr,
483 static struct attribute_group snbep_uncore_pcu_format_group = {
485 .attrs = snbep_uncore_pcu_formats_attr,
488 static struct attribute_group snbep_uncore_qpi_format_group = {
490 .attrs = snbep_uncore_qpi_formats_attr,
/*
 * Common callbacks for all SNB-EP MSR-based PMON boxes.  The two-level macro
 * lets derived platforms reuse the box/event ops while overriding init_box.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\
504 static struct intel_uncore_ops snbep_uncore_msr_ops = {
505 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
/*
 * Common callbacks for PCI-based PMON boxes.  enable_event is deliberately
 * left out so users (plain PCI vs. QPI) can supply their own variant.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
515 static struct intel_uncore_ops snbep_uncore_pci_ops = {
516 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
517 .enable_event = snbep_uncore_pci_enable_event, \
520 static struct event_constraint snbep_uncore_cbox_constraints[] = {
521 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
522 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
523 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
524 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
525 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
526 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
527 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
528 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
529 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
530 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
531 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
532 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
533 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
534 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
535 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
536 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
537 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
538 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
539 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
540 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
541 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
542 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
543 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
544 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
545 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
546 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
550 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
551 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
552 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
553 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
554 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
555 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
556 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
557 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
558 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
559 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
560 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
564 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
565 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
566 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
567 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
568 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
569 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
570 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
571 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
572 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
573 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
574 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
575 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
576 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
577 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
578 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
579 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
580 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
581 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
582 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
583 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
584 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
585 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
586 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
587 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
588 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
589 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
590 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
591 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
592 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
596 static struct intel_uncore_type snbep_uncore_ubox = {
601 .fixed_ctr_bits = 48,
602 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
603 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
604 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
605 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
606 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
607 .ops = &snbep_uncore_msr_ops,
608 .format_group = &snbep_uncore_ubox_format_group,
611 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
612 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
613 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
614 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
615 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
616 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
617 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
618 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
619 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
620 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
621 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
622 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
623 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
624 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
625 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
626 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
627 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
628 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
629 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
630 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
631 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
632 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
633 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
634 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
635 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
636 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
640 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
642 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
643 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
646 if (uncore_box_is_fake(box))
649 for (i = 0; i < 5; i++) {
650 if (reg1->alloc & (0x1 << i))
651 atomic_sub(1 << (i * 6), &er->ref);
656 static struct event_constraint *
657 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
658 u64 (*cbox_filter_mask)(int fields))
660 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
661 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
666 if (reg1->idx == EXTRA_REG_NONE)
669 raw_spin_lock_irqsave(&er->lock, flags);
670 for (i = 0; i < 5; i++) {
671 if (!(reg1->idx & (0x1 << i)))
673 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
676 mask = cbox_filter_mask(0x1 << i);
677 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
678 !((reg1->config ^ er->config) & mask)) {
679 atomic_add(1 << (i * 6), &er->ref);
681 er->config |= reg1->config & mask;
687 raw_spin_unlock_irqrestore(&er->lock, flags);
691 if (!uncore_box_is_fake(box))
692 reg1->alloc |= alloc;
696 for (; i >= 0; i--) {
697 if (alloc & (0x1 << i))
698 atomic_sub(1 << (i * 6), &er->ref);
700 return &uncore_constraint_empty;
703 static u64 snbep_cbox_filter_mask(int fields)
708 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
710 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
712 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
714 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
719 static struct event_constraint *
720 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
722 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
725 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
727 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
728 struct extra_reg *er;
731 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
732 if (er->event != (event->hw.config & er->config_mask))
738 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
739 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
740 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
746 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
747 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
748 .hw_config = snbep_cbox_hw_config,
749 .get_constraint = snbep_cbox_get_constraint,
750 .put_constraint = snbep_cbox_put_constraint,
753 static struct intel_uncore_type snbep_uncore_cbox = {
758 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
759 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
760 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
761 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
762 .msr_offset = SNBEP_CBO_MSR_OFFSET,
763 .num_shared_regs = 1,
764 .constraints = snbep_uncore_cbox_constraints,
765 .ops = &snbep_uncore_cbox_ops,
766 .format_group = &snbep_uncore_cbox_format_group,
769 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
771 struct hw_perf_event *hwc = &event->hw;
772 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
773 u64 config = reg1->config;
775 if (new_idx > reg1->idx)
776 config <<= 8 * (new_idx - reg1->idx);
778 config >>= 8 * (reg1->idx - new_idx);
781 hwc->config += new_idx - reg1->idx;
782 reg1->config = config;
788 static struct event_constraint *
789 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
791 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
792 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
795 u64 mask, config1 = reg1->config;
798 if (reg1->idx == EXTRA_REG_NONE ||
799 (!uncore_box_is_fake(box) && reg1->alloc))
802 mask = 0xffULL << (idx * 8);
803 raw_spin_lock_irqsave(&er->lock, flags);
804 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
805 !((config1 ^ er->config) & mask)) {
806 atomic_add(1 << (idx * 8), &er->ref);
808 er->config |= config1 & mask;
811 raw_spin_unlock_irqrestore(&er->lock, flags);
815 if (idx != reg1->idx) {
816 config1 = snbep_pcu_alter_er(event, idx, false);
819 return &uncore_constraint_empty;
822 if (!uncore_box_is_fake(box)) {
823 if (idx != reg1->idx)
824 snbep_pcu_alter_er(event, idx, true);
830 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
832 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
833 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
835 if (uncore_box_is_fake(box) || !reg1->alloc)
838 atomic_sub(1 << (reg1->idx * 8), &er->ref);
842 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
844 struct hw_perf_event *hwc = &event->hw;
845 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
846 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
848 if (ev_sel >= 0xb && ev_sel <= 0xe) {
849 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
850 reg1->idx = ev_sel - 0xb;
851 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
856 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
857 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
858 .hw_config = snbep_pcu_hw_config,
859 .get_constraint = snbep_pcu_get_constraint,
860 .put_constraint = snbep_pcu_put_constraint,
863 static struct intel_uncore_type snbep_uncore_pcu = {
868 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
869 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
870 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
871 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
872 .num_shared_regs = 1,
873 .ops = &snbep_uncore_pcu_ops,
874 .format_group = &snbep_uncore_pcu_format_group,
877 static struct intel_uncore_type *snbep_msr_uncores[] = {
884 void snbep_uncore_cpu_init(void)
886 if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
887 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
888 uncore_msr_uncores = snbep_msr_uncores;
/* Indices into uncore_extra_pci_dev[] for the per-port QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
};
897 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
899 struct hw_perf_event *hwc = &event->hw;
900 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
901 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
903 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
905 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
906 reg1->config = event->attr.config1;
907 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
908 reg2->config = event->attr.config2;
913 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
915 struct pci_dev *pdev = box->pci_dev;
916 struct hw_perf_event *hwc = &event->hw;
917 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
918 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
920 if (reg1->idx != EXTRA_REG_NONE) {
921 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
922 struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
924 pci_write_config_dword(filter_pdev, reg1->reg,
926 pci_write_config_dword(filter_pdev, reg1->reg + 4,
927 (u32)(reg1->config >> 32));
928 pci_write_config_dword(filter_pdev, reg2->reg,
930 pci_write_config_dword(filter_pdev, reg2->reg + 4,
931 (u32)(reg2->config >> 32));
935 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
938 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
939 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
940 .enable_event = snbep_qpi_enable_event,
941 .hw_config = snbep_qpi_hw_config,
942 .get_constraint = uncore_get_constraint,
943 .put_constraint = uncore_put_constraint,
/* Common intel_uncore_type fields shared by all SNB-EP PCI PMON boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
954 static struct intel_uncore_type snbep_uncore_ha = {
959 SNBEP_UNCORE_PCI_COMMON_INIT(),
962 static struct intel_uncore_type snbep_uncore_imc = {
967 .fixed_ctr_bits = 48,
968 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
969 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
970 .event_descs = snbep_uncore_imc_events,
971 SNBEP_UNCORE_PCI_COMMON_INIT(),
974 static struct intel_uncore_type snbep_uncore_qpi = {
979 .perf_ctr = SNBEP_PCI_PMON_CTR0,
980 .event_ctl = SNBEP_PCI_PMON_CTL0,
981 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
982 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
983 .num_shared_regs = 1,
984 .ops = &snbep_uncore_qpi_ops,
985 .event_descs = snbep_uncore_qpi_events,
986 .format_group = &snbep_uncore_qpi_format_group,
990 static struct intel_uncore_type snbep_uncore_r2pcie = {
995 .constraints = snbep_uncore_r2pcie_constraints,
996 SNBEP_UNCORE_PCI_COMMON_INIT(),
999 static struct intel_uncore_type snbep_uncore_r3qpi = {
1003 .perf_ctr_bits = 44,
1004 .constraints = snbep_uncore_r3qpi_constraints,
1005 SNBEP_UNCORE_PCI_COMMON_INIT(),
/* Indices of the PCI-based uncore types, matching snbep_pci_uncores[]. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1016 static struct intel_uncore_type *snbep_pci_uncores[] = {
1017 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
1018 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
1019 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
1020 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
1021 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
1025 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1027 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1028 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1030 { /* MC Channel 0 */
1031 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1032 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1034 { /* MC Channel 1 */
1035 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1036 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1038 { /* MC Channel 2 */
1039 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1040 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1042 { /* MC Channel 3 */
1043 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1044 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1047 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1048 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1051 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1052 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1055 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1056 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1058 { /* R3QPI Link 0 */
1059 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1060 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1062 { /* R3QPI Link 1 */
1063 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1064 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1066 { /* QPI Port 0 filter */
1067 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1068 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1069 SNBEP_PCI_QPI_PORT0_FILTER),
1071 { /* QPI Port 0 filter */
1072 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1073 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1074 SNBEP_PCI_QPI_PORT1_FILTER),
1076 { /* end: all zeroes */ }
1079 static struct pci_driver snbep_uncore_pci_driver = {
1080 .name = "snbep_uncore",
1081 .id_table = snbep_uncore_pci_ids,
1084 #define NODE_ID_MASK 0x7
1087 * build pci bus to socket mapping
/*
 * Build the PCI bus number -> physical socket (node) id mapping by
 * iterating over the UBOX PCI devices with the given device id.  For
 * each UBOX: read the local node id from config offset 0x40 and the
 * node-id mapping register from offset 0x54 (three bits per node), then
 * record the bus's physical id in the per-segment pci2phy map under
 * pci2phy_map_lock.
 *
 * Returns 0 on success or a pcibios errno on config-read failure.
 *
 * NOTE(review): several lines of this function (error checks, loop
 * braces, the pci_get_device iteration structure) are not visible in
 * this extract -- the flow described above is inferred from the visible
 * statements and should be confirmed against the full source.
 */
1089 static int snbep_pci2phy_map_init(int devid)
1091 struct pci_dev *ubox_dev = NULL;
1092 int i, bus, nodeid, segment;
1093 struct pci2phy_map *map;
1098 /* find the UBOX device */
1099 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1102 bus = ubox_dev->bus->number;
1103 /* get the Node ID of the local register */
1104 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1107 nodeid = config & NODE_ID_MASK;
1108 /* get the Node ID mapping */
1109 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1113 segment = pci_domain_nr(ubox_dev->bus);
1114 raw_spin_lock(&pci2phy_map_lock);
1115 map = __find_pci2phy_map(segment);
1117 raw_spin_unlock(&pci2phy_map_lock);
1123 * every three bits in the Node ID mapping register maps
1124 * to a particular node.
1126 for (i = 0; i < 8; i++) {
1127 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1128 map->pbus_to_physid[bus] = i;
1132 raw_spin_unlock(&pci2phy_map_lock);
1137 * For PCI bus with no UBOX device, find the next bus
1138 * that has UBOX device and use its mapping.
1140 raw_spin_lock(&pci2phy_map_lock);
1141 list_for_each_entry(map, &pci2phy_map_head, list) {
1143 /* walk buses downward so a bus without its own UBOX inherits the physical id recorded for a higher-numbered bus */
1143 for (bus = 255; bus >= 0; bus--) {
1144 if (map->pbus_to_physid[bus] >= 0)
1145 i = map->pbus_to_physid[bus];
1147 map->pbus_to_physid[bus] = i;
1150 raw_spin_unlock(&pci2phy_map_lock);
1153 pci_dev_put(ubox_dev);
1155 return err ? pcibios_err_to_errno(err) : 0;
/*
 * Register SNB-EP PCI uncore support: build the bus -> physical-id map
 * (0x3ce0 is presumably the SNB-EP UBOX PCI device id -- confirm
 * against the uncore programming guide), then install the SNB-EP PCI
 * uncore type table and PCI driver.
 */
1158 int snbep_uncore_pci_init(void)
1160 int ret = snbep_pci2phy_map_init(0x3ce0);
1163 uncore_pci_uncores = snbep_pci_uncores;
1164 uncore_pci_driver = &snbep_uncore_pci_driver;
1167 /* end of Sandy Bridge-EP uncore support */
1169 /* IvyTown uncore support */
/* Initialize (reset + freeze-enable) an IVT MSR-based PMON box by writing the IVT init value to its box control MSR. */
1170 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1172 unsigned msr = uncore_msr_box_ctl(box);
1174 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
/* Initialize an IVT PCI-based PMON box: write the IVT init value to the standard SNB-EP box control offset in PCI config space. */
1177 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1179 struct pci_dev *pdev = box->pci_dev;
1181 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
/*
 * Common MSR ops for IvyTown uncore boxes: IVT-specific box init, with
 * all remaining callbacks reused unchanged from the SNB-EP MSR ops.
 */
1184 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
1185 .init_box = ivbep_uncore_msr_init_box, \
1186 .disable_box = snbep_uncore_msr_disable_box, \
1187 .enable_box = snbep_uncore_msr_enable_box, \
1188 .disable_event = snbep_uncore_msr_disable_event, \
1189 .enable_event = snbep_uncore_msr_enable_event, \
1190 .read_counter = uncore_msr_read_counter
1192 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1193 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1196 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1197 .init_box = ivbep_uncore_pci_init_box,
1198 .disable_box = snbep_uncore_pci_disable_box,
1199 .enable_box = snbep_uncore_pci_enable_box,
1200 .disable_event = snbep_uncore_pci_disable_event,
1201 .enable_event = snbep_uncore_pci_enable_event,
1202 .read_counter = snbep_uncore_pci_read_counter,
/*
 * Common initializer fields for IVT PCI-based uncore PMU types: same
 * register offsets as SNB-EP, but the IVT raw event mask, IVT PCI ops
 * (IVT box init) and the IVT sysfs format group.
 */
1205 #define IVBEP_UNCORE_PCI_COMMON_INIT() \
1206 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1207 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1208 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
1209 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1210 .ops = &ivbep_uncore_pci_ops, \
1211 .format_group = &ivbep_uncore_format_group
1213 static struct attribute *ivbep_uncore_formats_attr[] = {
1214 &format_attr_event.attr,
1215 &format_attr_umask.attr,
1216 &format_attr_edge.attr,
1217 &format_attr_inv.attr,
1218 &format_attr_thresh8.attr,
1222 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1223 &format_attr_event.attr,
1224 &format_attr_umask.attr,
1225 &format_attr_edge.attr,
1226 &format_attr_inv.attr,
1227 &format_attr_thresh5.attr,
1231 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1232 &format_attr_event.attr,
1233 &format_attr_umask.attr,
1234 &format_attr_edge.attr,
1235 &format_attr_tid_en.attr,
1236 &format_attr_thresh8.attr,
1237 &format_attr_filter_tid.attr,
1238 &format_attr_filter_link.attr,
1239 &format_attr_filter_state2.attr,
1240 &format_attr_filter_nid2.attr,
1241 &format_attr_filter_opc2.attr,
1242 &format_attr_filter_nc.attr,
1243 &format_attr_filter_c6.attr,
1244 &format_attr_filter_isoc.attr,
1248 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1249 &format_attr_event_ext.attr,
1250 &format_attr_occ_sel.attr,
1251 &format_attr_edge.attr,
1252 &format_attr_thresh5.attr,
1253 &format_attr_occ_invert.attr,
1254 &format_attr_occ_edge.attr,
1255 &format_attr_filter_band0.attr,
1256 &format_attr_filter_band1.attr,
1257 &format_attr_filter_band2.attr,
1258 &format_attr_filter_band3.attr,
1262 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1263 &format_attr_event_ext.attr,
1264 &format_attr_umask.attr,
1265 &format_attr_edge.attr,
1266 &format_attr_thresh8.attr,
1267 &format_attr_match_rds.attr,
1268 &format_attr_match_rnid30.attr,
1269 &format_attr_match_rnid4.attr,
1270 &format_attr_match_dnid.attr,
1271 &format_attr_match_mc.attr,
1272 &format_attr_match_opc.attr,
1273 &format_attr_match_vnw.attr,
1274 &format_attr_match0.attr,
1275 &format_attr_match1.attr,
1276 &format_attr_mask_rds.attr,
1277 &format_attr_mask_rnid30.attr,
1278 &format_attr_mask_rnid4.attr,
1279 &format_attr_mask_dnid.attr,
1280 &format_attr_mask_mc.attr,
1281 &format_attr_mask_opc.attr,
1282 &format_attr_mask_vnw.attr,
1283 &format_attr_mask0.attr,
1284 &format_attr_mask1.attr,
1288 static struct attribute_group ivbep_uncore_format_group = {
1290 .attrs = ivbep_uncore_formats_attr,
1293 static struct attribute_group ivbep_uncore_ubox_format_group = {
1295 .attrs = ivbep_uncore_ubox_formats_attr,
1298 static struct attribute_group ivbep_uncore_cbox_format_group = {
1300 .attrs = ivbep_uncore_cbox_formats_attr,
1303 static struct attribute_group ivbep_uncore_pcu_format_group = {
1305 .attrs = ivbep_uncore_pcu_formats_attr,
1308 static struct attribute_group ivbep_uncore_qpi_format_group = {
1310 .attrs = ivbep_uncore_qpi_formats_attr,
1313 static struct intel_uncore_type ivbep_uncore_ubox = {
1317 .perf_ctr_bits = 44,
1318 .fixed_ctr_bits = 48,
1319 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1320 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1321 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1322 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1323 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1324 .ops = &ivbep_uncore_msr_ops,
1325 .format_group = &ivbep_uncore_ubox_format_group,
1328 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1329 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1330 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1331 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1332 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1333 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1334 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1335 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1336 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1337 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1338 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1339 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1340 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1341 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1342 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1343 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1344 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1345 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1346 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1347 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1348 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1349 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1350 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1351 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1352 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1353 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1354 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1355 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1356 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1357 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1358 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1359 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1360 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1361 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1362 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1363 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1364 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1365 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
/*
 * Translate C-box filter field-selector bits (the idx values from the
 * extra_regs table) into the corresponding IVT filter-register bit
 * mask.  Selector bit 0x10 enables the opcode-match group, which pulls
 * in OPC/NC/C6/ISOC together.
 * NOTE(review): the guard conditions for the TID/LINK/STATE/NID fields
 * (presumably bits 0x1/0x2/0x4/0x8) are not visible in this extract.
 */
1369 static u64 ivbep_cbox_filter_mask(int fields)
1374 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1376 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1378 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1380 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1381 if (fields & 0x10) {
1382 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1383 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1384 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1385 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1391 static struct event_constraint *
1392 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1394 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
/*
 * Look the event up in the IVT C-box extra-register table and, on a
 * match, attach the per-box filter register (base filter MSR plus the
 * per-C-box MSR stride) with config1 masked down to only the filter
 * fields this event is allowed to use.
 * NOTE(review): the loop body that captures the matched idx, and the
 * function's braces/return, are not visible in this extract.
 */
1397 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1399 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1400 struct extra_reg *er;
1403 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1404 if (er->event != (event->hw.config & er->config_mask))
1410 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1411 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1412 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
/*
 * Enable an IVT C-box event.  If the event carries an extra (filter)
 * register, the 64-bit filter value is programmed as two 32-bit MSR
 * writes; the high half goes to reg + 6 -- presumably the second filter
 * MSR sits 6 MSRs above the first on IVT (contrast HSW-EP, which uses
 * reg + 1) -- confirm against the uncore programming guide.  Finally
 * the event control register is armed with the enable bit.
 */
1418 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1420 struct hw_perf_event *hwc = &event->hw;
1421 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1423 if (reg1->idx != EXTRA_REG_NONE) {
1424 u64 filter = uncore_shared_reg_config(box, 0);
1425 wrmsrl(reg1->reg, filter & 0xffffffff);
1426 wrmsrl(reg1->reg + 6, filter >> 32);
1429 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1432 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1433 .init_box = ivbep_uncore_msr_init_box,
1434 .disable_box = snbep_uncore_msr_disable_box,
1435 .enable_box = snbep_uncore_msr_enable_box,
1436 .disable_event = snbep_uncore_msr_disable_event,
1437 .enable_event = ivbep_cbox_enable_event,
1438 .read_counter = uncore_msr_read_counter,
1439 .hw_config = ivbep_cbox_hw_config,
1440 .get_constraint = ivbep_cbox_get_constraint,
1441 .put_constraint = snbep_cbox_put_constraint,
1444 static struct intel_uncore_type ivbep_uncore_cbox = {
1448 .perf_ctr_bits = 44,
1449 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1450 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1451 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1452 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1453 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1454 .num_shared_regs = 1,
1455 .constraints = snbep_uncore_cbox_constraints,
1456 .ops = &ivbep_uncore_cbox_ops,
1457 .format_group = &ivbep_uncore_cbox_format_group,
1460 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1461 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1462 .hw_config = snbep_pcu_hw_config,
1463 .get_constraint = snbep_pcu_get_constraint,
1464 .put_constraint = snbep_pcu_put_constraint,
1467 static struct intel_uncore_type ivbep_uncore_pcu = {
1471 .perf_ctr_bits = 48,
1472 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1473 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1474 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1475 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1476 .num_shared_regs = 1,
1477 .ops = &ivbep_uncore_pcu_ops,
1478 .format_group = &ivbep_uncore_pcu_format_group,
1481 static struct intel_uncore_type *ivbep_msr_uncores[] = {
/*
 * Install the IVT MSR-based uncore types; clamp the number of C-boxes
 * to the package's actual core count before registering.
 */
1488 void ivbep_uncore_cpu_init(void)
1490 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1491 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1492 uncore_msr_uncores = ivbep_msr_uncores;
1495 static struct intel_uncore_type ivbep_uncore_ha = {
1499 .perf_ctr_bits = 48,
1500 IVBEP_UNCORE_PCI_COMMON_INIT(),
1503 static struct intel_uncore_type ivbep_uncore_imc = {
1507 .perf_ctr_bits = 48,
1508 .fixed_ctr_bits = 48,
1509 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1510 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1511 .event_descs = snbep_uncore_imc_events,
1512 IVBEP_UNCORE_PCI_COMMON_INIT(),
1515 /* registers in IRP boxes are not properly aligned */
1516 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1517 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
/*
 * IRP counter control registers are not at a fixed stride (see the
 * offset table above), so enable the event by writing its per-index
 * PCI config offset with the enable bit set.
 */
1519 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1521 struct pci_dev *pdev = box->pci_dev;
1522 struct hw_perf_event *hwc = &event->hw;
1524 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1525 hwc->config | SNBEP_PMON_CTL_EN);
/* Disable an IRP event: rewrite its control register (from the per-index offset table) without the enable bit. */
1528 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1530 struct pci_dev *pdev = box->pci_dev;
1531 struct hw_perf_event *hwc = &event->hw;
1533 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
/*
 * Read a 64-bit IRP counter as two 32-bit PCI config reads -- low dword
 * at the table offset, high dword 4 bytes above -- assembled in place
 * through a (u32 *) view of the local count; fine on x86, which is
 * little-endian.
 * NOTE(review): the declaration and return of 'count' are not visible
 * in this extract.
 */
1536 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1538 struct pci_dev *pdev = box->pci_dev;
1539 struct hw_perf_event *hwc = &event->hw;
1542 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1543 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1548 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1549 .init_box = ivbep_uncore_pci_init_box,
1550 .disable_box = snbep_uncore_pci_disable_box,
1551 .enable_box = snbep_uncore_pci_enable_box,
1552 .disable_event = ivbep_uncore_irp_disable_event,
1553 .enable_event = ivbep_uncore_irp_enable_event,
1554 .read_counter = ivbep_uncore_irp_read_counter,
1557 static struct intel_uncore_type ivbep_uncore_irp = {
1561 .perf_ctr_bits = 48,
1562 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1563 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1564 .ops = &ivbep_uncore_irp_ops,
1565 .format_group = &ivbep_uncore_format_group,
1568 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1569 .init_box = ivbep_uncore_pci_init_box,
1570 .disable_box = snbep_uncore_pci_disable_box,
1571 .enable_box = snbep_uncore_pci_enable_box,
1572 .disable_event = snbep_uncore_pci_disable_event,
1573 .enable_event = snbep_qpi_enable_event,
1574 .read_counter = snbep_uncore_pci_read_counter,
1575 .hw_config = snbep_qpi_hw_config,
1576 .get_constraint = uncore_get_constraint,
1577 .put_constraint = uncore_put_constraint,
1580 static struct intel_uncore_type ivbep_uncore_qpi = {
1584 .perf_ctr_bits = 48,
1585 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1586 .event_ctl = SNBEP_PCI_PMON_CTL0,
1587 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1588 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1589 .num_shared_regs = 1,
1590 .ops = &ivbep_uncore_qpi_ops,
1591 .format_group = &ivbep_uncore_qpi_format_group,
1594 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1598 .perf_ctr_bits = 44,
1599 .constraints = snbep_uncore_r2pcie_constraints,
1600 IVBEP_UNCORE_PCI_COMMON_INIT(),
1603 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1607 .perf_ctr_bits = 44,
1608 .constraints = snbep_uncore_r3qpi_constraints,
1609 IVBEP_UNCORE_PCI_COMMON_INIT(),
1613 IVBEP_PCI_UNCORE_HA,
1614 IVBEP_PCI_UNCORE_IMC,
1615 IVBEP_PCI_UNCORE_IRP,
1616 IVBEP_PCI_UNCORE_QPI,
1617 IVBEP_PCI_UNCORE_R2PCIE,
1618 IVBEP_PCI_UNCORE_R3QPI,
1621 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1622 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1623 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1624 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1625 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1626 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1627 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1631 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1632 { /* Home Agent 0 */
1633 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1634 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1636 { /* Home Agent 1 */
1637 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1638 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1640 { /* MC0 Channel 0 */
1641 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1642 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1644 { /* MC0 Channel 1 */
1645 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1646 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1648 { /* MC0 Channel 3 */
1649 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1650 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1652 { /* MC0 Channel 4 */
1653 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1654 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1656 { /* MC1 Channel 0 */
1657 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1658 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1660 { /* MC1 Channel 1 */
1661 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1662 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1664 { /* MC1 Channel 3 */
1665 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1666 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1668 { /* MC1 Channel 4 */
1669 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1670 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1673 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1674 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1677 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1678 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1681 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1682 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
1685 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1686 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
1689 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1690 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
1692 { /* R3QPI0 Link 0 */
1693 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1694 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
1696 { /* R3QPI0 Link 1 */
1697 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1698 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
1700 { /* R3QPI1 Link 2 */
1701 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1702 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
1704 { /* QPI Port 0 filter */
1705 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1706 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1707 SNBEP_PCI_QPI_PORT0_FILTER),
1709 { /* QPI Port 0 filter */
1710 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1711 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1712 SNBEP_PCI_QPI_PORT1_FILTER),
1714 { /* end: all zeroes */ }
1717 static struct pci_driver ivbep_uncore_pci_driver = {
1718 .name = "ivbep_uncore",
1719 .id_table = ivbep_uncore_pci_ids,
/*
 * Register IVT PCI uncore support: build the bus -> physical-id map
 * (0x0e1e is presumably the IVT UBOX PCI device id -- confirm), then
 * install the IVT PCI uncore type table and PCI driver.
 */
1722 int ivbep_uncore_pci_init(void)
1724 int ret = snbep_pci2phy_map_init(0x0e1e);
1727 uncore_pci_uncores = ivbep_pci_uncores;
1728 uncore_pci_driver = &ivbep_uncore_pci_driver;
1731 /* end of IvyTown uncore support */
1733 /* Haswell-EP uncore support */
1734 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
1735 &format_attr_event.attr,
1736 &format_attr_umask.attr,
1737 &format_attr_edge.attr,
1738 &format_attr_inv.attr,
1739 &format_attr_thresh5.attr,
1740 &format_attr_filter_tid2.attr,
1741 &format_attr_filter_cid.attr,
1745 static struct attribute_group hswep_uncore_ubox_format_group = {
1747 .attrs = hswep_uncore_ubox_formats_attr,
/*
 * Route config1, masked to the valid U-box filter bits, into the single
 * HSW-EP U-box filter MSR via the event's extra register.
 */
1750 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1752 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1753 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
1754 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
1759 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
1760 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1761 .hw_config = hswep_ubox_hw_config,
1762 .get_constraint = uncore_get_constraint,
1763 .put_constraint = uncore_put_constraint,
1766 static struct intel_uncore_type hswep_uncore_ubox = {
1770 .perf_ctr_bits = 44,
1771 .fixed_ctr_bits = 48,
1772 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
1773 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
1774 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
1775 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
1776 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
1777 .num_shared_regs = 1,
1778 .ops = &hswep_uncore_ubox_ops,
1779 .format_group = &hswep_uncore_ubox_format_group,
1782 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
1783 &format_attr_event.attr,
1784 &format_attr_umask.attr,
1785 &format_attr_edge.attr,
1786 &format_attr_tid_en.attr,
1787 &format_attr_thresh8.attr,
1788 &format_attr_filter_tid3.attr,
1789 &format_attr_filter_link2.attr,
1790 &format_attr_filter_state3.attr,
1791 &format_attr_filter_nid2.attr,
1792 &format_attr_filter_opc2.attr,
1793 &format_attr_filter_nc.attr,
1794 &format_attr_filter_c6.attr,
1795 &format_attr_filter_isoc.attr,
1799 static struct attribute_group hswep_uncore_cbox_format_group = {
1801 .attrs = hswep_uncore_cbox_formats_attr,
1804 static struct event_constraint hswep_uncore_cbox_constraints[] = {
1805 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
1806 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
1807 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
1808 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
1809 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
1810 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
1811 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
1812 EVENT_CONSTRAINT_END
1815 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
1816 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1817 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1818 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1819 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1820 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1821 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1822 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
1823 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
1824 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1825 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
1826 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
1827 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
1828 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
1829 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
1830 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
1831 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1832 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1833 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1834 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1835 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1836 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1837 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1838 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1839 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1840 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1841 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1842 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1843 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1844 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1845 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1846 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1847 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1848 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1849 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1850 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1851 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1852 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1853 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
/*
 * Translate C-box filter field-selector bits into the HSW-EP filter
 * register bit mask; mirrors ivbep_cbox_filter_mask but with the HSW-EP
 * field layout.  Selector bit 0x10 enables the opcode-match group
 * (OPC/NC/C6/ISOC together).
 * NOTE(review): the guard conditions for TID/LINK/STATE/NID (presumably
 * bits 0x1/0x2/0x4/0x8) are not visible in this extract.
 */
1857 static u64 hswep_cbox_filter_mask(int fields)
1861 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
1863 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1865 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1867 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
1868 if (fields & 0x10) {
1869 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1870 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
1871 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
1872 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1877 static struct event_constraint *
1878 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1880 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
/*
 * Look the event up in the HSW-EP C-box extra-register table and, on a
 * match, attach the per-box FILTER0 register (base plus the HSW-EP
 * per-C-box MSR stride) with config1 masked down to the filter fields
 * this event may use.
 * NOTE(review): the loop body that captures the matched idx, and the
 * function's braces/return, are not visible in this extract.
 */
1883 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1885 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1886 struct extra_reg *er;
1889 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
1890 if (er->event != (event->hw.config & er->config_mask))
1896 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1897 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1898 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
/*
 * Enable a HSW-EP C-box event.  A 64-bit filter value is split across
 * two 32-bit MSR writes; the high half goes to reg + 1, i.e. the filter
 * MSRs appear to be adjacent on HSW-EP (FILTER0/FILTER1 -- consistent
 * with the HSWEP_C0_MSR_PMON_BOX_FILTER0 name; contrast IVT's reg + 6).
 * Then the event control register is armed with the enable bit.
 */
1904 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
1905 struct perf_event *event)
1907 struct hw_perf_event *hwc = &event->hw;
1908 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1910 if (reg1->idx != EXTRA_REG_NONE) {
1911 u64 filter = uncore_shared_reg_config(box, 0);
1912 wrmsrl(reg1->reg, filter & 0xffffffff);
1913 wrmsrl(reg1->reg + 1, filter >> 32);
1916 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1919 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
1920 .init_box = snbep_uncore_msr_init_box,
1921 .disable_box = snbep_uncore_msr_disable_box,
1922 .enable_box = snbep_uncore_msr_enable_box,
1923 .disable_event = snbep_uncore_msr_disable_event,
1924 .enable_event = hswep_cbox_enable_event,
1925 .read_counter = uncore_msr_read_counter,
1926 .hw_config = hswep_cbox_hw_config,
1927 .get_constraint = hswep_cbox_get_constraint,
1928 .put_constraint = snbep_cbox_put_constraint,
1931 static struct intel_uncore_type hswep_uncore_cbox = {
1935 .perf_ctr_bits = 48,
1936 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
1937 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
1938 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1939 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
1940 .msr_offset = HSWEP_CBO_MSR_OFFSET,
1941 .num_shared_regs = 1,
1942 .constraints = hswep_uncore_cbox_constraints,
1943 .ops = &hswep_uncore_cbox_ops,
1944 .format_group = &hswep_uncore_cbox_format_group,
1948 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
/*
 * Per the note above: program the SBOX init value one bit at a time,
 * accumulating the already-written bits in 'flags', to avoid spurious
 * #GPs from writing the full control value in one MSR write.
 * NOTE(review): the wrmsrl call(s) inside the loop and the surrounding
 * braces are not visible in this extract.
 */
1950 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
1952 unsigned msr = uncore_msr_box_ctl(box);
1955 u64 init = SNBEP_PMON_BOX_CTL_INT;
1959 for_each_set_bit(i, (unsigned long *)&init, 64) {
1960 flags |= (1ULL << i);
1966 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
1967 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1968 .init_box = hswep_uncore_sbox_msr_init_box
1971 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
1972 &format_attr_event.attr,
1973 &format_attr_umask.attr,
1974 &format_attr_edge.attr,
1975 &format_attr_tid_en.attr,
1976 &format_attr_inv.attr,
1977 &format_attr_thresh8.attr,
1981 static struct attribute_group hswep_uncore_sbox_format_group = {
1983 .attrs = hswep_uncore_sbox_formats_attr,
1986 static struct intel_uncore_type hswep_uncore_sbox = {
1990 .perf_ctr_bits = 44,
1991 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
1992 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
1993 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
1994 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
1995 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
1996 .ops = &hswep_uncore_sbox_msr_ops,
1997 .format_group = &hswep_uncore_sbox_format_group,
2000 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2002 struct hw_perf_event *hwc = &event->hw;
2003 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2004 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2006 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2007 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2008 reg1->idx = ev_sel - 0xb;
2009 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2014 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2015 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2016 .hw_config = hswep_pcu_hw_config,
2017 .get_constraint = snbep_pcu_get_constraint,
2018 .put_constraint = snbep_pcu_put_constraint,
2021 static struct intel_uncore_type hswep_uncore_pcu = {
2025 .perf_ctr_bits = 48,
2026 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2027 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2028 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2029 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2030 .num_shared_regs = 1,
2031 .ops = &hswep_uncore_pcu_ops,
2032 .format_group = &snbep_uncore_pcu_format_group,
2035 static struct intel_uncore_type *hswep_msr_uncores[] = {
/*
 * Install the HSW-EP MSR-based uncore types.  Clamp the number of
 * C-boxes to the package's core count, and detect 6-8 core parts --
 * identified by CAPID4 bits 7:6 == 0, read from the extra PCU_3 PCI
 * device -- which expose only two SBOXes.
 * NOTE(review): the capid4 declaration and the config offset of the
 * read are not visible in this extract.
 */
2043 void hswep_uncore_cpu_init(void)
2045 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2046 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2048 /* Detect 6-8 core systems with only two SBOXes */
2049 if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
2052 pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
2054 if (((capid4 >> 6) & 0x3) == 0)
2055 hswep_uncore_sbox.num_boxes = 2;
2058 uncore_msr_uncores = hswep_msr_uncores;
2061 static struct intel_uncore_type hswep_uncore_ha = {
2065 .perf_ctr_bits = 48,
2066 SNBEP_UNCORE_PCI_COMMON_INIT(),
2069 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2070 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2071 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2072 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2073 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2074 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2075 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2076 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2077 { /* end: all zeroes */ },
2080 static struct intel_uncore_type hswep_uncore_imc = {
2084 .perf_ctr_bits = 48,
2085 .fixed_ctr_bits = 48,
2086 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2087 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2088 .event_descs = hswep_uncore_imc_events,
2089 SNBEP_UNCORE_PCI_COMMON_INIT(),
2092 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
/*
 * Read a 64-bit HSW-EP IRP counter as two 32-bit PCI config reads (low
 * dword at the table offset, high dword 4 bytes above), assembled in
 * place through a (u32 *) view of the local count; fine on x86
 * (little-endian).  Same scheme as the IVT variant, with HSW-EP's own
 * counter offset table.
 * NOTE(review): the declaration and return of 'count' are not visible
 * in this extract.
 */
2094 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2096 struct pci_dev *pdev = box->pci_dev;
2097 struct hw_perf_event *hwc = &event->hw;
2100 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2101 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2106 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2107 .init_box = snbep_uncore_pci_init_box,
2108 .disable_box = snbep_uncore_pci_disable_box,
2109 .enable_box = snbep_uncore_pci_enable_box,
2110 .disable_event = ivbep_uncore_irp_disable_event,
2111 .enable_event = ivbep_uncore_irp_enable_event,
2112 .read_counter = hswep_uncore_irp_read_counter,
/*
 * HSW-EP IRP PCI uncore PMON box: 48-bit counters, common SNB-EP event
 * encoding, but dedicated ops because counter reads are non-standard
 * (see hswep_uncore_irp_ops / hswep_uncore_irp_read_counter above).
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2115 static struct intel_uncore_type hswep_uncore_irp = {
2119 .perf_ctr_bits = 48,
2120 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2121 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2122 .ops = &hswep_uncore_irp_ops,
2123 .format_group = &snbep_uncore_format_group,
/*
 * HSW-EP QPI link PCI uncore PMON box: standard SNB-EP PCI counter layout,
 * QPI-specific raw-event mask/format group, and one shared register slot
 * (used by the SNB-EP QPI ops for the match/mask filter registers).
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2126 static struct intel_uncore_type hswep_uncore_qpi = {
2130 .perf_ctr_bits = 48,
2131 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2132 .event_ctl = SNBEP_PCI_PMON_CTL0,
2133 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2134 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2135 .num_shared_regs = 1,
2136 .ops = &snbep_uncore_qpi_ops,
2137 .format_group = &snbep_uncore_qpi_format_group,
2140 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2141 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2142 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2143 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2144 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2145 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2146 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2147 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2148 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2149 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2150 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2151 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2152 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2153 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2154 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2155 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2156 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2157 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2158 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2159 EVENT_CONSTRAINT_END
/*
 * HSW-EP R2PCIe PCI uncore PMON box: 48-bit counters with the per-event
 * scheduling constraints defined above.
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2162 static struct intel_uncore_type hswep_uncore_r2pcie = {
2166 .perf_ctr_bits = 48,
2167 .constraints = hswep_uncore_r2pcie_constraints,
2168 SNBEP_UNCORE_PCI_COMMON_INIT(),
2171 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2172 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2173 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2174 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2175 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2176 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2177 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2178 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2179 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2180 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
2181 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2182 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
2183 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
2184 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
2185 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
2186 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
2187 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
2188 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2189 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
2190 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2191 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2192 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2193 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2194 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2195 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
2196 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
2197 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
2198 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2199 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2200 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2201 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
2202 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
2203 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2204 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
2205 EVENT_CONSTRAINT_END
/*
 * HSW-EP R3QPI PCI uncore PMON box.  Note the narrower 44-bit counters
 * (unlike the 48-bit counters of the other HSW-EP PCI boxes).
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2208 static struct intel_uncore_type hswep_uncore_r3qpi = {
2212 .perf_ctr_bits = 44,
2213 .constraints = hswep_uncore_r3qpi_constraints,
2214 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * Indices of the HSW-EP PCI uncore types; used as designated-initializer
 * indices in hswep_pci_uncores[] below and as the type id encoded into
 * pci_device_id.driver_data.
 *
 * Fix: restored the anonymous "enum { ... };" wrapper that was dropped from
 * the listing — the bare enumerators are otherwise invalid C.  The order
 * matches the designated initializers in hswep_pci_uncores[].
 */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2226 static struct intel_uncore_type *hswep_pci_uncores[] = {
2227 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
2228 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
2229 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
2230 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
2231 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
2232 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
2236 static const struct pci_device_id hswep_uncore_pci_ids[] = {
2237 { /* Home Agent 0 */
2238 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
2239 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
2241 { /* Home Agent 1 */
2242 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
2243 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
2245 { /* MC0 Channel 0 */
2246 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
2247 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
2249 { /* MC0 Channel 1 */
2250 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
2251 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
2253 { /* MC0 Channel 2 */
2254 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
2255 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
2257 { /* MC0 Channel 3 */
2258 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
2259 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
2261 { /* MC1 Channel 0 */
2262 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
2263 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
2265 { /* MC1 Channel 1 */
2266 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
2267 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
2269 { /* MC1 Channel 2 */
2270 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
2271 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
2273 { /* MC1 Channel 3 */
2274 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
2275 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
2278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
2279 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
2282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
2283 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
2286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
2287 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
2290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
2291 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
2294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
2295 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
2297 { /* R3QPI0 Link 0 */
2298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
2299 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
2301 { /* R3QPI0 Link 1 */
2302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
2303 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
2305 { /* R3QPI1 Link 2 */
2306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
2307 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
2309 { /* QPI Port 0 filter */
2310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
2311 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2312 SNBEP_PCI_QPI_PORT0_FILTER),
2314 { /* QPI Port 1 filter */
2315 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
2316 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2317 SNBEP_PCI_QPI_PORT1_FILTER),
2319 { /* PCU.3 (for Capability registers) */
2320 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
2321 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2324 { /* end: all zeroes */ }
2327 static struct pci_driver hswep_uncore_pci_driver = {
2328 .name = "hswep_uncore",
2329 .id_table = hswep_uncore_pci_ids,
2332 int hswep_uncore_pci_init(void)
2334 int ret = snbep_pci2phy_map_init(0x2f1e);
2337 uncore_pci_uncores = hswep_pci_uncores;
2338 uncore_pci_driver = &hswep_uncore_pci_driver;
2341 /* end of Haswell-EP uncore support */
2343 /* BDX-DE uncore support */
/*
 * BDX-DE Ubox MSR uncore PMON: 48-bit general counters plus a 48-bit fixed
 * UCLK counter; reuses the IVB-EP MSR ops and ubox format group with the
 * HSW-EP register addresses.
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2345 static struct intel_uncore_type bdx_uncore_ubox = {
2349 .perf_ctr_bits = 48,
2350 .fixed_ctr_bits = 48,
2351 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2352 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2353 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2354 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2355 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2356 .num_shared_regs = 1,
2357 .ops = &ivbep_uncore_msr_ops,
2358 .format_group = &ivbep_uncore_ubox_format_group,
2361 static struct event_constraint bdx_uncore_cbox_constraints[] = {
2362 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
2363 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2364 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2365 EVENT_CONSTRAINT_END
/*
 * BDX-DE CBOX MSR uncore PMON: per-box MSR stride HSWEP_CBO_MSR_OFFSET,
 * TID-enable-capable event mask, the constraints defined above, and the
 * HSW-EP CBOX ops/format group.
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2368 static struct intel_uncore_type bdx_uncore_cbox = {
2372 .perf_ctr_bits = 48,
2373 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2374 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2375 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2376 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2377 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2378 .num_shared_regs = 1,
2379 .constraints = bdx_uncore_cbox_constraints,
2380 .ops = &hswep_uncore_cbox_ops,
2381 .format_group = &hswep_uncore_cbox_format_group,
/*
 * Table of BDX-DE MSR-based uncore PMON types, installed as
 * uncore_msr_uncores in bdx_uncore_cpu_init() below.
 * NOTE(review): the array elements and the closing "};" are not visible in
 * this excerpt — confirm the member list against the full file.
 */
2384 static struct intel_uncore_type *bdx_msr_uncores[] = {
2391 void bdx_uncore_cpu_init(void)
2393 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2394 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2395 uncore_msr_uncores = bdx_msr_uncores;
/*
 * BDX-DE Home Agent PCI uncore PMON box: 48-bit counters with the common
 * SNB-EP PCI PMON register layout.
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2398 static struct intel_uncore_type bdx_uncore_ha = {
2402 .perf_ctr_bits = 48,
2403 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * BDX-DE IMC PCI uncore PMON box: same layout and sysfs event aliases
 * (hswep_uncore_imc_events) as the HSW-EP IMC.
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2406 static struct intel_uncore_type bdx_uncore_imc = {
2410 .perf_ctr_bits = 48,
2411 .fixed_ctr_bits = 48,
2412 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2413 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2414 .event_descs = hswep_uncore_imc_events,
2415 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * BDX-DE IRP PCI uncore PMON box: reuses the HSW-EP IRP ops (config-space
 * counter reads) with the common SNB-EP event encoding.
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2418 static struct intel_uncore_type bdx_uncore_irp = {
2422 .perf_ctr_bits = 48,
2423 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2424 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2425 .ops = &hswep_uncore_irp_ops,
2426 .format_group = &snbep_uncore_format_group,
2430 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
2431 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2432 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2433 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2434 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2435 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2436 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2437 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2438 EVENT_CONSTRAINT_END
/*
 * BDX-DE R2PCIe PCI uncore PMON box: 48-bit counters with the per-event
 * scheduling constraints defined above.
 * NOTE(review): the .name/.num_counters/.num_boxes initializers and the
 * closing "};" are not visible in this excerpt.
 */
2441 static struct intel_uncore_type bdx_uncore_r2pcie = {
2445 .perf_ctr_bits = 48,
2446 .constraints = bdx_uncore_r2pcie_constraints,
2447 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * Indices of the BDX-DE PCI uncore types; used as designated-initializer
 * indices in bdx_pci_uncores[] below and as the type id encoded into
 * pci_device_id.driver_data.
 *
 * Fix: the listing dropped the "enum {" wrapper and the HA/IMC/IRP
 * enumerators; they are reconstructed here from the designated initializers
 * in bdx_pci_uncores[] (same order), plus the closing "};".
 */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_R2PCIE,
};
2457 static struct intel_uncore_type *bdx_pci_uncores[] = {
2458 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
2459 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
2460 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
2461 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
2465 static const struct pci_device_id bdx_uncore_pci_ids[] = {
2466 { /* Home Agent 0 */
2467 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
2468 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
2470 { /* MC0 Channel 0 */
2471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
2472 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
2474 { /* MC0 Channel 1 */
2475 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
2476 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
2479 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
2480 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
2483 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
2484 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
2486 { /* end: all zeroes */ }
2489 static struct pci_driver bdx_uncore_pci_driver = {
2490 .name = "bdx_uncore",
2491 .id_table = bdx_uncore_pci_ids,
2494 int bdx_uncore_pci_init(void)
2496 int ret = snbep_pci2phy_map_init(0x6f1e);
2500 uncore_pci_uncores = bdx_pci_uncores;
2501 uncore_pci_driver = &bdx_uncore_pci_driver;
2505 /* end of BDX-DE uncore support */