2 * Performance events - AMD IBS
4 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
6 * For licensing details see kernel-base/COPYING
9 #include <linux/perf_event.h>
10 #include <linux/init.h>
11 #include <linux/export.h>
12 #include <linux/pci.h>
13 #include <linux/ptrace.h>
14 #include <linux/syscore_ops.h>
15 #include <linux/sched/clock.h>
19 #include "../perf_event.h"
23 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
25 #include <linux/kprobes.h>
26 #include <linux/hardirq.h>
30 #define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
31 #define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
37 * ENABLED; tracks the pmu::add(), pmu::del() state; when set, the counter is taken
38 * and any further add()s must fail.
40 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
41 * complicated by the fact that the IBS hardware can send late NMIs (i.e. after
42 * we've cleared the EN bit).
44 * In order to consume these late NMIs we have the STOPPED state; any NMI that
45 * happens after we've cleared the EN bit will clear this bit and report the
46 * NMI handled (this is fundamentally racy in the face of multiple NMI sources:
47 * someone else can consume our bit and our NMI will go unhandled).
49 * And since we cannot set/clear this separate bit together with the EN bit,
50 * there are races; if we cleared STARTED early, an NMI could land in
51 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
52 * could happen if the period is small enough), and consume our STOPPED bit
53 * and trigger streams of unhandled NMIs.
55 * If, however, we clear STARTED late, an NMI can hit between clearing the
56 * EN bit and clearing STARTED, still see STARTED set and process the event.
57 * If the sample has the VALID bit clear, we bail out properly, but this
58 * is not a given. With VALID set we can end up calling pmu::stop() again
59 * (the throttle logic) and trigger the WARNs in there.
61 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
62 * nesting, and clear STARTED late, so that we have a well defined state over
63 * the clearing of the EN bit.
65 * XXX: we could probably be using !atomic bitops for all this.
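/*
 * A minimal sketch, not the verbatim kernel declaration, of the per-cpu state
 * bits implied by the set_bit()/test_bit() users below; the exact declaration
 * and ordering in the real file may differ.
 */
enum ibs_states {
	IBS_ENABLED,	/* taken by pmu::add(), released by pmu::del() */
	IBS_STARTED,	/* hardware may be generating NMIs */
	IBS_STOPPING,	/* pmu::stop() in progress, suppresses nested stop */
	IBS_STOPPED,	/* EN cleared, consume any late NMI */

	IBS_MAX_STATES,
};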
78 struct perf_event *event;
79 unsigned long state[BITS_TO_LONGS(IBS_MAX_STATES)];
90 unsigned long offset_mask[1];
92 unsigned int fetch_count_reset_broken : 1;
93 unsigned int fetch_ignore_if_zero_rip : 1;
94 struct cpu_perf_ibs __percpu *pcpu;
96 struct attribute **format_attrs;
97 struct attribute_group format_group;
98 const struct attribute_group *attr_groups[2];
100 u64 (*get_count)(u64 config);
103 struct perf_ibs_data {
106 u32 data[0]; /* data buffer starts here */
109 u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
113 perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
115 s64 left = local64_read(&hwc->period_left);
116 s64 period = hwc->sample_period;
120 * If we are way outside a reasonable range then just skip forward:
122 if (unlikely(left <= -period)) {
124 local64_set(&hwc->period_left, left);
125 hwc->last_period = period;
129 if (unlikely(left < (s64)min)) {
131 local64_set(&hwc->period_left, left);
132 hwc->last_period = period;
137 * If the hw period that triggers the sw overflow is too short
138 * we might hit the irq handler. This biases the results.
139 * Thus we shorten the next-to-last period and set the last
140 * period to the max period.
150 *hw_period = (u64)left;
156 perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
158 struct hw_perf_event *hwc = &event->hw;
159 int shift = 64 - width;
164 * Careful: an NMI might modify the previous event value.
166 * Our tactic to handle this is to first atomically read and
167 * exchange a new raw count - then add that new-prev delta
168 * count to the generic event atomically:
170 prev_raw_count = local64_read(&hwc->prev_count);
171 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
172 new_raw_count) != prev_raw_count)
176 * Now we have the new raw value and have updated the prev
177 * timestamp already. We can now calculate the elapsed delta
178 * (event-)time and add that to the generic event.
180 * Careful, not all hw sign-extends above the physical width of the count.
183 delta = (new_raw_count << shift) - (prev_raw_count << shift);
186 local64_add(delta, &event->count);
187 local64_sub(delta, &hwc->period_left);
192 static struct perf_ibs perf_ibs_fetch;
193 static struct perf_ibs perf_ibs_op;
195 static struct perf_ibs *get_ibs_pmu(int type)
197 if (perf_ibs_fetch.pmu.type == type)
198 return &perf_ibs_fetch;
199 if (perf_ibs_op.pmu.type == type)
205 * Use IBS for precise event sampling:
207 * perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count
208 * perf record -a -e r076:p ... # same as -e cpu-cycles:p
209 * perf record -a -e r0C1:p ... # use ibs op counting micro-ops
211 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
212 * MSRC001_1033) is used to select either cycle or micro-ops counting mode.
215 * The rip of IBS samples has skid 0. Thus, IBS supports precise
216 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
217 * rip is invalid when IBS was not able to record the rip correctly.
218 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
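/*
 * Illustrative user-space sketch (not part of this driver) of requesting the
 * precise cycles sampling described above; precise_ip = 1 corresponds to the
 * cpu-cycles:p example, all other field values are arbitrary example choices.
 *
 *	#include <linux/perf_event.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int open_precise_cycles(void)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size          = sizeof(attr);
 *		attr.type          = PERF_TYPE_HARDWARE;
 *		attr.config        = PERF_COUNT_HW_CPU_CYCLES;
 *		attr.sample_period = 100000;
 *		attr.sample_type   = PERF_SAMPLE_IP;
 *		attr.precise_ip    = 1;
 *
 *		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	}
 */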
221 static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
223 switch (event->attr.precise_ip) {
233 switch (event->attr.type) {
234 case PERF_TYPE_HARDWARE:
235 switch (event->attr.config) {
236 case PERF_COUNT_HW_CPU_CYCLES:
242 switch (event->attr.config) {
247 *config = IBS_OP_CNT_CTL;
258 static const struct perf_event_attr ibs_notsupp = {
267 static int perf_ibs_init(struct perf_event *event)
269 struct hw_perf_event *hwc = &event->hw;
270 struct perf_ibs *perf_ibs;
274 perf_ibs = get_ibs_pmu(event->attr.type);
276 config = event->attr.config;
278 perf_ibs = &perf_ibs_op;
279 ret = perf_ibs_precise_event(event, &config);
284 if (event->pmu != &perf_ibs->pmu)
287 if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
290 if (config & ~perf_ibs->config_mask)
293 if (hwc->sample_period) {
294 if (config & perf_ibs->cnt_mask)
295 /* raw max_cnt may not be set */
297 if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
299 * the lower 4 bits cannot be set in ibs max cnt,
300 * but we allow it in case we adjust the
301 * sample period to set a frequency.
304 hwc->sample_period &= ~0x0FULL;
305 if (!hwc->sample_period)
306 hwc->sample_period = 0x10;
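	/*
	 * Example of the effect of the masking above: a period of 0x1007 is
	 * rounded down to 0x1000, and a result of 0 is bumped to the 0x10
	 * minimum.
	 */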
308 max_cnt = config & perf_ibs->cnt_mask;
309 config &= ~perf_ibs->cnt_mask;
310 event->attr.sample_period = max_cnt << 4;
311 hwc->sample_period = event->attr.sample_period;
314 if (!hwc->sample_period)
318 * If we modify hwc->sample_period, we also need to update
319 * hwc->last_period and hwc->period_left.
321 hwc->last_period = hwc->sample_period;
322 local64_set(&hwc->period_left, hwc->sample_period);
324 hwc->config_base = perf_ibs->msr;
325 hwc->config = config;
328 * rip recorded by IbsOpRip will not be consistent with rsp and rbp
329 * recorded as part of interrupt regs. Thus we need to use rip from
330 * interrupt regs while unwinding the call stack. Setting the _EARLY flag
331 * makes sure we unwind the call-stack before the perf sample rip is set to IbsOpRip.
334 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
335 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
340 static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
341 struct hw_perf_event *hwc, u64 *period)
345 /* ignore lower 4 bits in min count: */
346 overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
347 local64_set(&hwc->prev_count, 0);
352 static u64 get_ibs_fetch_count(u64 config)
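	/*
	 * Assumption about the register layout (per the IBS_FETCH_* masks in
	 * perf_event.h): IbsFetchCnt lives in bits 31:16 of IbsFetchCtl and
	 * holds bits 19:4 of the fetch counter, so shifting the masked value
	 * right by 12 instead of 16 already applies the implicit *16 scaling.
	 */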
354 return (config & IBS_FETCH_CNT) >> 12;
357 static u64 get_ibs_op_count(u64 config)
362 * If the internal 27-bit counter rolled over, the count is MaxCnt
363 * and the lower 7 bits of CurCnt are randomized.
364 * Otherwise CurCnt has the full 27-bit current counter value.
366 if (config & IBS_OP_VAL)
367 count = (config & IBS_OP_MAX_CNT) << 4;
368 else if (ibs_caps & IBS_CAPS_RDWROPCNT)
369 count = (config & IBS_OP_CUR_CNT) >> 32;
375 perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
378 u64 count = perf_ibs->get_count(*config);
381 * Set width to 64 since we do not overflow on max width but
382 * instead on max count. In perf_ibs_set_period() we clear
383 * prev count manually on overflow.
385 while (!perf_event_try_update(event, count, 64)) {
386 rdmsrl(event->hw.config_base, *config);
387 count = perf_ibs->get_count(*config);
391 static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
392 struct hw_perf_event *hwc, u64 config)
394 u64 tmp = hwc->config | config;
396 if (perf_ibs->fetch_count_reset_broken)
397 wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
399 wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
403 * Erratum #420 Instruction-Based Sampling Engine May Generate
404 * Interrupt that Cannot Be Cleared:
406 * Must clear counter mask first, then clear the enable bit. See
407 * Revision Guide for AMD Family 10h Processors, Publication #41322.
409 static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
410 struct hw_perf_event *hwc, u64 config)
412 config &= ~perf_ibs->cnt_mask;
413 if (boot_cpu_data.x86 == 0x10)
414 wrmsrl(hwc->config_base, config);
415 config &= ~perf_ibs->enable_mask;
416 wrmsrl(hwc->config_base, config);
420 * We cannot restore the ibs pmu state, so we always need to update
421 * the event while stopping it and then reset the state when starting
422 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags in
423 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
425 static void perf_ibs_start(struct perf_event *event, int flags)
427 struct hw_perf_event *hwc = &event->hw;
428 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
429 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
432 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
435 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
438 perf_ibs_set_period(perf_ibs, hwc, &period);
440 * Set STARTED before enabling the hardware, such that a subsequent NMI must observe it.
443 set_bit(IBS_STARTED, pcpu->state);
444 clear_bit(IBS_STOPPING, pcpu->state);
445 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
447 perf_event_update_userpage(event);
450 static void perf_ibs_stop(struct perf_event *event, int flags)
452 struct hw_perf_event *hwc = &event->hw;
453 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
454 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
458 if (test_and_set_bit(IBS_STOPPING, pcpu->state))
461 stopping = test_bit(IBS_STARTED, pcpu->state);
463 if (!stopping && (hwc->state & PERF_HES_UPTODATE))
466 rdmsrl(hwc->config_base, config);
470 * Set STOPPED before disabling the hardware, such that it
471 * must be visible to NMIs the moment we clear the EN bit,
472 * at which point we can generate an !VALID sample which
473 * we need to consume.
475 set_bit(IBS_STOPPED, pcpu->state);
476 perf_ibs_disable_event(perf_ibs, hwc, config);
478 * Clear STARTED after disabling the hardware; if it were
479 * cleared first, an NMI hitting after the clear but before
480 * clearing the EN bit might think it a spurious NMI and not handle it.
483 * Clearing it after, however, creates the problem of the NMI
484 * handler seeing STARTED but not having a valid sample.
486 clear_bit(IBS_STARTED, pcpu->state);
487 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
488 hwc->state |= PERF_HES_STOPPED;
491 if (hwc->state & PERF_HES_UPTODATE)
495 * Clear the valid bit so that rollovers are not counted in this update;
496 * rollovers are only accounted for in the irq handler.
498 config &= ~perf_ibs->valid_mask;
500 perf_ibs_event_update(perf_ibs, event, &config);
501 hwc->state |= PERF_HES_UPTODATE;
504 static int perf_ibs_add(struct perf_event *event, int flags)
506 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
507 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
509 if (test_and_set_bit(IBS_ENABLED, pcpu->state))
512 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
516 if (flags & PERF_EF_START)
517 perf_ibs_start(event, PERF_EF_RELOAD);
522 static void perf_ibs_del(struct perf_event *event, int flags)
524 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
525 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
527 if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
530 perf_ibs_stop(event, PERF_EF_UPDATE);
534 perf_event_update_userpage(event);
537 static void perf_ibs_read(struct perf_event *event) { }
539 PMU_FORMAT_ATTR(rand_en, "config:57");
540 PMU_FORMAT_ATTR(cnt_ctl, "config:19");
542 static struct attribute *ibs_fetch_format_attrs[] = {
543 &format_attr_rand_en.attr,
547 static struct attribute *ibs_op_format_attrs[] = {
548 NULL, /* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
552 static struct perf_ibs perf_ibs_fetch = {
554 .task_ctx_nr = perf_invalid_context,
556 .event_init = perf_ibs_init,
559 .start = perf_ibs_start,
560 .stop = perf_ibs_stop,
561 .read = perf_ibs_read,
563 .msr = MSR_AMD64_IBSFETCHCTL,
564 .config_mask = IBS_FETCH_CONFIG_MASK,
565 .cnt_mask = IBS_FETCH_MAX_CNT,
566 .enable_mask = IBS_FETCH_ENABLE,
567 .valid_mask = IBS_FETCH_VAL,
568 .max_period = IBS_FETCH_MAX_CNT << 4,
569 .offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
570 .offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
571 .format_attrs = ibs_fetch_format_attrs,
573 .get_count = get_ibs_fetch_count,
576 static struct perf_ibs perf_ibs_op = {
578 .task_ctx_nr = perf_invalid_context,
580 .event_init = perf_ibs_init,
583 .start = perf_ibs_start,
584 .stop = perf_ibs_stop,
585 .read = perf_ibs_read,
587 .msr = MSR_AMD64_IBSOPCTL,
588 .config_mask = IBS_OP_CONFIG_MASK,
589 .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
591 .enable_mask = IBS_OP_ENABLE,
592 .valid_mask = IBS_OP_VAL,
593 .max_period = IBS_OP_MAX_CNT << 4,
594 .offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
595 .offset_max = MSR_AMD64_IBSOP_REG_COUNT,
596 .format_attrs = ibs_op_format_attrs,
598 .get_count = get_ibs_op_count,
601 static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
603 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
604 struct perf_event *event = pcpu->event;
605 struct hw_perf_event *hwc;
606 struct perf_sample_data data;
607 struct perf_raw_record raw;
609 struct perf_ibs_data ibs_data;
610 int offset, size, check_rip, offset_max, throttle = 0;
612 u64 *buf, *config, period;
614 if (!test_bit(IBS_STARTED, pcpu->state)) {
617 * Catch spurious interrupts after stopping IBS: After
618 * disabling IBS there could still be incoming NMIs
619 * with samples that even have the valid bit cleared.
620 * Mark all these NMIs as handled.
622 if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
628 if (WARN_ON_ONCE(!event))
632 msr = hwc->config_base;
635 if (!(*buf++ & perf_ibs->valid_mask))
638 config = &ibs_data.regs[0];
639 perf_ibs_event_update(perf_ibs, event, config);
640 perf_sample_data_init(&data, 0, hwc->last_period);
641 if (!perf_ibs_set_period(perf_ibs, hwc, &period))
642 goto out; /* no sw counter overflow */
644 ibs_data.caps = ibs_caps;
647 check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
648 if (event->attr.sample_type & PERF_SAMPLE_RAW)
649 offset_max = perf_ibs->offset_max;
655 rdmsrl(msr + offset, *buf++);
657 offset = find_next_bit(perf_ibs->offset_mask,
658 perf_ibs->offset_max,
660 } while (offset < offset_max);
662 * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
663 * depending on their availability.
664 * Can't add them to offset_max as they are staggered.
666 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
667 if (perf_ibs == &perf_ibs_op) {
668 if (ibs_caps & IBS_CAPS_BRNTRGT) {
669 rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
672 if (ibs_caps & IBS_CAPS_OPDATA4) {
673 rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
677 if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
678 rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
682 ibs_data.size = sizeof(u64) * size;
685 if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
686 regs.flags &= ~PERF_EFLAGS_EXACT;
688 /* Workaround for erratum #1197 */
689 if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
692 set_linear_ip(&regs, ibs_data.regs[1]);
693 regs.flags |= PERF_EFLAGS_EXACT;
696 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
697 raw = (struct perf_raw_record){
699 .size = sizeof(u32) + ibs_data.size,
700 .data = ibs_data.data,
707 * rip recorded by IbsOpRip will not be consistent with rsp and rbp
708 * recorded as part of interrupt regs. Thus we need to use rip from
709 * interrupt regs while unwinding call stack.
711 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
712 data.callchain = perf_callchain(event, iregs);
714 throttle = perf_event_overflow(event, &data, &regs);
717 perf_ibs_stop(event, 0);
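		/*
		 * Assumption about the intent below (not stated in this file):
		 * carrying the IBS_OP_CUR_CNT_RAND bits of the old IbsOpCtl
		 * value over into the restart preserves the hardware-randomized
		 * low bits of IbsOpCurCnt (see get_ibs_op_count()) so the
		 * sampling interval stays randomized.
		 */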
721 if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
722 (*config & IBS_OP_CNT_CTL))
723 period |= *config & IBS_OP_CUR_CNT_RAND;
725 perf_ibs_enable_event(perf_ibs, hwc, period);
728 perf_event_update_userpage(event);
734 perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
736 u64 stamp = sched_clock();
739 handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
740 handled += perf_ibs_handle_irq(&perf_ibs_op, regs);
743 inc_irq_stat(apic_perf_irqs);
745 perf_sample_event_took(sched_clock() - stamp);
749 NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
751 static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
753 struct cpu_perf_ibs __percpu *pcpu;
756 pcpu = alloc_percpu(struct cpu_perf_ibs);
760 perf_ibs->pcpu = pcpu;
762 /* register attributes */
763 if (perf_ibs->format_attrs[0]) {
764 memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
765 perf_ibs->format_group.name = "format";
766 perf_ibs->format_group.attrs = perf_ibs->format_attrs;
768 memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
769 perf_ibs->attr_groups[0] = &perf_ibs->format_group;
770 perf_ibs->pmu.attr_groups = perf_ibs->attr_groups;
773 ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
775 perf_ibs->pcpu = NULL;
782 static __init void perf_event_ibs_init(void)
784 struct attribute **attr = ibs_op_format_attrs;
787 * Some chips fail to reset the fetch count when it is written; instead
788 * they need a 0-1 transition of IbsFetchEn.
790 if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
791 perf_ibs_fetch.fetch_count_reset_broken = 1;
793 if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
794 perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
796 perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
798 if (ibs_caps & IBS_CAPS_OPCNT) {
799 perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
800 *attr++ = &format_attr_cnt_ctl.attr;
802 perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
804 register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
805 pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
808 #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
810 static __init void perf_event_ibs_init(void) { }
814 /* IBS - apic initialization, for perf and oprofile */
816 static __init u32 __get_ibs_caps(void)
819 unsigned int max_level;
821 if (!boot_cpu_has(X86_FEATURE_IBS))
824 /* check IBS cpuid feature flags */
825 max_level = cpuid_eax(0x80000000);
826 if (max_level < IBS_CPUID_FEATURES)
827 return IBS_CAPS_DEFAULT;
829 caps = cpuid_eax(IBS_CPUID_FEATURES);
830 if (!(caps & IBS_CAPS_AVAIL))
831 /* cpuid flags not valid */
832 return IBS_CAPS_DEFAULT;
837 u32 get_ibs_caps(void)
842 EXPORT_SYMBOL(get_ibs_caps);
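/*
 * Illustrative user-space sketch (not part of this driver) of the CPUID probe
 * that __get_ibs_caps() performs; it assumes the GCC <cpuid.h> helpers, that
 * IBS_CPUID_FEATURES is leaf 0x8000001b and that IBS_CAPS_AVAIL is bit 0 of
 * EAX, and it skips the X86_FEATURE_IBS check the kernel does first.
 *
 *	#include <cpuid.h>
 *
 *	static int cpu_has_ibs(void)
 *	{
 *		unsigned int eax, ebx, ecx, edx;
 *
 *		if (__get_cpuid_max(0x80000000, 0) < 0x8000001b)
 *			return 0;
 *		if (!__get_cpuid(0x8000001b, &eax, &ebx, &ecx, &edx))
 *			return 0;
 *		return eax & 1;
 *	}
 */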
844 static inline int get_eilvt(int offset)
846 return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
849 static inline int put_eilvt(int offset)
851 return !setup_APIC_eilvt(offset, 0, 0, 1);
855 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
857 static inline int ibs_eilvt_valid(void)
865 rdmsrl(MSR_AMD64_IBSCTL, val);
866 offset = val & IBSCTL_LVT_OFFSET_MASK;
868 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
869 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
870 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
874 if (!get_eilvt(offset)) {
875 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
876 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
887 static int setup_ibs_ctl(int ibs_eilvt_off)
889 struct pci_dev *cpu_cfg;
896 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
897 PCI_DEVICE_ID_AMD_10H_NB_MISC,
902 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
903 | IBSCTL_LVT_OFFSET_VALID);
904 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
905 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
906 pci_dev_put(cpu_cfg);
907 pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
914 pr_debug("No CPU node configured for IBS\n");
922 * This runs only on the current cpu. We try to find an LVT offset and
923 * set up the local APIC. For this we must disable preemption. On
924 * success we initialize all nodes with this offset. This then updates
925 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
926 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
927 * uses the new offset.
929 static void force_ibs_eilvt_setup(void)
935 /* find the next free EILVT entry, skip offset 0 */
936 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
937 if (get_eilvt(offset))
942 if (offset == APIC_EILVT_NR_MAX) {
943 pr_debug("No EILVT entry available\n");
947 ret = setup_ibs_ctl(offset);
951 if (!ibs_eilvt_valid())
954 pr_info("LVT offset %d assigned\n", offset);
964 static void ibs_eilvt_setup(void)
967 * Force LVT offset assignment for family 10h: The offsets are
968 * not assigned by the BIOS for this family, so the OS is
969 * responsible for doing it. If the OS assignment fails, fall
970 * back to the BIOS settings and try to set it up.
972 if (boot_cpu_data.x86 == 0x10)
973 force_ibs_eilvt_setup();
976 static inline int get_ibs_lvt_offset(void)
980 rdmsrl(MSR_AMD64_IBSCTL, val);
981 if (!(val & IBSCTL_LVT_OFFSET_VALID))
984 return val & IBSCTL_LVT_OFFSET_MASK;
987 static void setup_APIC_ibs(void)
991 offset = get_ibs_lvt_offset();
995 if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
998 pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
1002 static void clear_APIC_ibs(void)
1006 offset = get_ibs_lvt_offset();
1008 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
1011 static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
1019 static int perf_ibs_suspend(void)
1025 static void perf_ibs_resume(void)
1031 static struct syscore_ops perf_ibs_syscore_ops = {
1032 .resume = perf_ibs_resume,
1033 .suspend = perf_ibs_suspend,
1036 static void perf_ibs_pm_init(void)
1038 register_syscore_ops(&perf_ibs_syscore_ops);
1043 static inline void perf_ibs_pm_init(void) { }
1047 static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
1053 static __init int amd_ibs_init(void)
1057 caps = __get_ibs_caps();
1059 return -ENODEV; /* ibs not supported by the cpu */
1063 if (!ibs_eilvt_valid())
1069 /* make ibs_caps visible to other cpus: */
1072 * x86_pmu_amd_ibs_starting_cpu will be called from core on all online cpus.
1075 cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
1076 "perf/x86/amd/ibs:starting",
1077 x86_pmu_amd_ibs_starting_cpu,
1078 x86_pmu_amd_ibs_dying_cpu);
1080 perf_event_ibs_init();
1085 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
1086 device_initcall(amd_ibs_init);