/*
 * (c) 2005-2016 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 * Maintained by: Borislav Petkov <bp@alien8.de>
 *
 * All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#define NR_BLOCKS         5	/* MCx_MISC0 plus the optional MISC1-4 set */
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR 0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

struct smca_bank_name smca_bank_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
};
EXPORT_SYMBOL_GPL(smca_bank_names);

static struct smca_hwid_mcatype smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0x7FF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0xF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0x3F },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
};

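/*
 * Worked example (an illustrative sketch, not part of the upstream file;
 * it assumes HWID_MCATYPE(hwid, mcatype) packs the hardware ID into the
 * upper 16 bits and the MCA type into the lower 16): an LS bank whose
 * MCA_IPID reports HWID=0xB0 and McaType=0x0 yields
 *
 *	HWID_MCATYPE(0xB0, 0x0) == (0xB0 << 16) | 0x0 == 0x00B00000,
 *
 * which get_smca_bank_info() below compares against this table.
 */
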
struct smca_bank_info smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to the type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for the underscore) plus the length of the
 * longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

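/*
 * A compile-time restatement of the sizing rule above (an illustrative
 * sketch, not in the upstream file; assumes a C11 _Static_assert is
 * acceptable here). The longest type name is "floating_point"; sizeof()
 * already counts its terminating NUL, so the check works out to
 * 15 + 1 (underscore) + 8 (32-bit InstanceId in hex) = 24 <= 30.
 */
_Static_assert(sizeof("floating_point") + 1 + 8 <= MAX_MCATYPE_NAME_LEN,
	       "buf_mcatype cannot hold '<type name>_<instance id>'");
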
static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void get_smca_bank_info(unsigned int bank)
{
	unsigned int i, hwid_mcatype, cpu = smp_processor_id();
	struct smca_hwid_mcatype *type;
	u32 high, instanceId;
	u16 hwid, mcatype;

	/* Collect bank_info using CPU 0 for now. */
	if (cpu)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instanceId, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid = high & MCI_IPID_HWID;
	mcatype = (high & MCI_IPID_MCATYPE) >> 16;
	hwid_mcatype = HWID_MCATYPE(hwid, mcatype);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		type = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == type->hwid_mcatype) {
			smca_banks[bank].type = type;
			smca_banks[bank].type_instance = instanceId;
			break;
		}
	}
}

struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit	= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
	wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if (mce_flags.smca) {
		if (!block) {
			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
		} else {
			/*
			 * For SMCA enabled processors, BLKPTR field of the
			 * first MISC register (MCx_MISC0) indicates presence of
			 * additional MISC register set (MISC1-4).
			 */
			if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
				return addr;

			if (!(low & MCI_CONFIG_MCAX))
				return addr;

			if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
			    (low & MASK_BLKPTR_LO))
				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
		}
		return addr;
	}

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}

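/*
 * Worked example of the legacy walk above (values illustrative, not from
 * the upstream file): block 0 of bank 4 is MC4_MISC0 (0x413). If its
 * BLKPTR field holds 0x1, i.e. low = 0x01000000 under MASK_BLKPTR_LO,
 * block 1 resolves to MCG_XBLK_ADDR + (0x01000000 >> 21) =
 * 0xC0000400 + 0x8 = 0xC0000408 (MC4_MISC1), and blocks 2..4 follow at
 * consecutive MSR addresses.
 */
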
static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high, smca_addr;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));

	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
		 */
		smca_high |= BIT(0);

		/*
		 * SMCA logs Deferred Error information in MCA_DE{STAT,ADDR}
		 * registers with the option of additionally logging to
		 * MCA_{STATUS,ADDR} if MCA_CONFIG[LogDeferredInMcaStat] is set.
		 *
		 * This bit is usually set by BIOS to retain the old behavior
		 * for OSes that don't use the new registers. Linux supports the
		 * new registers so let's disable that additional logging here.
		 *
		 * MCA_CONFIG[LogDeferredInMcaStat] is bit 34 (bit 2 in the high
		 * portion of the MSR).
		 */
		smca_high &= ~BIT(2);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((smca_low & BIT(5)) && !((smca_high >> 5) & 0x3))
			smca_high |= BIT(5);

		wrmsr(smca_addr, smca_low, smca_high);
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);

	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
		mce_threshold_vector = amd_threshold_interrupt;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

/*
 * Turn off MC4_MISC thresholding banks on all family 0x15 models since
 * they're not supported there.
 */
void disable_err_thresholding(struct cpuinfo_x86 *c)
{
	int i;
	u64 hwcr;
	bool need_toggle;
	u32 msrs[] = {
		0x00000413, /* MC4_MISC0 */
		0xc0000408, /* MC4_MISC1 */
	};

	if (c->x86 != 0x15)
		return;

	rdmsrl(MSR_K7_HWCR, hwcr);

	/* McStatusWrEn has to be set */
	need_toggle = !(hwcr & BIT(18));

	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

	/* Clear CntP bit safely */
	for (i = 0; i < ARRAY_SIZE(msrs); i++)
		msr_clear_bit(msrs[i], 62);

	/* restore old settings */
	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	disable_err_thresholding(c);

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			get_smca_bank_info(bank);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(cpu, address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI) ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

static void
__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
{
	u32 msr_status = msr_ops.status(bank);
	u32 msr_addr = msr_ops.addr(bank);
	struct mce m;
	u64 status;

	WARN_ON_ONCE(deferred_err && threshold_err);

	if (deferred_err && mce_flags.smca) {
		msr_status = MSR_AMD64_SMCA_MCx_DESTAT(bank);
		msr_addr = MSR_AMD64_SMCA_MCx_DEADDR(bank);
	}

	rdmsrl(msr_status, status);

	if (!(status & MCI_STATUS_VAL))
		return;

	mce_setup(&m);

	m.status = status;
	m.bank = bank;

	if (threshold_err)
		m.misc = misc;

	if (m.status & MCI_STATUS_ADDRV) {
		rdmsrl(msr_addr, m.addr);

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);

	wrmsrl(msr_status, 0);
}

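/*
 * Worked example of the [55:<lsb>] masking above (values illustrative):
 * SMCA encodes the least significant valid address bit in MCA_ADDR[61:56],
 * which is what (m.addr >> 56) & 0x3f reads. If that field is 12 (4K
 * granularity), GENMASK_ULL(55, 12) keeps address bits [55:12] and clears
 * the rest, so a raw 0x123456789ABC becomes 0x123456789000.
 */
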
static inline void __smp_deferred_error_interrupt(void)
{
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
	entering_irq();
	__smp_deferred_error_interrupt();
	exiting_ack_irq();
}

asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	__smp_deferred_error_interrupt();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;
	u32 msr_status;
	u64 status;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		msr_status = (mce_flags.smca) ? MSR_AMD64_SMCA_MCx_DESTAT(bank)
					      : msr_ops.status(bank);

		rdmsrl(msr_status, status);

		if (!(status & MCI_STATUS_VAL) ||
		    !(status & MCI_STATUS_DEFERRED))
			continue;

		__log_error(bank, true, false, 0);
		break;
	}
}

/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */

static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();

	/* assume first bank caused it */
	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(cpu, address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI) ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			if (high & MASK_OVERFLOW_HI)
				goto log;
		}
	}
	return;

log:
	__log_error(bank, false, true, ((u64)high << 32) | low);
}

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

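/*
 * Worked example of the bias arithmetic above (values illustrative):
 * threshold_restart_bank() seeds the 12-bit counter with THRESHOLD_MAX -
 * threshold_limit so that the counter overflows, and the LVT interrupt
 * fires, after exactly threshold_limit errors. With threshold_limit = 10
 * the counter starts at 0xFFF - 10 = 0xFF5; after three errors it reads
 * 0xFF8, and the subtraction above reports 0xFF8 - (0xFFF - 10) = 3.
 */
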
static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

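/*
 * How these attributes surface to userspace (an illustrative sketch of
 * this era's sysfs layout; bank and block names vary by CPU):
 *
 *   /sys/devices/system/machinecheck/machinecheck<cpu>/threshold_bank<N>/<block>/
 *       error_count, threshold_limit and, when the block is interrupt
 *       capable, interrupt_enable.
 *
 * e.g. raising the limit of bank 4's "dram" block to 25:
 *
 *   echo 25 > /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/dram/threshold_limit
 */
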
#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static void threshold_block_release(struct kobject *kobj);

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
	.release		= threshold_block_release,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	unsigned int bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	if (!smca_banks[bank].type)
		return NULL;

	bank_type = smca_banks[bank].type->bank_type;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_bank_names[bank_type].name,
			  smca_banks[bank].type_instance);
	return buf_mcatype;
}

static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
				     unsigned int bank, unsigned int block,
				     u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI) ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (tb->blocks)
		list_add(&b->miscj, &tb->blocks->miscj);
	else
		tb->blocks = b;

	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(cpu, address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			atomic_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	if (is_shared_bank(bank)) {
		atomic_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
	if (err)
		goto out_free;

	per_cpu(threshold_banks, cpu)[bank] = b;

	return 0;

out_free:
	kfree(b);

out:
	return err;
}

/* create dir/files for all valid threshold banks */
static int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}

static void threshold_block_release(struct kobject *kobj)
{
	kfree(to_block(kobj));
}

static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		list_del(&pos->miscj);
		kobject_put(&pos->kobj);
	}

	kobject_put(&head->blocks->kobj);
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!atomic_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
}

/* get notified when a cpu comes on/off */
static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}

	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}

/*
 * there are 3 funcs which need to be _initcalled in a logic sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under xen platform;
 *
 * mcheck_init_device should be inited before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
 *
 * so we use the following _initcalls
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * when running under xen, the initcall order is 1,2,3;
 * on baremetal, we skip 1 and we do only 2 and 3.
 */
late_initcall(threshold_init_device);