/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

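/*
 * The MASK_*_HI masks select fields in the high 32 bits of an MCi_MISC
 * thresholding register.  Adding 32 gives the field positions in the full
 * 64-bit MSR: Valid is bit 63, CntP bit 62, Locked bit 61, LvtOff bits
 * [55:52], CntEn bit 51, IntType bits [50:49], Ovrflw bit 48 and ErrCount
 * bits [43:32].
 */
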
/* Deferred error settings */
#define MSR_CU_DEF_ERR          0xC0000410
#define MASK_DEF_LVTOFF         0x000000F0
#define MASK_DEF_INT_TYPE       0x00000006
#define DEF_LVT_OFF             0x2
#define DEF_INT_TYPE_APIC       0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF        0xF000

static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
        "combined_unit",
        "decode_unit",
        "northbridge",
        "execution_unit",
};

static const char * const smca_umc_block_names[] = {
        "dram_ecc",
        "misc_umc"
};

struct smca_bank_name smca_bank_names[] = {
        [SMCA_LS]       = { "load_store",       "Load Store Unit" },
        [SMCA_IF]       = { "insn_fetch",       "Instruction Fetch Unit" },
        [SMCA_L2_CACHE] = { "l2_cache",         "L2 Cache" },
        [SMCA_DE]       = { "decode_unit",      "Decode Unit" },
        [SMCA_EX]       = { "execution_unit",   "Execution Unit" },
        [SMCA_FP]       = { "floating_point",   "Floating Point Unit" },
        [SMCA_L3_CACHE] = { "l3_cache",         "L3 Cache" },
        [SMCA_CS]       = { "coherent_slave",   "Coherent Slave" },
        [SMCA_PIE]      = { "pie",              "Power, Interrupts, etc." },
        [SMCA_UMC]      = { "umc",              "Unified Memory Controller" },
        [SMCA_PB]       = { "param_block",      "Parameter Block" },
        [SMCA_PSP]      = { "psp",              "Platform Security Processor" },
        [SMCA_SMU]      = { "smu",              "System Management Unit" },
};
EXPORT_SYMBOL_GPL(smca_bank_names);

static struct smca_hwid_mcatype smca_hwid_mcatypes[] = {
        /* { bank_type, hwid_mcatype, xec_bitmap } */

        /* ZN Core (HWID=0xB0) MCA types */
        { SMCA_LS,       HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
        { SMCA_IF,       HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
        { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
        { SMCA_DE,       HWID_MCATYPE(0xB0, 0x3), 0x1FF },
        /* HWID 0xB0 MCATYPE 0x4 is Reserved */
        { SMCA_EX,       HWID_MCATYPE(0xB0, 0x5), 0x7FF },
        { SMCA_FP,       HWID_MCATYPE(0xB0, 0x6), 0x7F },
        { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

        /* Data Fabric MCA types */
        { SMCA_CS,       HWID_MCATYPE(0x2E, 0x0), 0x1FF },
        { SMCA_PIE,      HWID_MCATYPE(0x2E, 0x1), 0xF },

        /* Unified Memory Controller MCA type */
        { SMCA_UMC,      HWID_MCATYPE(0x96, 0x0), 0x3F },

        /* Parameter Block MCA type */
        { SMCA_PB,       HWID_MCATYPE(0x05, 0x0), 0x1 },

        /* Platform Security Processor MCA type */
        { SMCA_PSP,      HWID_MCATYPE(0xFF, 0x0), 0x1 },

        /* System Management Unit MCA type */
        { SMCA_SMU,      HWID_MCATYPE(0x01, 0x0), 0x1 },
};
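
/*
 * HWID_MCATYPE() packs a bank's hardware ID and MCA type, both read from the
 * bank's MCA_IPID register, into a single u32 key so that a live bank can be
 * matched against the table above with a single compare in
 * get_smca_bank_info().
 */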

struct smca_bank_info smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA-enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to the type's name in get_name().
 *
 * InstanceId is 32 bits, which is at most 8 hex characters. Make sure
 * MAX_MCATYPE_NAME_LEN is greater than 8 plus 1 (for the underscore) plus the
 * length of the longest type name.
 */
#define MAX_MCATYPE_NAME_LEN    30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);  /* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
        pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

/*
 * CPU Initialization
 */

static void get_smca_bank_info(unsigned int bank)
{
        unsigned int i, hwid_mcatype, cpu = smp_processor_id();
        struct smca_hwid_mcatype *type;
        u32 high, instanceId;
        u16 hwid, mcatype;

        /* Collect bank_info using CPU 0 for now. */
        if (cpu)
                return;

        if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instanceId, &high)) {
                pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
                return;
        }

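        /*
         * The hardware ID is MCA_IPID[43:32] and the MCA type MCA_IPID[63:48],
         * i.e. both live in the high half of the MSR read above.
         */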
        hwid = high & MCI_IPID_HWID;
        mcatype = (high & MCI_IPID_MCATYPE) >> 16;
        hwid_mcatype = HWID_MCATYPE(hwid, mcatype);

        for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
                type = &smca_hwid_mcatypes[i];
                if (hwid_mcatype == type->hwid_mcatype) {
                        smca_banks[bank].type = type;
                        smca_banks[bank].type_instance = instanceId;
                        break;
                }
        }
}

struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;
        int                     set_lvt_off;
        int                     lvt_off;
        u16                     old_limit;
};

static inline bool is_shared_bank(int bank)
{
        /*
         * Scalable MCA provides for only one core to have access to the MSRs of
         * a shared bank.
         */
        if (mce_flags.smca)
                return false;

        /* Bank 4 is for northbridge reporting and is thus shared */
        return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
        switch (b->address) {
        /* MSR4_MISC0 */
        case 0x00000413:
                return "dram";

        case 0xc0000408:
                return "ht_links";

        case 0xc0000409:
                return "l3_cache";

        default:
                WARN(1, "Funny MSR: 0x%08x\n", b->address);
                return "";
        }
}

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
        /*
         * bank 4 supports APIC LVT interrupts implicitly since forever.
         */
        if (bank == 4)
                return true;

        /*
         * IntP: interrupt present; if this bit is set, the thresholding
         * bank can generate APIC LVT interrupts
         */
        return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                /*
                 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
                 * the BIOS provides the value. The original field where LVT offset
                 * was set is reserved. Return early here:
                 */
                if (mce_flags.smca)
                        return 0;

                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
}

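/*
 * The hardware error counter counts up from its programmed start value and
 * raises the threshold interrupt once it reaches THRESHOLD_MAX.  A reset
 * therefore preloads the counter with (THRESHOLD_MAX - threshold_limit):
 * e.g. a limit of 10 preloads 0xFF5, so the interrupt fires after 10 errors.
 */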
/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 hi, lo;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                hi =
                    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                    (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        /* clear IntType */
        hi &= ~MASK_INT_TYPE_HI;

        if (!tr->b->interrupt_capable)
                goto done;

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        if (tr->b->interrupt_enable)
                hi |= INT_TYPE_APIC;

 done:

        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b                      = b,
                .set_lvt_off            = 1,
                .lvt_off                = offset,
        };

        b->threshold_limit              = THRESHOLD_MAX;
        threshold_restart_bank(&tr);
}

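/*
 * The two helpers below reserve an extended APIC LVT entry for their vector
 * via setup_APIC_eilvt(), which returns 0 on success.  The offset that was
 * successfully claimed is fed back in on later calls, so the entry is only
 * set up once.
 */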
static int setup_APIC_mce_threshold(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
        u32 low = 0, high = 0;
        int def_offset = -1, def_new;

        if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
                return;

        def_new = (low & MASK_DEF_LVTOFF) >> 4;
        if (!(low & MASK_DEF_LVTOFF)) {
                pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
                def_new = DEF_LVT_OFF;
                low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
        }

        def_offset = setup_APIC_deferred_error(def_offset, def_new);
        if ((def_offset == def_new) &&
            (deferred_error_int_vector != amd_deferred_error_interrupt))
                deferred_error_int_vector = amd_deferred_error_interrupt;

        low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
        wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
                             unsigned int bank, unsigned int block)
{
        u32 addr = 0, offset = 0;

        if (mce_flags.smca) {
                if (!block) {
                        addr = MSR_AMD64_SMCA_MCx_MISC(bank);
                } else {
                        /*
                         * For SMCA enabled processors, BLKPTR field of the
                         * first MISC register (MCx_MISC0) indicates presence of
                         * additional MISC register set (MISC1-4).
                         */
                        u32 low, high;

                        if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
                                return addr;

                        if (!(low & MCI_CONFIG_MCAX))
                                return addr;

                        if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
                            (low & MASK_BLKPTR_LO))
                                addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
                }
                return addr;
        }

        /* Fall back to method we used for older processors: */
        switch (block) {
        case 0:
                addr = msr_ops.misc(bank);
                break;
        case 1:
                offset = ((low & MASK_BLKPTR_LO) >> 21);
                if (offset)
                        addr = MCG_XBLK_ADDR + offset;
                break;
        default:
                addr = ++current_addr;
        }
        return addr;
}

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
                        int offset, u32 misc_high)
{
        unsigned int cpu = smp_processor_id();
        u32 smca_low, smca_high, smca_addr;
        struct threshold_block b;
        int new;

        if (!block)
                per_cpu(bank_map, cpu) |= (1 << bank);

        memset(&b, 0, sizeof(b));
        b.cpu                   = cpu;
        b.bank                  = bank;
        b.block                 = block;
        b.address               = addr;
        b.interrupt_capable     = lvt_interrupt_supported(bank, misc_high);

        if (!b.interrupt_capable)
                goto done;

        b.interrupt_enable = 1;

        if (!mce_flags.smca) {
                new = (misc_high & MASK_LVTOFF_HI) >> 20;
                goto set_offset;
        }

        smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);

        if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
                /*
                 * OS is required to set the MCAX bit to acknowledge that it is
                 * now using the new MSR ranges and new registers under each
                 * bank. It also means that the OS will configure deferred
                 * errors in the new MCx_CONFIG register. If the bit is not set,
                 * uncorrectable errors will cause a system panic.
                 *
                 * MCA_CONFIG[MCAX] is bit 32 (bit 0 in the high portion of the
                 * MSR).
                 */
                smca_high |= BIT(0);

                /*
                 * SMCA logs Deferred Error information in MCA_DE{STAT,ADDR}
                 * registers with the option of additionally logging to
                 * MCA_{STATUS,ADDR} if MCA_CONFIG[LogDeferredInMcaStat] is set.
                 *
                 * This bit is usually set by BIOS to retain the old behavior
                 * for OSes that don't use the new registers. Linux supports the
                 * new registers so let's disable that additional logging here.
                 *
                 * MCA_CONFIG[LogDeferredInMcaStat] is bit 34 (bit 2 in the high
                 * portion of the MSR).
                 */
                smca_high &= ~BIT(2);

                /*
                 * SMCA sets the Deferred Error Interrupt type per bank.
                 *
                 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
                 * if the DeferredIntType bit field is available.
                 *
                 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
                 * high portion of the MSR). OS should set this to 0x1 to enable
                 * APIC based interrupt. First, check that no interrupt has been
                 * set.
                 */
                if ((smca_low & BIT(5)) && !((smca_high >> 5) & 0x3))
                        smca_high |= BIT(5);

                wrmsr(smca_addr, smca_low, smca_high);
        }

        /* Gather LVT offset for thresholding: */
        if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
                goto out;

        new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
        offset = setup_APIC_mce_threshold(offset, new);

        if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
                mce_threshold_vector = amd_threshold_interrupt;

done:
        mce_threshold_block_init(&b, offset);

out:
        return offset;
}

/*
 * Turn off MC4_MISC thresholding banks on all family 0x15 models since
 * they're not supported there.
 */
void disable_err_thresholding(struct cpuinfo_x86 *c)
{
        int i;
        u64 hwcr;
        bool need_toggle;
        u32 msrs[] = {
                0x00000413, /* MC4_MISC0 */
                0xc0000408, /* MC4_MISC1 */
        };

        if (c->x86 != 0x15)
                return;

        rdmsrl(MSR_K7_HWCR, hwcr);

        /* McStatusWrEn has to be set */
        need_toggle = !(hwcr & BIT(18));

        if (need_toggle)
                wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

        /* Clear CntP bit safely */
        for (i = 0; i < ARRAY_SIZE(msrs); i++)
                msr_clear_bit(msrs[i], 62);

        /* restore old settings */
        if (need_toggle)
                wrmsrl(MSR_K7_HWCR, hwcr);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block, cpu = smp_processor_id();
        int offset = -1;

        disable_err_thresholding(c);

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (mce_flags.smca)
                        get_smca_bank_info(bank);

                for (block = 0; block < NR_BLOCKS; ++block) {
                        address = get_block_address(cpu, address, low, high, bank, block);
                        if (!address)
                                break;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        offset = prepare_threshold_block(bank, block, address, offset, high);
                }
        }

        if (mce_flags.succor)
                deferred_error_interrupt_enable(c);
}

static void
__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
{
        u32 msr_status = msr_ops.status(bank);
        u32 msr_addr = msr_ops.addr(bank);
        struct mce m;
        u64 status;

        WARN_ON_ONCE(deferred_err && threshold_err);

        if (deferred_err && mce_flags.smca) {
                msr_status = MSR_AMD64_SMCA_MCx_DESTAT(bank);
                msr_addr = MSR_AMD64_SMCA_MCx_DEADDR(bank);
        }

        rdmsrl(msr_status, status);

        if (!(status & MCI_STATUS_VAL))
                return;

        mce_setup(&m);

        m.status = status;
        m.bank = bank;

        if (threshold_err)
                m.misc = misc;

        if (m.status & MCI_STATUS_ADDRV) {
                rdmsrl(msr_addr, m.addr);

                /*
                 * Extract [55:<lsb>] where lsb is the least significant
                 * *valid* bit of the address bits.
                 */
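                /* On SMCA, the lsb field itself is carried in MCA_ADDR[61:56]. */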
                if (mce_flags.smca) {
                        u8 lsb = (m.addr >> 56) & 0x3f;

                        m.addr &= GENMASK_ULL(55, lsb);
                }
        }

        if (mce_flags.smca) {
                rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

                if (m.status & MCI_STATUS_SYNDV)
                        rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
        }

        mce_log(&m);

        wrmsrl(msr_status, 0);
}

static inline void __smp_deferred_error_interrupt(void)
{
        inc_irq_stat(irq_deferred_error_count);
        deferred_error_int_vector();
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
        entering_irq();
        __smp_deferred_error_interrupt();
        exiting_ack_irq();
}

asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
{
        entering_irq();
        trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
        __smp_deferred_error_interrupt();
        trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
        exiting_ack_irq();
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
        unsigned int bank;
        u32 msr_status;
        u64 status;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                msr_status = (mce_flags.smca) ? MSR_AMD64_SMCA_MCx_DESTAT(bank)
                                              : msr_ops.status(bank);

                rdmsrl(msr_status, status);

                if (!(status & MCI_STATUS_VAL) ||
                    !(status & MCI_STATUS_DEFERRED))
                        continue;

                __log_error(bank, true, false, 0);
                break;
        }
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR. The
 * interrupt fires when error_count reaches threshold_limit. The handler
 * simply logs the MCE with the software-defined bank number.
 */

static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block, cpu = smp_processor_id();

        /* assume first bank caused it */
        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        address = get_block_address(cpu, address, low, high, bank, block);
                        if (!address)
                                break;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        if (high & MASK_OVERFLOW_HI)
                                goto log;
                }
        }
        return;

log:
        __log_error(bank, false, true, ((u64)high << 32) | low);
}

/*
 * Sysfs Interface
 */
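
/*
 * Each thresholding block gets a kobject with error_count, threshold_limit
 * and (when supported) interrupt_enable attributes.  These typically appear
 * under /sys/devices/system/machinecheck/machinecheck<cpu>/threshold_bank<N>/.
 */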

struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)      \
{                                                                       \
        return sprintf(buf, "%lu\n", (unsigned long) b->name);          \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (!b->interrupt_capable)
                return -EINVAL;

        if (kstrtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b            = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (kstrtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        u32 lo, hi;

        rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

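        /*
         * The counter was preloaded with (THRESHOLD_MAX - threshold_limit);
         * subtract that bias to report the number of errors actually seen.
         */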
        return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
                                     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
        .attr = {.name = __stringify(error_count), .mode = 0444 },
        .show = show_error_count,
};

#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                    \
        .attr   = {.name = __stringify(val), .mode = 0644 },            \
        .show   = show_## val,                                          \
        .store  = store_## val,                                         \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
        &threshold_limit.attr,
        &error_count.attr,
        NULL,   /* possibly interrupt_enable if supported, see below */
        NULL,
};

#define to_block(k)     container_of(k, struct threshold_block, kobj)
#define to_attr(a)      container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static const struct sysfs_ops threshold_ops = {
        .show                   = show,
        .store                  = store,
};

static void threshold_block_release(struct kobject *kobj);

static struct kobj_type threshold_ktype = {
        .sysfs_ops              = &threshold_ops,
        .default_attrs          = default_attrs,
        .release                = threshold_block_release,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
        unsigned int bank_type;

        if (!mce_flags.smca) {
                if (b && bank == 4)
                        return bank4_names(b);

                return th_names[bank];
        }

        if (!smca_banks[bank].type)
                return NULL;

        bank_type = smca_banks[bank].type->bank_type;

        if (b && bank_type == SMCA_UMC) {
                if (b->block < ARRAY_SIZE(smca_umc_block_names))
                        return smca_umc_block_names[b->block];
                return NULL;
        }

        snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
                 "%s_%x", smca_bank_names[bank_type].name,
                          smca_banks[bank].type_instance);
        return buf_mcatype;
}

static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
                                     unsigned int bank, unsigned int block,
                                     u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->interrupt_capable    = lvt_interrupt_supported(bank, high);
        b->threshold_limit      = THRESHOLD_MAX;

        if (b->interrupt_capable) {
                threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
                b->interrupt_enable = 1;
        } else {
                threshold_ktype.default_attrs[2] = NULL;
        }

        INIT_LIST_HEAD(&b->miscj);

        if (tb->blocks)
                list_add(&b->miscj, &tb->blocks->miscj);
        else
                tb->blocks = b;

        err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
        if (err)
                goto out_free;
recurse:
        address = get_block_address(cpu, address, low, high, bank, ++block);
        if (!address)
                return 0;

        err = allocate_threshold_blocks(cpu, tb, bank, block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                list_del(&b->miscj);
                kfree(b);
        }
        return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
        struct list_head *head = &b->blocks->miscj;
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        int err = 0;

        err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
        if (err)
                return err;

        list_for_each_entry_safe(pos, tmp, head, miscj) {

                err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
                if (err) {
                        list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
                                kobject_del(&pos->kobj);

                        return err;
                }
        }
        return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        struct device *dev = per_cpu(mce_device, cpu);
        struct amd_northbridge *nb = NULL;
        struct threshold_bank *b = NULL;
        const char *name = get_name(bank, NULL);
        int err = 0;

        if (!dev)
                return -ENODEV;

        if (is_shared_bank(bank)) {
                nb = node_to_amd_nb(amd_get_nb_id(cpu));

                /* threshold descriptor already initialized on this node? */
                if (nb && nb->bank4) {
                        /* yes, use it */
                        b = nb->bank4;
                        err = kobject_add(b->kobj, &dev->kobj, name);
                        if (err)
                                goto out;

                        per_cpu(threshold_banks, cpu)[bank] = b;
                        atomic_inc(&b->cpus);

                        err = __threshold_add_blocks(b);

                        goto out;
                }
        }

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &dev->kobj);
        if (!b->kobj) {
                err = -EINVAL;
                goto out_free;
        }

        if (is_shared_bank(bank)) {
                atomic_set(&b->cpus, 1);

                /* nb is already initialized, see above */
                if (nb) {
                        WARN_ON(nb->bank4);
                        nb->bank4 = b;
                }
        }

        err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
        if (err)
                goto out_free;

        per_cpu(threshold_banks, cpu)[bank] = b;

        return 0;

 out_free:
        kfree(b);

 out:
        return err;
}

/* create dir/files for all valid threshold banks */
static int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        struct threshold_bank **bp;
        int err = 0;

        bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
                     GFP_KERNEL);
        if (!bp)
                return -ENOMEM;

        per_cpu(threshold_banks, cpu) = bp;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        return err;
        }

        return err;
}

static void threshold_block_release(struct kobject *kobj)
{
        kfree(to_block(kobj));
}

static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                list_del(&pos->miscj);
                kobject_put(&pos->kobj);
        }

        kobject_put(&head->blocks->kobj);
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;

        kobject_del(b->kobj);

        list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
                kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct amd_northbridge *nb;
        struct threshold_bank *b;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;

        if (!b->blocks)
                goto free_out;

        if (is_shared_bank(bank)) {
                if (!atomic_dec_and_test(&b->cpus)) {
                        __threshold_remove_blocks(b);
                        per_cpu(threshold_banks, cpu)[bank] = NULL;
                        return;
                } else {
                        /*
                         * the last CPU on this node using the shared bank is
                         * going away, remove that bank now.
                         */
                        nb = node_to_amd_nb(amd_get_nb_id(cpu));
                        nb->bank4 = NULL;
                }
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
        kfree(per_cpu(threshold_banks, cpu));
}

/* get notified when a cpu comes on/off */
static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);

                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;

        return 0;
}
/*
 * There are three functions which need to be initcalled in a logical
 * sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before the
 * native mce_chrdev_device registration when running under Xen.
 *
 * mcheck_init_device must run before threshold_init_device in order to
 * initialize mce_device, otherwise a NULL ptr dereference will cause a panic.
 *
 * So we use the following initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under Xen, the initcall order is 1, 2, 3;
 * on bare metal we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);