/*
 * (c) 2005, 2006 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Support : jacob.shin@amd.com
 *
 * April 2006
 *	- added support for AMD Family 0x10 processors
 *
 * All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

struct threshold_block {
	unsigned int		block;
	unsigned int		bank;
	unsigned int		cpu;
	u32			address;
	u16			interrupt_enable;
	bool			interrupt_capable;
	u16			threshold_limit;
	struct kobject		kobj;
	struct list_head	miscj;
};

struct threshold_bank {
	struct kobject		*kobj;
	struct threshold_block	*blocks;
	cpumask_var_t		cpus;
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

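/*
 * Which banks are shared between the cores of a node: only bank 4
 * (MC4, the northbridge bank) is shared, see the MC4_MISCi note above.
 */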
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};

static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */

struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

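/*
 * Sanity-check the LVT offset we want to program: a negative offset means
 * reserving the APIC entry failed, and an offset that does not match what
 * the firmware left in the MSR points at a BIOS bug. In both cases return
 * 0 so the caller leaves the LVT offset bits alone.
 */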
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
}

/*
 * Called via smp_call_function_single(), must be called with correct
 * cpu affinity.
 */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit = THRESHOLD_MAX;
	threshold_restart_bank(&tr);
}

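/*
 * Reserve an APIC extended LVT entry for the threshold vector. Once an
 * offset has been handed out (reserved >= 0), keep reusing it for every
 * further bank/block instead of reserving a new one.
 */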
static int setup_APIC_mce(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	struct threshold_block b;
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	int offset = -1;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
			if (shared_bank[bank] && c->cpu_core_id)
				break;

			memset(&b, 0, sizeof(b));
			b.cpu			= cpu;
			b.bank			= bank;
			b.block			= block;
			b.address		= address;
			b.interrupt_capable	= lvt_interrupt_supported(bank, high);

			if (b.interrupt_capable) {
				int new = (high & MASK_LVTOFF_HI) >> 20;
				offset = setup_APIC_mce(offset, new);
			}

			mce_threshold_block_init(&b, offset);
			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}

/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;

	mce_setup(&m);

	/* assume first bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0) {
				address = MSR_IA32_MC0_MISC + bank * 4;
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else {
				++address;
			}

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					   &__get_cpu_var(mce_poll_banks));

			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}

/*
 * Sysfs Interface
 */

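/*
 * Each threshold block gets its own kobject carrying three attributes:
 * interrupt_enable, threshold_limit and error_count.
 */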
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lx\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

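/*
 * error_count lives in a per-bank MSR on the CPU that owns the block, so
 * reading it has to happen on that CPU via smp_call_function_single().
 * The value shown is the raw counter minus the (THRESHOLD_MAX -
 * threshold_limit) start value programmed by threshold_restart_bank().
 */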
struct threshold_block_cross_cpu {
	struct threshold_block	*tb;
	long			retval;
};

static void local_error_count_handler(void *_tbcc)
{
	struct threshold_block_cross_cpu *tbcc = _tbcc;
	struct threshold_block *b = tbcc->tb;
	u32 low, high;

	rdmsr(b->address, low, high);
	tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	struct threshold_block_cross_cpu tbcc = { .tb = b, };

	smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
	return sprintf(buf, "%lx\n", tbcc.retval);
}

static ssize_t store_error_count(struct threshold_block *b,
				 const char *buf, size_t count)
{
	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
	return 1;
}

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops	= &threshold_ops,
	.default_attrs	= default_attrs,
};

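/*
 * Walk the (up to NR_BLOCKS) MISC blocks of a bank recursively: block 0
 * sits at MC<bank>_MISC, block 1 at the address given by BLKPTR (relative
 * to MCG_XBLK_ADDR) and further blocks follow contiguously. A "misc%i"
 * kobject is created for every valid, counter-present, unlocked block.
 */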
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   "misc%i", block);
	if (err)
		goto out_free;
recurse:
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else {
		++address;
	}

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
	return allocate_threshold_blocks(cpu, bank, 0,
					 MSR_IA32_MC0_MISC + bank * 4);
}

/* symlinks sibling shared banks to first core. first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	int i, err = 0;
	struct threshold_bank *b = NULL;
	struct device *dev = per_cpu(mce_device, cpu);
	char name[32];

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
		i = cpumask_first(cpu_llc_shared_mask(cpu));

		/* first core not up yet */
		if (cpu_data(i).cpu_core_id)
			goto out;

		/* already linked */
		if (per_cpu(threshold_banks, cpu)[bank])
			goto out;

		b = per_cpu(threshold_banks, i)[bank];

		if (!b)
			goto out;

		err = sysfs_create_link(&dev->kobj, b->kobj, name);
		if (err)
			goto out;

		cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
		per_cpu(threshold_banks, cpu)[bank] = b;

		goto out;
	}
#endif

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}
	if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
		kfree(b);
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj)
		goto out_free;

#ifndef CONFIG_SMP
	cpumask_setall(b->cpus);
#else
	cpumask_set_cpu(cpu, b->cpus);
#endif

	per_cpu(threshold_banks, cpu)[bank] = b;

	err = local_allocate_threshold_blocks(cpu, bank);
	if (err)
		goto out_free;

	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		dev = per_cpu(mce_device, i);
		if (dev)
			err = sysfs_create_link(&dev->kobj, b->kobj, name);
		if (err)
			goto out;

		per_cpu(threshold_banks, i)[bank] = b;
	}

	goto out;

out_free:
	per_cpu(threshold_banks, cpu)[bank] = NULL;
	free_cpumask_var(b->cpus);
	kfree(b);
out:
	return err;
}

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	int err = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}

/*
 * let's be hotplug friendly.
 * in case of multiple core processors, the first core always takes ownership
 * of shared sysfs dir/files, and the rest of the cores are symlinked to it.
 */

static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

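/*
 * Tear down one bank: a core that merely holds a symlink to the owning
 * core's directory just drops the link; the owning core removes all
 * sibling symlinks, frees the block list and finally the bank kobject.
 */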
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct threshold_bank *b;
	struct device *dev;
	char name[32];
	int i = 0;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;
	if (!b->blocks)
		goto free_out;

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	/* sibling symlink */
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		dev = per_cpu(mce_device, cpu);
		sysfs_remove_link(&dev->kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;

		return;
	}
#endif

	/* remove all sibling symlinks before unregistering */
	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		dev = per_cpu(mce_device, i);
		if (dev)
			sysfs_remove_link(&dev->kobj, name);
		per_cpu(threshold_banks, i)[bank] = NULL;
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	free_cpumask_var(b->cpus);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
}

/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}
device_initcall(threshold_init_device);