#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

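/*
 * Generic perf hardware cache events, mapped to AMD event-select
 * codes. A value of 0 marks an op/result combination that is not
 * supported, -1 one that is not meaningful for this cache.
 */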
static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
                [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
                [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
                [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
                [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
                [ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x0080,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x0081,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
};

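/*
 * Map a generic perf hardware event index to the AMD event-select
 * code programmed into the PERF_CTL MSR.
 */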
static u64 amd_pmu_event_map(int hw_event)
{
        return amd_perfmon_event_map[hw_event];
}

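/*
 * AMD-specific event setup: host/guest-only filtering for SVM and,
 * for raw events, the extended AMD64 event mask.
 */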
static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (event->attr.exclude_host && event->attr.exclude_guest)
                /*
                 * When HO == GO == 1 the hardware treats that as GO == HO == 0
                 * and will count in both modes. We don't want to count in that
                 * case so we emulate no-counting by setting US = OS = 0.
                 */
                event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
                                      ARCH_PERFMON_EVENTSEL_OS);
        else if (event->attr.exclude_host)
                event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
        else if (event->attr.exclude_guest)
                event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

        event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}

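/*
 * True once this CPU has been attached to a northbridge descriptor
 * in amd_pmu_cpu_starting(), i.e. NB events can be scheduled.
 */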
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}

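/*
 * Release the NB counter slot this event claimed in
 * amd_get_event_constraints(); core-only events own no NB resources.
 */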
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * only care about NB events
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (nb->owners[i] == event) {
                        cmpxchg(nb->owners + i, event, NULL);
                        break;
                }
        }
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12.
 *
 * NB events are events measuring L3 cache and HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old = NULL;
        int max = x86_pmu.num_counters;
        int i, j, k = -1;

        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return &unconstrained;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for (i = 0; i < max; i++) {
                /*
                 * keep track of first free slot
                 */
                if (k == -1 && !nb->owners[i])
                        k = i;

                /* already present, reuse */
                if (nb->owners[i] == event)
                        goto done;
        }
        /*
         * not present, so grab a new slot
         * starting either at:
         */
        if (hwc->idx != -1) {
                /* previous assignment */
                i = hwc->idx;
        } else if (k != -1) {
                /* start from free slot found */
                i = k;
        } else {
                /*
                 * event not found, no slot found in
                 * first pass, try again from the
                 * beginning
                 */
                i = 0;
        }
        j = i;
        do {
                old = cmpxchg(nb->owners + i, NULL, event);
                if (!old)
                        break;
                if (++i == max)
                        i = 0;
        } while (i != j);
done:
        if (!old)
                return &nb->event_constraints[i];

        return &emptyconstraint;
}

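/*
 * Allocate and initialize a northbridge descriptor on the node of
 * @cpu. Each event_constraints[i] has only bit i set, so an NB event
 * that wins slot i in the owners[] array is pinned to counter i.
 */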
static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
                          cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        WARN_ON_ONCE(cpuc->amd_nb);

        if (boot_cpu_data.x86_max_cores < 2)
                return NOTIFY_OK;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (!cpuc->amd_nb)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}

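/*
 * Attach the CPU to its node's shared northbridge descriptor: reuse
 * the descriptor of an already-online CPU with the same NB id if one
 * exists (the one allocated in amd_pmu_cpu_prepare() is then freed),
 * otherwise promote our own. On family 15h only the virtualization
 * mask is set up, since its NB counters are not yet implemented.
 */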
static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct amd_nb *nb;
        int i, nb_id;

        cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

        if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
                return;

        nb_id = amd_get_nb_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;

                if (nb->nb_id == nb_id) {
                        cpuc->kfree_on_online = cpuc->amd_nb;
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}

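/*
 * Drop the CPU's reference on the shared northbridge descriptor and
 * free it when the last CPU of the node goes offline.
 */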
static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        cpuhw = &per_cpu(cpu_hw_events, cpu);

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}

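/*
 * Format attributes: tell user space how the fields of a raw event
 * config map onto the event-select register; exported via sysfs under
 * the PMU's format directory.
 */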
PMU_FORMAT_ATTR(event,  "config:0-7,32-35");
PMU_FORMAT_ATTR(umask,  "config:8-15");
PMU_FORMAT_ATTR(edge,   "config:18");
PMU_FORMAT_ATTR(inv,    "config:23");
PMU_FORMAT_ATTR(cmask,  "config:24-31");

static struct attribute *amd_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask.attr,
        NULL,
};

static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .format_attrs           = amd_format_attr,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

/* event type ranges; used below as GCC case ranges */
#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

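/*
 * The second argument of EVENT_CONSTRAINT() is the counter mask:
 * 0x01 is PERF_CTL[0], 0x07 is PERF_CTL[2:0], 0x38 is PERF_CTL[5:3],
 * and so on. amd_f15_PMC30 (mask 0x09, counters 3 and 0) needs
 * EVENT_CONSTRAINT_OVERLAP because its mask partially overlaps other
 * masks without being a subset or superset of them.
 */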
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                case 0x031:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                return &amd_f15_PMC20;
                        return &emptyconstraint;
                case 0x1C0:
                        return &amd_f15_PMC53;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* not yet implemented */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
        .name                   = "AMD Family 15h",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_F15H_PERF_CTL,
        .perfctr                = MSR_F15H_PERF_CTR,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS_F15H,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints_f15h,
        /* northbridge counters not yet implemented: */
#if 0
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_dead               = amd_pmu_cpu_dead,
#endif
        .cpu_starting           = amd_pmu_cpu_starting,
        .format_attrs           = amd_format_attr,
};

__init int amd_pmu_init(void)
{
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        /*
         * If the core performance counter extension exists, it must be
         * family 15h, otherwise fail. See x86_pmu_addr_offset().
         */
        switch (boot_cpu_data.x86) {
        case 0x15:
                if (!cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu_f15h;
                break;
        default:
                if (cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu;
                break;
        }

        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        return 0;
}

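/*
 * Presumably called when SVM becomes usable: clear the virtualization
 * mask so the Host-only/Guest-only bits are programmed as configured
 * and the hardware filters by guest/host mode.
 */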
void amd_pmu_enable_virt(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        cpuc->perf_ctr_virt_mask = 0;

        /* Reload all events */
        x86_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        /*
         * We only mask out the Host-only bit so that host-only counting works
         * when SVM is disabled. If someone sets up a guest-only counter when
         * SVM is disabled the Guest-only bit still gets set and the counter
         * will not count anything.
         */
        cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

        /* Reload all events */
        x86_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);