1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * linux/arch/arm/include/asm/pmu.h
4 *
5 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
6 */
7
8 #ifndef __ARM_PMU_H__
9 #define __ARM_PMU_H__
10
11 #include <linux/interrupt.h>
12 #include <linux/perf_event.h>
13 #include <linux/platform_device.h>
14 #include <linux/sysfs.h>
15 #include <asm/cputype.h>
16
17 #ifdef CONFIG_ARM_PMU
18
19 /*
20 * The ARMv7 CPU PMU supports up to 32 event counters.
21 */
22 #define ARMPMU_MAX_HWEVENTS 32
23
24 /*
25 * ARM PMU hw_event flags
26 */
27 #define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */
28 #define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */
29 #define ARMPMU_EVT_63BIT 0x00004 /* Event uses a 63bit counter */
30
31 static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
32 static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
33 static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_63BIT) == ARMPMU_EVT_63BIT);
34
35 #define HW_OP_UNSUPPORTED 0xFFFF
36 #define C(_x) PERF_COUNT_HW_CACHE_##_x
37 #define CACHE_OP_UNSUPPORTED 0xFFFF
38
39 #define PERF_MAP_ALL_UNSUPPORTED \
40 [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
41
42 #define PERF_CACHE_MAP_ALL_UNSUPPORTED \
43 [0 ... C(MAX) - 1] = { \
44 [0 ... C(OP_MAX) - 1] = { \
45 [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
46 }, \
47 }
48
49 /* The events for a given PMU register set. */
50 struct pmu_hw_events {
51 /*
52 * The events that are active on the PMU for the given index.
53 */
54 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
55
56 /*
57 * A 1 bit for an index indicates that the counter is being used for
58 * an event. A 0 means that the counter can be used.
59 */
60 DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
61
62 /*
63 * Hardware lock to serialize accesses to PMU registers. Needed for the
64 * read/modify/write sequences.
65 */
66 raw_spinlock_t pmu_lock;
67
68 /*
69 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
70 * already have to allocate this struct per cpu.
71 */
72 struct arm_pmu *percpu_pmu;
73
74 int irq;
75 };
76
77 enum armpmu_attr_groups {
78 ARMPMU_ATTR_GROUP_COMMON,
79 ARMPMU_ATTR_GROUP_EVENTS,
80 ARMPMU_ATTR_GROUP_FORMATS,
81 ARMPMU_ATTR_GROUP_CAPS,
82 ARMPMU_NR_ATTR_GROUPS
83 };
84
85 struct arm_pmu {
86 struct pmu pmu;
87 cpumask_t supported_cpus;
88 char *name;
89 int pmuver;
90 irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
91 void (*enable)(struct perf_event *event);
92 void (*disable)(struct perf_event *event);
93 int (*get_event_idx)(struct pmu_hw_events *hw_events,
94 struct perf_event *event);
95 void (*clear_event_idx)(struct pmu_hw_events *hw_events,
96 struct perf_event *event);
97 int (*set_event_filter)(struct hw_perf_event *evt,
98 struct perf_event_attr *attr);
99 u64 (*read_counter)(struct perf_event *event);
100 void (*write_counter)(struct perf_event *event, u64 val);
101 void (*start)(struct arm_pmu *);
102 void (*stop)(struct arm_pmu *);
103 void (*reset)(void *);
104 int (*map_event)(struct perf_event *event);
105 int num_events;
106 bool secure_access; /* 32-bit ARM only */
107 #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
108 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
109 #define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
110 DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
111 struct platform_device *plat_device;
112 struct pmu_hw_events __percpu *hw_events;
113 struct hlist_node node;
114 struct notifier_block cpu_pm_nb;
115 /* the attr_groups array must be NULL-terminated */
116 const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
117 /* store the PMMIR_EL1 to expose slots */
118 u64 reg_pmmir;
119
120 /* Only to be used by ACPI probing code */
121 unsigned long acpi_cpuid;
122 };
123
124 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
125
126 u64 armpmu_event_update(struct perf_event *event);
127
128 int armpmu_event_set_period(struct perf_event *event);
129
130 int armpmu_map_event(struct perf_event *event,
131 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
132 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
133 [PERF_COUNT_HW_CACHE_OP_MAX]
134 [PERF_COUNT_HW_CACHE_RESULT_MAX],
135 u32 raw_event_mask);
136
137 typedef int (*armpmu_init_fn)(struct arm_pmu *);
138
139 struct pmu_probe_info {
140 unsigned int cpuid;
141 unsigned int mask;
142 armpmu_init_fn init;
143 };
144
145 #define PMU_PROBE(_cpuid, _mask, _fn) \
146 { \
147 .cpuid = (_cpuid), \
148 .mask = (_mask), \
149 .init = (_fn), \
150 }
151
152 #define ARM_PMU_PROBE(_cpuid, _fn) \
153 PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
154
155 #define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
156
157 #define XSCALE_PMU_PROBE(_version, _fn) \
158 PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
159
160 int arm_pmu_device_probe(struct platform_device *pdev,
161 const struct of_device_id *of_table,
162 const struct pmu_probe_info *probe_table);
163
164 #ifdef CONFIG_ACPI
165 int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
166 #else
arm_pmu_acpi_probe(armpmu_init_fn init_fn)167 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
168 #endif
169
170 #ifdef CONFIG_KVM
171 void kvm_host_pmu_init(struct arm_pmu *pmu);
172 #else
173 #define kvm_host_pmu_init(x) do { } while(0)
174 #endif
175
176 bool arm_pmu_irq_is_nmi(void);
177
178 /* Internal functions only for core arm_pmu code */
179 struct arm_pmu *armpmu_alloc(void);
180 void armpmu_free(struct arm_pmu *pmu);
181 int armpmu_register(struct arm_pmu *pmu);
182 int armpmu_request_irq(int irq, int cpu);
183 void armpmu_free_irq(int irq, int cpu);
184
185 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"
186
187 #endif /* CONFIG_ARM_PMU */
188
189 #define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
190 #define ARMV8_TRBE_PDEV_NAME "arm,trbe"
191
192 #endif /* __ARM_PMU_H__ */
193