#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC				32
#define X86_PMC_MAX_FIXED				 3

#define X86_PMC_IDX_GENERIC				 0
#define X86_PMC_IDX_FIXED				32
#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_NUM_COUNTERS		4
#define AMD64_NUM_COUNTERS_F15H		6
#define AMD64_NUM_COUNTERS_MAX		AMD64_NUM_COUNTERS_F15H

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
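
/*
 * Illustrative sketch (not part of this header's API): how the unions above
 * decode CPUID leaf 0xA into struct x86_pmu_capability.  Kernel code should
 * use perf_get_x86_pmu_capability(), declared further down; the helper name
 * here is made up for the example and the CPUID instruction is open-coded so
 * the sketch does not depend on <asm/processor.h>.
 */
static inline void __example_decode_cpuid10(struct x86_pmu_capability *cap)
{
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	union cpuid10_edx edx;
	unsigned int ecx;

	asm volatile("cpuid"
		     : "=a" (eax.full), "=b" (ebx.full),
		       "=c" (ecx), "=d" (edx.full)
		     : "0" (0xa), "2" (0));
	(void)ecx;	/* leaf 0xA returns nothing of interest in ECX */

	cap->version		= eax.split.version_id;
	cap->num_counters_gp	= eax.split.num_counters;
	cap->num_counters_fixed	= edx.split.num_counters_fixed;
	cap->bit_width_gp	= eax.split.bit_width;
	cap->bit_width_fixed	= edx.split.bit_width_fixed;
	cap->events_mask	= ebx.full;	/* set bits: event NOT available */
	cap->events_mask_len	= eax.split.mask_length;
}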

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS		(X86_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES	0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL		(1U<<0)
#define IBS_CAPS_FETCHSAM	(1U<<1)
#define IBS_CAPS_OPSAM		(1U<<2)
#define IBS_CAPS_RDWROPCNT	(1U<<3)
#define IBS_CAPS_OPCNT		(1U<<4)
#define IBS_CAPS_BRNTRGT	(1U<<5)
#define IBS_CAPS_OPCNTEXT	(1U<<6)

#define IBS_CAPS_DEFAULT	(IBS_CAPS_AVAIL		\
				 | IBS_CAPS_FETCHSAM	\
				 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */

extern u32 get_ibs_caps(void);
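
/*
 * Illustrative sketch (not part of this header's API): how the IBS_CAPS_*
 * bits map onto CPUID Fn8000_001B.  Kernel code should simply call
 * get_ibs_caps(); the helper name below is made up, the CPUID instruction is
 * open-coded to keep the example self-contained, and the CPU is assumed to
 * have already been identified as IBS-capable (X86_FEATURE_IBS).
 */
static inline u32 __example_probe_ibs_caps(void)
{
	u32 eax, ebx, ecx, edx;

	/* Make sure the extended leaf exists before querying it. */
	asm volatile("cpuid"
		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		     : "0" (0x80000000U), "2" (0));
	if (eax < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	asm volatile("cpuid"
		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		     : "0" (IBS_CPUID_FEATURES), "2" (0));
	(void)ebx, (void)ecx, (void)edx;	/* only EAX carries the flags */

	/* Bit 0 flags a valid feature word; otherwise assume the minimum. */
	return (eax & IBS_CAPS_AVAIL) ? eax : IBS_CAPS_DEFAULT;
}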

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
 * This flag is otherwise unused and ABI specified to be 0, so nobody should
 * care what we do with it.
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */