/linux-6.6.21/arch/x86/events/intel/
  lbr.c
      129  if (pmi && x86_pmu.version >= 4)  in __intel_pmu_lbr_enable()
      137  lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;  in __intel_pmu_lbr_enable()
      167  for (i = 0; i < x86_pmu.lbr_nr; i++)  in intel_pmu_lbr_reset_32()
      168  wrmsrl(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_32()
      175  for (i = 0; i < x86_pmu.lbr_nr; i++) {  in intel_pmu_lbr_reset_64()
      176  wrmsrl(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_64()
      177  wrmsrl(x86_pmu.lbr_to + i, 0);  in intel_pmu_lbr_reset_64()
      178  if (x86_pmu.lbr_has_info)  in intel_pmu_lbr_reset_64()
      179  wrmsrl(x86_pmu.lbr_info + i, 0);  in intel_pmu_lbr_reset_64()
      186  wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);  in intel_pmu_arch_lbr_reset()
      [all …]
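A minimal sketch of the reset pattern the lbr.c hits above show (lines 175-186), assuming the kernel-internal wrmsrl() helper and the x86_pmu fields visible in the listing; it is a reconstruction for illustration, not the verbatim kernel functions:

    /* Clear each legacy LBR entry via its FROM/TO/INFO MSRs. */
    static void lbr_reset_sketch(void)
    {
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
            wrmsrl(x86_pmu.lbr_from + i, 0);   /* branch source, slot i */
            wrmsrl(x86_pmu.lbr_to + i, 0);     /* branch target, slot i */
            if (x86_pmu.lbr_has_info)          /* optional LBR_INFO bank */
                wrmsrl(x86_pmu.lbr_info + i, 0);
        }
    }

    /* Architectural LBR: the reset hit at line 186 relies on a write to the depth MSR. */
    static void arch_lbr_reset_sketch(void)
    {
        wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
    }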
  core.c
      2266  cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);  in __intel_pmu_snapshot_branch_stack()
      2542  if (left == x86_pmu.max_period) {  in icl_set_topdown_event_period()
      2708  x86_pmu.num_topdown_events - 1);  in icl_update_topdown_event()
      2783  if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)  in intel_pmu_enable_fixed()
      2790  if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {  in intel_pmu_enable_fixed()
      2905  if (x86_pmu.version >= 2) {  in intel_pmu_reset()
      2911  if (x86_pmu.lbr_nr) {  in intel_pmu_reset()
      2941  if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||  in x86_pmu_handle_guest_pebs()
      2946  INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) {  in x86_pmu_handle_guest_pebs()
      2999  status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);  in handle_pmi_common()
      [all …]
  ds.c
      138  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_adl()
      142  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_adl()
      161  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_mtl()
      165  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_mtl()
      317  if (x86_pmu.pebs_no_tlb) {  in load_latency_data()
      327  if (!x86_pmu.pebs_block) {  in load_latency_data()
      513  size_t bsiz = x86_pmu.pebs_buffer_size;  in alloc_pebs_buffer()
      517  if (!x86_pmu.pebs)  in alloc_pebs_buffer()
      528  if (x86_pmu.intel_cap.pebs_format < 2) {  in alloc_pebs_buffer()
      542  max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);  in alloc_pebs_buffer()
      [all …]
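The alloc_pebs_buffer() hits show how the usable PEBS area is derived: hit 542 rounds the buffer down to a whole number of records. A self-contained sketch of that arithmetic, with hypothetical names:

    #include <stddef.h>

    /* Round a PEBS buffer down to a whole number of records (names hypothetical). */
    static size_t pebs_usable_bytes_sketch(size_t buffer_size, size_t record_size)
    {
        /* e.g. a 65536-byte buffer with 240-byte records keeps 273 * 240 = 65520 bytes */
        return record_size * (buffer_size / record_size);
    }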
  p6.c
      201  static __initconst const struct x86_pmu p6_pmu = {
      242  x86_pmu.attr_rdpmc_broken = 1;  in p6_pmu_rdpmc_quirk()
      243  x86_pmu.attr_rdpmc = 0;  in p6_pmu_rdpmc_quirk()
      249  x86_pmu = p6_pmu;  in p6_pmu_init()
  p4.c
      922   for (idx = 0; idx < x86_pmu.num_counters; idx++) {  in p4_pmu_disable_all()
      1001  for (idx = 0; idx < x86_pmu.num_counters; idx++) {  in p4_pmu_enable_all()
      1026  wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);  in p4_pmu_set_period()
      1043  for (idx = 0; idx < x86_pmu.num_counters; idx++) {  in p4_pmu_handle_irq()
      1062  if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))  in p4_pmu_handle_irq()
      1335  static __initconst const struct x86_pmu p4_pmu = {
      1387  x86_pmu = p4_pmu;  in p4_pmu_init()
      1398  for (i = 0; i < x86_pmu.num_counters; i++) {  in p4_pmu_init()
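Hit 1026 in p4_pmu_set_period() and the sign-bit test at hit 1062 show the usual trick for x86 up-counters: preload the two's complement of the remaining period, masked to the counter width, so the counter overflows after exactly that many events. A sketch of the arithmetic, assuming kernel integer types and hypothetical names:

    /* Value to program so a counter masked to 'cntval_mask' overflows after 'left' events. */
    static u64 counter_preload_sketch(s64 left, u64 cntval_mask)
    {
        /* e.g. left = 1000 on a 40-bit counter -> 0xfffffffc18 */
        return (u64)(-left) & cntval_mask;
    }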
  knc.c
      290  static const struct x86_pmu knc_pmu __initconst = {
      316  x86_pmu = knc_pmu;  in knc_pmu_init()
  bts.c
      590  if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)  in bts_init()

/linux-6.6.21/arch/x86/events/
  core.c
      47  struct x86_pmu x86_pmu __read_mostly;
      63  DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
      64  DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
      65  DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
      66  DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
      67  DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);
      69  DEFINE_STATIC_CALL_NULL(x86_pmu_assign, *x86_pmu.assign);
      71  DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
      72  DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
      73  DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
      [all …]
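These events/core.c hits show every x86_pmu callback fronted by a static call, so the hot paths use a patched direct branch instead of an indirect call through the function pointer. A hedged sketch of the pattern for one callback; the DEFINE line mirrors hit 65, while the update helper name here is illustrative:

    /* Typed from the x86_pmu member it dispatches to; starts out as a no-op NULL call. */
    DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);

    /* Once vendor init code has filled in x86_pmu, retarget the call site. */
    static void pmu_static_call_update_sketch(void)
    {
        static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);
    }

    /* Callers on the hot path then use: static_call(x86_pmu_enable_all)(added); */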
  perf_event.h
      696   typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
      716   bool __Fp = x86_pmu._field; \
      739   struct x86_pmu {
      991   __quirk.next = x86_pmu.quirks; \
      992   x86_pmu.quirks = &__quirk; \
      1051  extern struct x86_pmu x86_pmu __read_mostly;
      1053  DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
      1054  DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
      1066  return x86_pmu.lbr_sel_map &&  in x86_pmu_has_lbr_callstack()
      1067  x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;  in x86_pmu_has_lbr_callstack()
      [all …]
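Hits 991-992 are the body of the quirk-registration macro: each boot-time quirk is pushed onto a singly linked list headed at x86_pmu.quirks and walked once during PMU init. A self-contained sketch of that push, with hypothetical type and variable names:

    struct pmu_quirk_sketch {
        struct pmu_quirk_sketch *next;
        void (*func)(void);
    };

    static struct pmu_quirk_sketch *quirk_list;   /* stands in for x86_pmu.quirks */

    /* Prepend a quirk, mirroring hits 991-992. */
    static void add_quirk_sketch(struct pmu_quirk_sketch *q)
    {
        q->next = quirk_list;   /* __quirk.next = x86_pmu.quirks; */
        quirk_list = q;         /* x86_pmu.quirks = &__quirk;     */
    }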

/linux-6.6.21/arch/x86/events/amd/
  core.c
      324  if (!(x86_pmu.flags & PMU_FL_PAIR))  in amd_is_pair_event_code()
      333  DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);
      350  if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))  in amd_core_hw_config()
      379  if (has_branch_stack(event) && !x86_pmu.lbr_nr)  in amd_pmu_hw_config()
      406  for (i = 0; i < x86_pmu.num_counters; i++) {  in __amd_put_nb_event_constraints()
      473  for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {  in __amd_get_nb_event_constraints()
      516  for (i = 0; i < x86_pmu.num_counters; i++) {  in amd_alloc_nb()
      528  if (x86_pmu.lbr_nr)  in amd_pmu_cpu_reset()
      531  if (x86_pmu.version < 2)  in amd_pmu_cpu_reset()
      556  if (!x86_pmu.amd_nb_constraints)  in amd_pmu_cpu_prepare()
      [all …]
  brs.c
      62   x86_pmu.lbr_nr = 16;  in amd_brs_detect()
      65   x86_pmu.lbr_sel_map = NULL;  in amd_brs_detect()
      66   x86_pmu.lbr_sel_mask = 0;  in amd_brs_detect()
      86   if (!x86_pmu.lbr_nr)  in amd_brs_setup_filter()
      148  if (event->attr.sample_period <= x86_pmu.lbr_nr)  in amd_brs_hw_config()
      170  return (cfg->msroff ? cfg->msroff : x86_pmu.lbr_nr) - 1;  in amd_brs_get_tos()
      198  pr_cont("%d-deep BRS, ", x86_pmu.lbr_nr);  in amd_brs_init()
      302  if (WARN_ON_ONCE(cfg.msroff >= x86_pmu.lbr_nr))  in amd_brs_drain()
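The amd_brs_get_tos() hit (line 170) computes msroff - 1, wrapping to the deepest entry when msroff is zero, i.e. the previous slot in the circular BRS buffer. A tiny sketch of that wrap, names hypothetical:

    /* Previous slot in a circular buffer of depth 'nr'; off == 0 wraps to nr - 1. */
    static unsigned int brs_prev_slot_sketch(unsigned int off, unsigned int nr)
    {
        return (off ? off : nr) - 1;
    }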
  lbr.c
      170  for (i = 0; i < x86_pmu.lbr_nr; i++) {  in amd_pmu_lbr_read()
      251  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_setup_filter()
      327  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_reset()
      331  for (i = 0; i < x86_pmu.lbr_nr; i++) {  in amd_pmu_lbr_reset()
      346  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_add()
      365  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_del()
      394  if (!cpuc->lbr_users || !x86_pmu.lbr_nr)  in amd_pmu_lbr_enable_all()
      415  if (!cpuc->lbr_users || !x86_pmu.lbr_nr)  in amd_pmu_lbr_disable_all()
      429  if (x86_pmu.version < 2 || !boot_cpu_has(X86_FEATURE_AMD_LBR_V2))  in amd_pmu_lbr_init()
      434  x86_pmu.lbr_nr = ebx.split.lbr_v2_stack_sz;  in amd_pmu_lbr_init()
      [all …]

/linux-6.6.21/arch/x86/events/zhaoxin/
  core.c
      262  wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);  in zhaoxin_pmu_enable_all()
      370  if (x86_pmu.enabled_ack)  in zhaoxin_pmu_handle_irq()
      427  if (x86_pmu.event_constraints) {  in zhaoxin_get_event_constraints()
      428  for_each_event_constraint(c, x86_pmu.event_constraints) {  in zhaoxin_get_event_constraints()
      459  static const struct x86_pmu zhaoxin_pmu __initconst = {
      498  for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {  in zhaoxin_arch_events_quirk()
      529  x86_pmu = zhaoxin_pmu;  in zhaoxin_pmu_init()
      532  x86_pmu.version = version;  in zhaoxin_pmu_init()
      533  x86_pmu.num_counters = eax.split.num_counters;  in zhaoxin_pmu_init()
      534  x86_pmu.cntval_bits = eax.split.bit_width;  in zhaoxin_pmu_init()
      [all …]
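The zhaoxin_pmu_init() hits at 532-534 use the eax.split field names of the kernel's architectural-perfmon (CPUID leaf 0xA) decoding, so the PMU is presumably sized from that leaf: EAX reports the perfmon version, the number of general-purpose counters, and the counter bit width. A standalone userspace sketch of decoding that leaf, bit layout per the Intel SDM; this is illustrative, not kernel code:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
            return 1;       /* leaf not reported by this CPU */

        printf("perfmon version : %u\n", eax & 0xff);         /* bits  7:0  */
        printf("gp counters     : %u\n", (eax >> 8) & 0xff);  /* bits 15:8  */
        printf("counter width   : %u\n", (eax >> 16) & 0xff); /* bits 23:16 */
        return 0;
    }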

/linux-6.6.21/arch/x86/xen/
  pmu.c
      508  if (x86_pmu.handle_irq(&regs))  in xen_pmu_irq_handler()