
Searched refs:lbr_nr (Results 1 – 9 of 9) sorted by relevance

/linux-6.6.21/arch/x86/events/intel/
lbr.c
167 for (i = 0; i < x86_pmu.lbr_nr; i++) in intel_pmu_lbr_reset_32()
175 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_reset_64()
186 wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr); in intel_pmu_arch_lbr_reset()
193 if (!x86_pmu.lbr_nr) in intel_pmu_lbr_reset()
368 mask = x86_pmu.lbr_nr - 1; in intel_pmu_lbr_restore()
374 for (; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_restore()
395 if (!entries[x86_pmu.lbr_nr - 1].from) in intel_pmu_arch_lbr_restore()
398 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_arch_lbr_restore()
460 mask = x86_pmu.lbr_nr - 1; in intel_pmu_lbr_save()
462 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_save()
[all …]
core.c
2266 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); in __intel_pmu_snapshot_branch_stack()
2911 if (x86_pmu.lbr_nr) { in intel_pmu_reset()
5125 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; in is_lbr_from()
5466 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); in branches_show()
5526 return x86_pmu.lbr_nr ? attr->mode : 0; in lbr_is_visible()
6818 x86_pmu.lbr_nr = 0; in intel_pmu_init()
6819 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_init()
6822 x86_pmu.lbr_nr = 0; in intel_pmu_init()
6825 if (x86_pmu.lbr_nr) { in intel_pmu_init()
6828 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); in intel_pmu_init()
ds.c
1175 sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry); in adaptive_pebs_record_size_update()
1226 ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); in pebs_update_adaptive_cfg()
/linux-6.6.21/arch/x86/events/amd/
lbr.c
170 for (i = 0; i < x86_pmu.lbr_nr; i++) { in amd_pmu_lbr_read()
251 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_setup_filter()
327 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_reset()
331 for (i = 0; i < x86_pmu.lbr_nr; i++) { in amd_pmu_lbr_reset()
346 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_add()
365 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_del()
394 if (!cpuc->lbr_users || !x86_pmu.lbr_nr) in amd_pmu_lbr_enable_all()
415 if (!cpuc->lbr_users || !x86_pmu.lbr_nr) in amd_pmu_lbr_disable_all()
434 x86_pmu.lbr_nr = ebx.split.lbr_v2_stack_sz; in amd_pmu_lbr_init()
436 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); in amd_pmu_lbr_init()
brs.c
62 x86_pmu.lbr_nr = 16; in amd_brs_detect()
86 if (!x86_pmu.lbr_nr) in amd_brs_setup_filter()
148 if (event->attr.sample_period <= x86_pmu.lbr_nr) in amd_brs_hw_config()
170 return (cfg->msroff ? cfg->msroff : x86_pmu.lbr_nr) - 1; in amd_brs_get_tos()
198 pr_cont("%d-deep BRS, ", x86_pmu.lbr_nr); in amd_brs_init()
302 if (WARN_ON_ONCE(cfg.msroff >= x86_pmu.lbr_nr)) in amd_brs_drain()
core.c
379 if (has_branch_stack(event) && !x86_pmu.lbr_nr) in amd_pmu_hw_config()
528 if (x86_pmu.lbr_nr) in amd_pmu_cpu_reset()
1239 if (has_branch_stack(event) && *left > x86_pmu.lbr_nr) in amd_pmu_limit_period()
1240 *left -= x86_pmu.lbr_nr; in amd_pmu_limit_period()
1282 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); in branches_show()
1295 return x86_pmu.lbr_nr ? attr->mode : 0; in amd_branches_is_visible()
1317 return static_cpu_has(X86_FEATURE_BRS) && x86_pmu.lbr_nr ? in amd_brs_is_visible()
/linux-6.6.21/tools/perf/util/
machine.c
2553 int lbr_nr = lbr_stack->nr; in lbr_callchain_add_lbr_ip() local
2600 for (i = 0; i < lbr_nr; i++) { in lbr_callchain_add_lbr_ip()
2615 for (i = lbr_nr - 1; i >= 0; i--) { in lbr_callchain_add_lbr_ip()
2627 if (lbr_nr > 0) { in lbr_callchain_add_lbr_ip()
/linux-6.6.21/arch/x86/events/
perf_event.h
855 lbr_info, lbr_nr; /* LBR base regs and size */ member
core.c
552 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) in x86_pmu_max_precise()
1551 if (x86_pmu.lbr_nr) { in perf_event_print_debug()
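
The hits above share one pattern: x86_pmu.lbr_nr acts both as a feature gate (zero means the CPU exposes no LBR/BRS stack) and as the loop bound when walking the per-entry LBR registers. The following is a minimal user-space sketch of that pattern only; struct fake_x86_pmu, fake_wrmsrl() and the MSR base values are hypothetical stand-ins, not kernel code.

#include <stdio.h>

/* Hypothetical, simplified stand-in for the kernel's struct x86_pmu;
 * only the lbr_nr usage pattern mirrors the search hits above. */
struct fake_x86_pmu {
	unsigned int lbr_from, lbr_to, lbr_nr;	/* LBR base regs and size */
};

static struct fake_x86_pmu x86_pmu = {
	.lbr_from = 0x680,	/* illustrative MSR bases, not authoritative */
	.lbr_to   = 0x6c0,
	.lbr_nr   = 32,
};

/* Stand-in for an MSR write; the real code uses wrmsrl(). */
static void fake_wrmsrl(unsigned int msr, unsigned long long val)
{
	printf("wrmsr 0x%x <- 0x%llx\n", msr, val);
}

/* Pattern seen in intel_pmu_lbr_reset_64() / amd_pmu_lbr_reset():
 * bail out when lbr_nr is zero, otherwise clear the lbr_nr
 * per-entry FROM/TO registers. */
static void lbr_reset_sketch(void)
{
	unsigned int i;

	if (!x86_pmu.lbr_nr)		/* no LBR support on this CPU */
		return;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		fake_wrmsrl(x86_pmu.lbr_from + i, 0);
		fake_wrmsrl(x86_pmu.lbr_to + i, 0);
	}
}

int main(void)
{
	lbr_reset_sketch();
	return 0;
}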