Lines Matching refs:bp (kernel/events/hw_breakpoint.c)

108 static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)  in get_task_bps_mutex()  argument
110 struct task_struct *tsk = bp->hw.target; in get_task_bps_mutex()
115 static struct mutex *bp_constraints_lock(struct perf_event *bp) in bp_constraints_lock() argument
117 struct mutex *tsk_mtx = get_task_bps_mutex(bp); in bp_constraints_lock()
149 static bool bp_constraints_is_locked(struct perf_event *bp) in bp_constraints_is_locked() argument
151 struct mutex *tsk_mtx = get_task_bps_mutex(bp); in bp_constraints_is_locked()
158 static inline void assert_bp_constraints_lock_held(struct perf_event *bp) in assert_bp_constraints_lock_held() argument
160 struct mutex *tsk_mtx = get_task_bps_mutex(bp); in assert_bp_constraints_lock_held()
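Read together, these helpers implement a two-level locking scheme: a breakpoint targeting a task serializes on that task's perf mutex plus a shared (read-side) global lock, while a CPU-bound breakpoint takes the global lock exclusively. A minimal sketch of that policy, assuming the global lock is a percpu rw_semaphore named bp_cpuinfo_sem; the helper names mirror the listing, but the bodies are illustrative, not verbatim kernel code:

        static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)
        {
                struct task_struct *tsk = bp->hw.target;

                /* Task-bound breakpoints serialize on the target's perf mutex. */
                return tsk ? &tsk->perf_event_mutex : NULL;
        }

        static struct mutex *bp_constraints_lock(struct perf_event *bp)
        {
                struct mutex *tsk_mtx = get_task_bps_mutex(bp);

                if (tsk_mtx) {
                        /* Task target: per-task mutex + shared global read lock. */
                        mutex_lock(tsk_mtx);
                        percpu_down_read(&bp_cpuinfo_sem);
                } else {
                        /* CPU target: exclusive global lock. */
                        percpu_down_write(&bp_cpuinfo_sem);
                }
                return tsk_mtx;
        }

        static void bp_constraints_unlock(struct mutex *tsk_mtx)
        {
                if (tsk_mtx) {
                        percpu_up_read(&bp_cpuinfo_sem);
                        mutex_unlock(tsk_mtx);
                } else {
                        percpu_up_write(&bp_cpuinfo_sem);
                }
        }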
285 static inline int hw_breakpoint_weight(struct perf_event *bp) in hw_breakpoint_weight() argument
322 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) in task_bp_pinned() argument
331 assert_bp_constraints_lock_held(bp); in task_bp_pinned()
334 head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params); in task_bp_pinned()
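task_bp_pinned() counts how many slots the target task already pins, by walking that task's bucket in the task_bps_ht rhashtable list under RCU. A hedged sketch of the body (find_slot_idx() and the exact skip conditions are inferred; the cpu == -1 fast-path convention used at line 377 is elided):

        struct rhlist_head *head, *pos;
        struct perf_event *iter;
        int count = 0;

        assert_bp_constraints_lock_held(bp);

        rcu_read_lock();
        head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);
        if (head) {
                /* Sum the weight of same-type breakpoints on this task/CPU. */
                rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
                        if (find_slot_idx(iter->attr.bp_type) == type &&
                            (iter->cpu < 0 || iter->cpu == cpu))
                                count += hw_breakpoint_weight(iter);
                }
        }
        rcu_read_unlock();
        return count;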
358 static const struct cpumask *cpumask_of_bp(struct perf_event *bp) in cpumask_of_bp() argument
360 if (bp->cpu >= 0) in cpumask_of_bp()
361 return cpumask_of(bp->cpu); in cpumask_of_bp()
370 max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type) in max_bp_pinned_slots() argument
372 const struct cpumask *cpumask = cpumask_of_bp(bp); in max_bp_pinned_slots()
376 if (bp->hw.target && bp->cpu < 0) { in max_bp_pinned_slots()
377 int max_pinned = task_bp_pinned(-1, bp, type); in max_bp_pinned_slots()
394 if (!bp->hw.target) in max_bp_pinned_slots()
397 nr += task_bp_pinned(cpu, bp, type); in max_bp_pinned_slots()
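cpumask_of_bp() maps a breakpoint to the set of CPUs whose slot counters it affects: one CPU if bp->cpu is set, every possible CPU for a task-wide breakpoint. max_bp_pinned_slots() then returns the worst-case pinned count over that mask. A minimal sketch; max_task_pinned() is a hypothetical stand-in for the per-CPU task-histogram lookup, and the listing's fast path (task target with bp->cpu < 0, via task_bp_pinned(-1, ...)) is elided:

        static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
        {
                if (bp->cpu >= 0)
                        return cpumask_of(bp->cpu);
                return cpu_possible_mask;
        }

        static int max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
        {
                const struct cpumask *cpumask = cpumask_of_bp(bp);
                int pinned_slots = 0;
                int cpu;

                for_each_cpu(cpu, cpumask) {
                        int nr = get_bp_info(cpu, type)->cpu_pinned;

                        if (!bp->hw.target)
                                nr += max_task_pinned(cpu, type); /* hypothetical */
                        else
                                nr += task_bp_pinned(cpu, bp, type);

                        pinned_slots = max(nr, pinned_slots);
                }
                return pinned_slots;
        }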
409 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight) in toggle_bp_slot() argument
416 if (!bp->hw.target) { in toggle_bp_slot()
421 struct bp_cpuinfo *info = get_bp_info(bp->cpu, type); in toggle_bp_slot()
464 int ret = rhltable_remove(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params); in toggle_bp_slot()
472 next_tsk_pinned = task_bp_pinned(-1, bp, type); in toggle_bp_slot()
475 if (bp->cpu < 0) { /* Case 1: fast path */ in toggle_bp_slot()
477 next_tsk_pinned += hw_breakpoint_weight(bp); in toggle_bp_slot()
486 bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned, in toggle_bp_slot()
493 bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned, in toggle_bp_slot()
494 next_tsk_pinned + hw_breakpoint_weight(bp), weight); in toggle_bp_slot()
504 const struct cpumask *cpumask = cpumask_of_bp(bp); in toggle_bp_slot()
507 next_tsk_pinned = task_bp_pinned(cpu, bp, type); in toggle_bp_slot()
509 next_tsk_pinned += hw_breakpoint_weight(bp); in toggle_bp_slot()
518 assert_bp_constraints_lock_held(bp); in toggle_bp_slot()
521 return rhltable_insert(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params); in toggle_bp_slot()
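toggle_bp_slot() is the commit step: it moves the per-CPU counters by weight and keeps task_bps_ht membership in sync, removing before re-counting on disable and inserting only after the counters moved on enable. A hedged outline inferred from the listed lines; the tsk_pinned histogram bookkeeping is elided:

        static int toggle_bp_slot(struct perf_event *bp, bool enable,
                                  enum bp_type_idx type, int weight)
        {
                if (!enable)
                        weight = -weight;

                if (!bp->hw.target) {
                        /* CPU-bound: adjust bp->cpu's pinned counter; done. */
                        get_bp_info(bp->cpu, type)->cpu_pinned += weight;
                        return 0;
                }

                /* Task-bound: drop from the hash table before re-counting. */
                if (!enable)
                        WARN_ON(rhltable_remove(&task_bps_ht, &bp->hw.bp_list,
                                                task_bps_ht_params));

                /*
                 * Re-count the task's pinned breakpoints and shift the
                 * per-CPU tsk_pinned histograms from the old count to the
                 * new count (fast path when bp->cpu < 0, per-CPU loop
                 * otherwise).
                 */

                /* Insert into the hash table last when enabling. */
                if (enable)
                        return rhltable_insert(&task_bps_ht, &bp->hw.bp_list,
                                               task_bps_ht_params);
                return 0;
        }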
526 __weak int arch_reserve_bp_slot(struct perf_event *bp) in arch_reserve_bp_slot() argument
531 __weak void arch_release_bp_slot(struct perf_event *bp) in arch_release_bp_slot() argument
538 __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) in arch_unregister_hw_breakpoint() argument
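These __weak definitions are override points: an architecture that needs extra bookkeeping (powerpc, for example) supplies its own versions, and everyone else gets no-op defaults, which plausibly look like:

        __weak int arch_reserve_bp_slot(struct perf_event *bp)
        {
                return 0;
        }

        __weak void arch_release_bp_slot(struct perf_event *bp)
        {
        }

        __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
        {
        }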
592 static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type) in __reserve_bp_slot() argument
609 weight = hw_breakpoint_weight(bp); in __reserve_bp_slot()
612 max_pinned_slots = max_bp_pinned_slots(bp, type) + weight; in __reserve_bp_slot()
616 ret = arch_reserve_bp_slot(bp); in __reserve_bp_slot()
620 return toggle_bp_slot(bp, true, type, weight); in __reserve_bp_slot()
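__reserve_bp_slot() is the admission check: reject if the new breakpoint's worst-case pinned count would exceed the per-type slot budget, let the architecture veto, then commit via toggle_bp_slot(). A hedged reconstruction; find_slot_idx() and hw_breakpoint_slots_cached() are inferred from the surrounding kernel code:

        static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
        {
                enum bp_type_idx type;
                int max_pinned_slots;
                int weight;
                int ret;

                if (bp_type == HW_BREAKPOINT_EMPTY ||
                    bp_type == HW_BREAKPOINT_INVALID)
                        return -EINVAL;

                type = find_slot_idx(bp_type);
                weight = hw_breakpoint_weight(bp);

                /* Would this breakpoint overflow the budget on any CPU? */
                max_pinned_slots = max_bp_pinned_slots(bp, type) + weight;
                if (max_pinned_slots > hw_breakpoint_slots_cached(type))
                        return -ENOSPC;

                ret = arch_reserve_bp_slot(bp);
                if (ret)
                        return ret;

                return toggle_bp_slot(bp, true, type, weight);
        }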
623 int reserve_bp_slot(struct perf_event *bp) in reserve_bp_slot() argument
625 struct mutex *mtx = bp_constraints_lock(bp); in reserve_bp_slot()
626 int ret = __reserve_bp_slot(bp, bp->attr.bp_type); in reserve_bp_slot()
632 static void __release_bp_slot(struct perf_event *bp, u64 bp_type) in __release_bp_slot() argument
637 arch_release_bp_slot(bp); in __release_bp_slot()
640 weight = hw_breakpoint_weight(bp); in __release_bp_slot()
641 WARN_ON(toggle_bp_slot(bp, false, type, weight)); in __release_bp_slot()
644 void release_bp_slot(struct perf_event *bp) in release_bp_slot() argument
646 struct mutex *mtx = bp_constraints_lock(bp); in release_bp_slot()
648 arch_unregister_hw_breakpoint(bp); in release_bp_slot()
649 __release_bp_slot(bp, bp->attr.bp_type); in release_bp_slot()
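reserve_bp_slot() and release_bp_slot() are thin lock/unlock wrappers around the __ variants; release additionally gives the architecture its unregister callback before the slot is returned. A sketch, with bp_constraints_unlock() being the counterpart implied by the listing:

        int reserve_bp_slot(struct perf_event *bp)
        {
                struct mutex *mtx = bp_constraints_lock(bp);
                int ret = __reserve_bp_slot(bp, bp->attr.bp_type);

                bp_constraints_unlock(mtx);
                return ret;
        }

        void release_bp_slot(struct perf_event *bp)
        {
                struct mutex *mtx = bp_constraints_lock(bp);

                arch_unregister_hw_breakpoint(bp);
                __release_bp_slot(bp, bp->attr.bp_type);
                bp_constraints_unlock(mtx);
        }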
653 static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type) in __modify_bp_slot() argument
657 __release_bp_slot(bp, old_type); in __modify_bp_slot()
659 err = __reserve_bp_slot(bp, new_type); in __modify_bp_slot()
669 WARN_ON(__reserve_bp_slot(bp, old_type)); in __modify_bp_slot()
675 static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type) in modify_bp_slot() argument
677 struct mutex *mtx = bp_constraints_lock(bp); in modify_bp_slot()
678 int ret = __modify_bp_slot(bp, old_type, new_type); in modify_bp_slot()
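__modify_bp_slot() swaps a slot's type with a rollback guarantee: the old slot is released first, and if reserving the new type fails, re-reserving the old type must succeed because its slot was just freed, hence the WARN_ON. Sketch:

        static int __modify_bp_slot(struct perf_event *bp, u64 old_type,
                                    u64 new_type)
        {
                int err;

                __release_bp_slot(bp, old_type);
                err = __reserve_bp_slot(bp, new_type);
                if (err) {
                        /*
                         * Roll back to the old type. This cannot fail: we
                         * just released that slot, so it is still available.
                         */
                        WARN_ON(__reserve_bp_slot(bp, old_type));
                }
                return err;
        }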
689 int dbg_reserve_bp_slot(struct perf_event *bp) in dbg_reserve_bp_slot() argument
693 if (bp_constraints_is_locked(bp)) in dbg_reserve_bp_slot()
698 ret = __reserve_bp_slot(bp, bp->attr.bp_type); in dbg_reserve_bp_slot()
704 int dbg_release_bp_slot(struct perf_event *bp) in dbg_release_bp_slot() argument
706 if (bp_constraints_is_locked(bp)) in dbg_release_bp_slot()
711 __release_bp_slot(bp, bp->attr.bp_type); in dbg_release_bp_slot()
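The dbg_ variants exist for the kernel debugger (KGDB), which may run in a context where sleeping on the constraints lock is impossible; they bail out with -1 if anyone holds it instead of blocking. A sketch under that assumption:

        int dbg_reserve_bp_slot(struct perf_event *bp)
        {
                /* Cannot sleep in debugger context; refuse rather than block. */
                if (bp_constraints_is_locked(bp))
                        return -1;

                return __reserve_bp_slot(bp, bp->attr.bp_type);
        }

        int dbg_release_bp_slot(struct perf_event *bp)
        {
                if (bp_constraints_is_locked(bp))
                        return -1;

                __release_bp_slot(bp, bp->attr.bp_type);
                return 0;
        }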
717 static int hw_breakpoint_parse(struct perf_event *bp, in hw_breakpoint_parse() argument
723 err = hw_breakpoint_arch_parse(bp, attr, hw); in hw_breakpoint_parse()
741 int register_perf_hw_breakpoint(struct perf_event *bp) in register_perf_hw_breakpoint() argument
746 err = reserve_bp_slot(bp); in register_perf_hw_breakpoint()
750 err = hw_breakpoint_parse(bp, &bp->attr, &hw); in register_perf_hw_breakpoint()
752 release_bp_slot(bp); in register_perf_hw_breakpoint()
756 bp->hw.info = hw; in register_perf_hw_breakpoint()
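register_perf_hw_breakpoint() reserves the slot before parsing the attributes, and rolls the reservation back if parsing fails, so a half-registered breakpoint never holds a slot. Sketch:

        int register_perf_hw_breakpoint(struct perf_event *bp)
        {
                struct arch_hw_breakpoint hw = { };
                int err;

                err = reserve_bp_slot(bp);
                if (err)
                        return err;

                err = hw_breakpoint_parse(bp, &bp->attr, &hw);
                if (err) {
                        release_bp_slot(bp);
                        return err;
                }

                bp->hw.info = hw;
                return 0;
        }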
789 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, in modify_user_hw_breakpoint_check() argument
795 err = hw_breakpoint_parse(bp, attr, &hw); in modify_user_hw_breakpoint_check()
802 old_attr = bp->attr; in modify_user_hw_breakpoint_check()
808 if (bp->attr.bp_type != attr->bp_type) { in modify_user_hw_breakpoint_check()
809 err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type); in modify_user_hw_breakpoint_check()
814 hw_breakpoint_copy_attr(&bp->attr, attr); in modify_user_hw_breakpoint_check()
815 bp->hw.info = hw; in modify_user_hw_breakpoint_check()
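modify_user_hw_breakpoint_check() follows a validate-then-commit pattern: parse the new attributes into a scratch arch_hw_breakpoint first, swap the slot type only if it changed, and only then copy the new attr and hw info into the event. In outline (the old_attr/check logic at lines 789-802, which rejects disallowed field changes when check is true, is elided):

        struct arch_hw_breakpoint hw = { };
        int err;

        err = hw_breakpoint_parse(bp, attr, &hw);       /* validate first */
        if (err)
                return err;

        if (bp->attr.bp_type != attr->bp_type) {        /* re-reserve on type change */
                err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
                if (err)
                        return err;
        }

        hw_breakpoint_copy_attr(&bp->attr, attr);       /* commit */
        bp->hw.info = hw;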
825 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) in modify_user_hw_breakpoint() argument
835 if (irqs_disabled() && bp->ctx && bp->ctx->task == current) in modify_user_hw_breakpoint()
836 perf_event_disable_local(bp); in modify_user_hw_breakpoint()
838 perf_event_disable(bp); in modify_user_hw_breakpoint()
840 err = modify_user_hw_breakpoint_check(bp, attr, false); in modify_user_hw_breakpoint()
842 if (!bp->attr.disabled) in modify_user_hw_breakpoint()
843 perf_event_enable(bp); in modify_user_hw_breakpoint()
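From a caller's perspective (ptrace is the main user), modifying a live breakpoint means handing in a whole replacement attr; the event is disabled around the update and re-enabled afterwards unless the new attr leaves it disabled. A hypothetical caller that moves a breakpoint to a new address:

        /* Hypothetical helper: retarget an existing breakpoint. */
        static int move_breakpoint(struct perf_event *bp, unsigned long new_addr)
        {
                struct perf_event_attr attr = bp->attr;

                attr.bp_addr = new_addr;
                return modify_user_hw_breakpoint(bp, &attr);
        }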
853 void unregister_hw_breakpoint(struct perf_event *bp) in unregister_hw_breakpoint() argument
855 if (!bp) in unregister_hw_breakpoint()
857 perf_event_release_kernel(bp); in unregister_hw_breakpoint()
874 struct perf_event * __percpu *cpu_events, *bp; in register_wide_hw_breakpoint() local
884 bp = perf_event_create_kernel_counter(attr, cpu, NULL, in register_wide_hw_breakpoint()
886 if (IS_ERR(bp)) { in register_wide_hw_breakpoint()
887 err = PTR_ERR(bp); in register_wide_hw_breakpoint()
891 per_cpu(*cpu_events, cpu) = bp; in register_wide_hw_breakpoint()
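register_wide_hw_breakpoint() allocates one kernel counter per CPU and stores each in a per-CPU slot; on failure the already-created events are torn down. Usage mirrors samples/hw_breakpoint/data_breakpoint.c; a hedged sketch of a module installing a system-wide watchpoint (wide_bp_handler and install_watchpoint are illustrative names):

        static struct perf_event * __percpu *wide_bp;

        static void wide_bp_handler(struct perf_event *bp,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
        {
                pr_info("watched address accessed\n");
                dump_stack();
        }

        static int install_watchpoint(unsigned long addr)
        {
                struct perf_event_attr attr;

                hw_breakpoint_init(&attr);      /* PERF_TYPE_BREAKPOINT, pinned */
                attr.bp_addr = addr;
                attr.bp_len  = HW_BREAKPOINT_LEN_4;
                attr.bp_type = HW_BREAKPOINT_W;

                wide_bp = register_wide_hw_breakpoint(&attr, wide_bp_handler, NULL);
                if (IS_ERR((void __force *)wide_bp))
                        return PTR_ERR((void __force *)wide_bp);
                return 0;
        }

        static void remove_watchpoint(void)
        {
                unregister_wide_hw_breakpoint(wide_bp); /* frees every per-CPU event */
        }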
973 static int hw_breakpoint_event_init(struct perf_event *bp) in hw_breakpoint_event_init() argument
977 if (bp->attr.type != PERF_TYPE_BREAKPOINT) in hw_breakpoint_event_init()
983 if (has_branch_stack(bp)) in hw_breakpoint_event_init()
986 err = register_perf_hw_breakpoint(bp); in hw_breakpoint_event_init()
990 bp->destroy = bp_perf_event_destroy; in hw_breakpoint_event_init()
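hw_breakpoint_event_init() is the PMU's event_init hook: it claims only PERF_TYPE_BREAKPOINT events (-ENOENT lets other PMUs try), rejects branch sampling, and installs the destroy callback that will release the slot. Sketch:

        static int hw_breakpoint_event_init(struct perf_event *bp)
        {
                int err;

                if (bp->attr.type != PERF_TYPE_BREAKPOINT)
                        return -ENOENT;         /* not ours; try another PMU */

                if (has_branch_stack(bp))
                        return -EOPNOTSUPP;     /* no branch sampling here */

                err = register_perf_hw_breakpoint(bp);
                if (err)
                        return err;

                bp->destroy = bp_perf_event_destroy;
                return 0;
        }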
995 static int hw_breakpoint_add(struct perf_event *bp, int flags) in hw_breakpoint_add() argument
998 bp->hw.state = PERF_HES_STOPPED; in hw_breakpoint_add()
1000 if (is_sampling_event(bp)) { in hw_breakpoint_add()
1001 bp->hw.last_period = bp->hw.sample_period; in hw_breakpoint_add()
1002 perf_swevent_set_period(bp); in hw_breakpoint_add()
1005 return arch_install_hw_breakpoint(bp); in hw_breakpoint_add()
1008 static void hw_breakpoint_del(struct perf_event *bp, int flags) in hw_breakpoint_del() argument
1010 arch_uninstall_hw_breakpoint(bp); in hw_breakpoint_del()
1013 static void hw_breakpoint_start(struct perf_event *bp, int flags) in hw_breakpoint_start() argument
1015 bp->hw.state = 0; in hw_breakpoint_start()
1018 static void hw_breakpoint_stop(struct perf_event *bp, int flags) in hw_breakpoint_stop() argument
1020 bp->hw.state = PERF_HES_STOPPED; in hw_breakpoint_stop()
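These four callbacks plug into the perf PMU contract: add/del install and remove the breakpoint on the current CPU via the arch hooks, while start/stop flip the PERF_HES_STOPPED state bit. Wired together, the PMU registration plausibly looks like:

        static struct pmu perf_breakpoint = {
                .task_ctx_nr    = perf_sw_context,

                .event_init     = hw_breakpoint_event_init,
                .add            = hw_breakpoint_add,
                .del            = hw_breakpoint_del,
                .start          = hw_breakpoint_start,
                .stop           = hw_breakpoint_stop,
                .read           = hw_breakpoint_pmu_read,
        };

        perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);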