/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_BPF_COUNTER_H
#define __PERF_BPF_COUNTER_H 1

#include <linux/list.h>
#include <sys/resource.h>

#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#endif

struct evsel;
struct target;
struct bpf_counter;

typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);
typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
					   struct target *target);
typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
					       int cpu_map_idx,
					       int fd);

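/*
 * Set of callbacks implementing one BPF counting method.  perf provides
 * several such tables (e.g. the bpf-prog profiler and the bperf
 * leader/follower scheme, see util/bpf_counter.c); a BPF-counted evsel
 * dispatches through the table it was bound to at load time.
 */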
struct bpf_counter_ops {
	bpf_counter_evsel_target_op load;
	bpf_counter_evsel_op enable;
	bpf_counter_evsel_op disable;
	bpf_counter_evsel_op read;
	bpf_counter_evsel_op destroy;
	bpf_counter_evsel_install_pe_op install_pe;
};

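/*
 * Per-evsel BPF counter state: @skel points to the loaded BPF skeleton
 * backing this counter and @list links it into the evsel's list of BPF
 * counters.
 */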
struct bpf_counter {
	void *skel;
	struct list_head list;
};

#ifdef HAVE_BPF_SKEL

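/*
 * Entry points implemented in util/bpf_counter.c.  A rough calling
 * sequence on the perf tool side looks like the following (illustrative
 * sketch only; the exact order and error handling in builtin-stat.c
 * differ):
 *
 *	bpf_counter__load(evsel, target);		// pick + load a skeleton
 *	bpf_counter__install_pe(evsel, cpu_map_idx, fd);// for each opened event fd
 *	bpf_counter__enable(evsel);
 *	// ... workload runs ...
 *	bpf_counter__read(evsel);			// update evsel's counts
 *	bpf_counter__disable(evsel);
 *	bpf_counter__destroy(evsel);
 */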
int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd);

#else /* HAVE_BPF_SKEL */

#include <linux/err.h>

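/*
 * No-op fallbacks so that callers need no #ifdef HAVE_BPF_SKEL checks;
 * read() reports -EAGAIN since no BPF-based count can be provided.
 */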
static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
				    struct target *target __maybe_unused)
{
	return 0;
}

static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
	return -EAGAIN;
}

static inline void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
					  int cpu_map_idx __maybe_unused,
					  int fd __maybe_unused)
{
	return 0;
}

#endif /* HAVE_BPF_SKEL */

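/*
 * Bump RLIMIT_MEMLOCK to the maximum: on kernels that still charge BPF
 * maps and programs against RLIMIT_MEMLOCK (before memcg-based
 * accounting), a low default limit can make BPF object creation fail.
 */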
static inline void set_max_rlimit(void)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &rinf);
}

#ifdef HAVE_BPF_SKEL

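/*
 * Small wrappers around bpf_obj_get_info_by_fd() that fetch the kernel
 * ids of links, programs and maps; bperf uses these ids to locate and
 * share objects between perf sessions.  They assume a valid fd and do
 * not check for errors.
 */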
static inline __u32 bpf_link_get_id(int fd)
{
	struct bpf_link_info link_info = { .id = 0, };
	__u32 link_info_len = sizeof(link_info);

	bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
	return link_info.id;
}

static inline __u32 bpf_link_get_prog_id(int fd)
{
	struct bpf_link_info link_info = { .id = 0, };
	__u32 link_info_len = sizeof(link_info);

	bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
	return link_info.prog_id;
}

static inline __u32 bpf_map_get_id(int fd)
{
	struct bpf_map_info map_info = { .id = 0, };
	__u32 map_info_len = sizeof(map_info);

	bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
	return map_info.id;
}

/*
 * Trigger the bperf leader program on @cpu via BPF_PROG_TEST_RUN
 * (BPF_F_TEST_RUN_ON_CPU), so that it reads the perf events and updates
 * its maps for that cpu.
 */
static inline int bperf_trigger_reading(int prog_fd, int cpu)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .ctx_in = NULL,
			    .ctx_size_in = 0,
			    .flags = BPF_F_TEST_RUN_ON_CPU,
			    .cpu = cpu,
			    .retval = 0,
			   );

	return bpf_prog_test_run_opts(prog_fd, &opts);
}
#endif /* HAVE_BPF_SKEL */

#endif /* __PERF_BPF_COUNTER_H */