mod consts;
mod print;

use crate::bpf::helper::print::trace_printf;
use crate::bpf::map::{BpfCallBackFn, BpfMap};
use crate::include::bindings::linux_bpf::BPF_F_CURRENT_CPU;
use crate::libs::lazy_init::Lazy;
use crate::smp::core::smp_get_processor_id;
use alloc::{collections::BTreeMap, sync::Arc};
use core::ffi::c_void;
use system_error::SystemError;

type RawBPFHelperFn = fn(u64, u64, u64, u64, u64) -> u64;
type Result<T> = core::result::Result<T, SystemError>;
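/// Erase a concrete helper `fn` item to the uniform `RawBPFHelperFn` shape
/// (five `u64` arguments, one `u64` return value) so that every helper can be
/// stored in the same dispatch table.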
macro_rules! define_func {
    ($name:ident) => {
        core::mem::transmute::<usize, RawBPFHelperFn>($name as usize)
    };
}

/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_lookup_elem/
unsafe fn raw_map_lookup_elem(map: *mut c_void, key: *const c_void) -> *const c_void {
    let map = Arc::from_raw(map as *const BpfMap);
    let key_size = map.key_size();
    let key = core::slice::from_raw_parts(key as *const u8, key_size);
    let value = map_lookup_elem(&map, key);
    // log::info!("<raw_map_lookup_elem>: {:x?}", value);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match value {
        Ok(Some(value)) => value as *const c_void,
        _ => core::ptr::null_mut(),
    }
}

pub fn map_lookup_elem(map: &Arc<BpfMap>, key: &[u8]) -> Result<Option<*const u8>> {
    let mut binding = map.inner_map().lock();
    let value = binding.lookup_elem(key);
    match value {
        Ok(Some(value)) => Ok(Some(value.as_ptr())),
        _ => Ok(None),
    }
}

/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_perf_event_output/
///
/// See https://man7.org/linux/man-pages/man7/bpf-helpers.7.html
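///
/// The lower 32 bits of `flags` select the index into the perf event array
/// (or `BPF_F_CURRENT_CPU` to use the current CPU's entry); the upper 32 bits
/// are forwarded as flags to the perf subsystem.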
unsafe fn raw_perf_event_output(
    ctx: *mut c_void,
    map: *mut c_void,
    flags: u64,
    data: *mut c_void,
    size: u64,
) -> i64 {
    // log::info!("<raw_perf_event_output>: {:x?}", data);
    let map = Arc::from_raw(map as *const BpfMap);
    let data = core::slice::from_raw_parts(data as *const u8, size as usize);
    let res = perf_event_output(ctx, &map, flags, data);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match res {
        Ok(_) => 0,
        Err(e) => e as i64,
    }
}

pub fn perf_event_output(
    ctx: *mut c_void,
    map: &Arc<BpfMap>,
    flags: u64,
    data: &[u8],
) -> Result<()> {
    let mut binding = map.inner_map().lock();
    // Split `flags`: the low 32 bits carry the target index, the high 32 bits
    // carry the flags that are passed on to the perf subsystem.
    let index = flags as u32;
    let flags = (flags >> 32) as u32;
    let key = if index == BPF_F_CURRENT_CPU as u32 {
        smp_get_processor_id().data()
    } else {
        index
    };
    let fd = binding
        .lookup_elem(&key.to_ne_bytes())?
        .ok_or(SystemError::ENOENT)?;
    let fd = u32::from_ne_bytes(fd.try_into().map_err(|_| SystemError::EINVAL)?);
    crate::perf::perf_event_output(ctx, fd as usize, flags, data)?;
    Ok(())
}

/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_probe_read/
fn raw_bpf_probe_read(dst: *mut c_void, size: u32, unsafe_ptr: *const c_void) -> i64 {
    log::info!(
        "raw_bpf_probe_read, dst:{:x}, size:{}, unsafe_ptr: {:x}",
        dst as usize,
        size,
        unsafe_ptr as usize
    );
    let (dst, src) = unsafe {
        let dst = core::slice::from_raw_parts_mut(dst as *mut u8, size as usize);
        let src = core::slice::from_raw_parts(unsafe_ptr as *const u8, size as usize);
        (dst, src)
    };
    let res = bpf_probe_read(dst, src);
    match res {
        Ok(_) => 0,
        Err(e) => e as i64,
    }
}

/// For tracing programs, safely attempt to read `size`
/// bytes from kernel space address `unsafe_ptr` and
/// store the data in `dst`.
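///
/// Note: the current implementation performs a plain `copy_from_slice` and
/// assumes the source points at valid, mapped kernel memory of the requested
/// length; a faulting read is not caught and converted into an error.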
pub fn bpf_probe_read(dst: &mut [u8], src: &[u8]) -> Result<()> {
    log::info!("bpf_probe_read: len: {}", dst.len());
    dst.copy_from_slice(src);
    Ok(())
}

unsafe fn raw_map_update_elem(
    map: *mut c_void,
    key: *const c_void,
    value: *const c_void,
    flags: u64,
) -> i64 {
    let map = Arc::from_raw(map as *const BpfMap);
    let key_size = map.key_size();
    let value_size = map.value_size();
    // log::info!("<raw_map_update_elem>: flags: {:x?}", flags);
    let key = core::slice::from_raw_parts(key as *const u8, key_size);
    let value = core::slice::from_raw_parts(value as *const u8, value_size);
    let res = map_update_elem(&map, key, value, flags);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match res {
        Ok(_) => 0,
        Err(e) => e as _,
    }
}

pub fn map_update_elem(map: &Arc<BpfMap>, key: &[u8], value: &[u8], flags: u64) -> Result<()> {
    let mut binding = map.inner_map().lock();
    binding.update_elem(key, value, flags)
}

/// Delete entry with key from map.
///
/// The delete map element helper call is used to delete values from maps.
unsafe fn raw_map_delete_elem(map: *mut c_void, key: *const c_void) -> i64 {
    let map = Arc::from_raw(map as *const BpfMap);
    let key_size = map.key_size();
    let key = core::slice::from_raw_parts(key as *const u8, key_size);
    let res = map_delete_elem(&map, key);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match res {
        Ok(_) => 0,
        Err(e) => e as i64,
    }
}

pub fn map_delete_elem(map: &Arc<BpfMap>, key: &[u8]) -> Result<()> {
    let mut binding = map.inner_map().lock();
    binding.delete_elem(key)
}

/// For each element in map, call callback_fn function with map, callback_ctx and other map-specific
/// parameters. The callback_fn should be a static function and the callback_ctx should be a pointer
/// to the stack. The flags is used to control certain aspects of the helper. Currently, the flags must
/// be 0.
///
/// The following is a list of supported map types and their respective expected callback signatures:
/// - BPF_MAP_TYPE_HASH
/// - BPF_MAP_TYPE_PERCPU_HASH
/// - BPF_MAP_TYPE_LRU_HASH
/// - BPF_MAP_TYPE_LRU_PERCPU_HASH
/// - BPF_MAP_TYPE_ARRAY
/// - BPF_MAP_TYPE_PERCPU_ARRAY
///
/// `long (*callback_fn)(struct bpf_map *map, const void *key, void *value, void *ctx);`
///
/// For per_cpu maps, the map_value is the value on the cpu where the bpf_prog is running.
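///
/// On the Rust side the callback arrives as an erased pointer and is
/// reinterpreted as a `BpfCallBackFn` before being handed to
/// `map_for_each_elem`.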
unsafe fn raw_map_for_each_elem(
    map: *mut c_void,
    cb: *const c_void,
    ctx: *const c_void,
    flags: u64,
) -> i64 {
    let map = Arc::from_raw(map as *const BpfMap);
    let cb = *core::mem::transmute::<*const c_void, *const BpfCallBackFn>(cb);
    let res = map_for_each_elem(&map, cb, ctx as _, flags);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match res {
        Ok(v) => v as i64,
        Err(e) => e as i64,
    }
}

pub fn map_for_each_elem(
    map: &Arc<BpfMap>,
    cb: BpfCallBackFn,
    ctx: *const u8,
    flags: u64,
) -> Result<u32> {
    let mut binding = map.inner_map().lock();
    binding.for_each_elem(cb, ctx, flags)
}

/// Perform a lookup in percpu map for an entry associated to key on cpu.
///
/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_lookup_percpu_elem/
unsafe fn raw_map_lookup_percpu_elem(
    map: *mut c_void,
    key: *const c_void,
    cpu: u32,
) -> *const c_void {
    let map = Arc::from_raw(map as *const BpfMap);
    let key_size = map.key_size();
    let key = core::slice::from_raw_parts(key as *const u8, key_size);
    let value = map_lookup_percpu_elem(&map, key, cpu);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match value {
        Ok(Some(value)) => value as *const c_void,
        _ => core::ptr::null_mut(),
    }
}

pub fn map_lookup_percpu_elem(
    map: &Arc<BpfMap>,
    key: &[u8],
    cpu: u32,
) -> Result<Option<*const u8>> {
    let mut binding = map.inner_map().lock();
    let value = binding.lookup_percpu_elem(key, cpu);
    match value {
        Ok(Some(value)) => Ok(Some(value.as_ptr())),
        _ => Ok(None),
    }
}

/// Push an element value in map.
///
/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_push_elem/
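///
/// Together with the pop and peek helpers below, this backs the queue/stack
/// style map types.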
unsafe fn raw_map_push_elem(map: *mut c_void, value: *const c_void, flags: u64) -> i64 {
    let map = Arc::from_raw(map as *const BpfMap);
    let value_size = map.value_size();
    let value = core::slice::from_raw_parts(value as *const u8, value_size);
    let res = map_push_elem(&map, value, flags);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match res {
        Ok(_) => 0,
        Err(e) => e as i64,
    }
}

pub fn map_push_elem(map: &Arc<BpfMap>, value: &[u8], flags: u64) -> Result<()> {
    let mut binding = map.inner_map().lock();
    binding.push_elem(value, flags)
}

/// Pop an element from map.
///
/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_pop_elem/
unsafe fn raw_map_pop_elem(map: *mut c_void, value: *mut c_void) -> i64 {
    let map = Arc::from_raw(map as *const BpfMap);
    let value_size = map.value_size();
    let value = core::slice::from_raw_parts_mut(value as *mut u8, value_size);
    let res = map_pop_elem(&map, value);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match res {
        Ok(_) => 0,
        Err(e) => e as i64,
    }
}

pub fn map_pop_elem(map: &Arc<BpfMap>, value: &mut [u8]) -> Result<()> {
    let mut binding = map.inner_map().lock();
    binding.pop_elem(value)
}

/// Get an element from map without removing it.
///
/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_peek_elem/
unsafe fn raw_map_peek_elem(map: *mut c_void, value: *mut c_void) -> i64 {
    let map = Arc::from_raw(map as *const BpfMap);
    let value_size = map.value_size();
    let value = core::slice::from_raw_parts_mut(value as *mut u8, value_size);
    let res = map_peek_elem(&map, value);
    // warning: We need to keep the map alive, so we don't drop it here.
    let _ = Arc::into_raw(map);
    match res {
        Ok(_) => 0,
        Err(e) => e as i64,
    }
}

pub fn map_peek_elem(map: &Arc<BpfMap>, value: &mut [u8]) -> Result<()> {
    let binding = map.inner_map().lock();
    binding.peek_elem(value)
}

/// Table mapping eBPF helper function ids (defined in `consts`) to their
/// type-erased implementations. Populated once by `init_helper_functions`.
pub static BPF_HELPER_FUN_SET: Lazy<BTreeMap<u32, RawBPFHelperFn>> = Lazy::new();
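// Illustrative use only (not part of this module's call path): a caller that
// holds a helper id taken from a loaded program could resolve and invoke the
// type-erased function roughly like this, assuming `Lazy` dereferences to the
// inner `BTreeMap`:
//
//     if let Some(helper) = BPF_HELPER_FUN_SET.get(&helper_id).copied() {
//         let ret = helper(arg1, arg2, arg3, arg4, arg5);
//     }
//
// The real dispatch is driven by the eBPF runtime after `init_helper_functions`
// has populated the table.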

/// Initialize the helper functions.
pub fn init_helper_functions() {
    use consts::*;
    let mut map = BTreeMap::new();
    unsafe {
        // Map helpers::Generic map helpers
        map.insert(HELPER_MAP_LOOKUP_ELEM, define_func!(raw_map_lookup_elem));
        map.insert(HELPER_MAP_UPDATE_ELEM, define_func!(raw_map_update_elem));
        map.insert(HELPER_MAP_DELETE_ELEM, define_func!(raw_map_delete_elem));
        map.insert(
            HELPER_MAP_FOR_EACH_ELEM,
            define_func!(raw_map_for_each_elem),
        );
        map.insert(
            HELPER_MAP_LOOKUP_PERCPU_ELEM,
            define_func!(raw_map_lookup_percpu_elem),
        );
        // map.insert(93, define_func!(raw_bpf_spin_lock));
        // map.insert(94, define_func!(raw_bpf_spin_unlock));
        // Map helpers::Perf event array helpers
        map.insert(
            HELPER_PERF_EVENT_OUTPUT,
            define_func!(raw_perf_event_output),
        );
        // Probe and trace helpers::Memory helpers
        map.insert(HELPER_BPF_PROBE_READ, define_func!(raw_bpf_probe_read));
        // Print helpers
        map.insert(HELPER_TRACE_PRINTF, define_func!(trace_printf));

        // Map helpers::Queue and stack helpers
        map.insert(HELPER_MAP_PUSH_ELEM, define_func!(raw_map_push_elem));
        map.insert(HELPER_MAP_POP_ELEM, define_func!(raw_map_pop_elem));
        map.insert(HELPER_MAP_PEEK_ELEM, define_func!(raw_map_peek_elem));
    }
    BPF_HELPER_FUN_SET.init(map);
}