xref: /DragonOS/kernel/src/bpf/map/lru.rs (revision 7b0ef10895108a0de5ff5ef3d2f93f40cf2e33a5)
use super::{BpfCallBackFn, BpfMapCommonOps, Result};
use crate::bpf::map::util::BpfMapMeta;
use crate::mm::percpu::{PerCpu, PerCpuVar};
use crate::smp::cpu::ProcessorId;
use alloc::vec::Vec;
use core::fmt::Debug;
use core::num::NonZero;
use lru::LruCache;
use system_error::SystemError;

type BpfHashMapKey = Vec<u8>;
type BpfHashMapValue = Vec<u8>;
/// This map is the LRU (Least Recently Used) variant of the BPF_MAP_TYPE_HASH.
/// It is a generic map type that stores a fixed maximum number of key/value pairs.
/// When the map approaches capacity, the approximately least recently used
/// elements are removed to make room for new elements.
///
/// See https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_LRU_HASH/
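///
/// The eviction order comes from the backing [`lru::LruCache`]. A minimal
/// sketch of that behaviour (the capacity of 2 is chosen purely for
/// illustration):
///
/// ```ignore
/// use core::num::NonZero;
/// use lru::LruCache;
///
/// let mut cache: LruCache<Vec<u8>, Vec<u8>> = LruCache::new(NonZero::new(2).unwrap());
/// cache.put(b"a".to_vec(), b"1".to_vec());
/// cache.put(b"b".to_vec(), b"2".to_vec());
/// // Touching "a" marks it as the most recently used entry.
/// cache.get(&b"a"[..]);
/// // Inserting a third entry exceeds the capacity, so the least recently
/// // used entry ("b") is evicted.
/// cache.put(b"c".to_vec(), b"3".to_vec());
/// assert!(cache.get(&b"b"[..]).is_none());
/// assert!(cache.get(&b"a"[..]).is_some());
/// ```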
#[derive(Debug)]
pub struct LruMap {
    _max_entries: u32,
    data: LruCache<BpfHashMapKey, BpfHashMapValue>,
}

impl LruMap {
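    /// Creates an LRU map that holds at most `attr.max_entries` entries.
    ///
    /// Returns `EINVAL` if `value_size` or `max_entries` is zero.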
    pub fn new(attr: &BpfMapMeta) -> Result<Self> {
        if attr.value_size == 0 || attr.max_entries == 0 {
            return Err(SystemError::EINVAL);
        }
        Ok(Self {
            _max_entries: attr.max_entries,
            data: LruCache::new(
                NonZero::new(attr.max_entries as usize).ok_or(SystemError::EINVAL)?,
            ),
        })
    }
}

impl BpfMapCommonOps for LruMap {
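    /// Looks up `key` and returns a reference to its value. Because this goes
    /// through [`LruCache::get`], a successful lookup also marks the entry as
    /// the most recently used one.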
    fn lookup_elem(&mut self, key: &[u8]) -> Result<Option<&[u8]>> {
        let value = self.data.get(key).map(|v| v.as_slice());
        Ok(value)
    }
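    /// Inserts or overwrites the value for `key`, making it the most recently
    /// used entry and evicting the least recently used entry if the map is
    /// full. The update `flags` are currently ignored, so the call always
    /// behaves like `BPF_ANY`.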
    fn update_elem(&mut self, key: &[u8], value: &[u8], _flags: u64) -> Result<()> {
        self.data.put(key.to_vec(), value.to_vec());
        Ok(())
    }
    fn delete_elem(&mut self, key: &[u8]) -> Result<()> {
        self.data.pop(key);
        Ok(())
    }
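    /// Invokes `cb` for every key/value pair, iterating from the most recently
    /// used entry to the least recently used one. A non-zero return value from
    /// the callback stops the iteration early; the number of elements for
    /// which the callback returned 0 is returned.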
    fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result<u32> {
        if flags != 0 {
            return Err(SystemError::EINVAL);
        }
        let mut total_used = 0;
        for (key, value) in self.data.iter() {
            let res = cb(key, value, ctx);
            // return value: 0 - continue, non-zero - stop and return
            if res != 0 {
                break;
            }
            total_used += 1;
        }
        Ok(total_used)
    }
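    /// Copies the value stored under `key` into `value` and removes the entry.
    /// Returns `ENOENT` if the key is not present; `value` must have exactly
    /// the stored value's length, since `copy_from_slice` panics on a length
    /// mismatch.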
    fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> {
        let v = self
            .data
            .get(key)
            .map(|v| v.as_slice())
            .ok_or(SystemError::ENOENT)?;
        value.copy_from_slice(v);
        self.data.pop(key);
        Ok(())
    }
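    /// Writes the key that follows `key` in the cache's most-recently-used
    /// iteration order into `next_key`, or the first key if `key` is `None`.
    /// Returns `ENOENT` when the iteration is exhausted, which also happens
    /// when `key` is not found, because the search then consumes the whole
    /// iterator.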
    fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> {
        let mut iter = self.data.iter();
        if let Some(key) = key {
            for (k, _) in iter.by_ref() {
                if k.as_slice() == key {
                    break;
                }
            }
        }
        let res = iter.next();
        match res {
            Some((k, _)) => {
                next_key.copy_from_slice(k.as_slice());
                Ok(())
            }
            None => Err(SystemError::ENOENT),
        }
    }
}

/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_LRU_PERCPU_HASH/
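///
/// Every possible CPU gets its own independent `LruMap`. The regular
/// `BpfMapCommonOps` methods operate on the map of the CPU the caller is
/// currently running on, while `lookup_percpu_elem` reads the map of an
/// explicitly chosen CPU. A minimal usage sketch, assuming `attr` is an
/// already validated `BpfMapMeta` and that this code does not run on CPU 1:
///
/// ```ignore
/// let mut map = PerCpuLruMap::new(&attr)?;
/// // Insert into the LRU map of the CPU we are currently running on.
/// map.update_elem(b"key", b"value", 0)?;
/// // The entry is visible through the current CPU's map ...
/// assert!(map.lookup_elem(b"key")?.is_some());
/// // ... but CPU 1 holds its own, still empty, map.
/// assert!(map.lookup_percpu_elem(b"key", 1)?.is_none());
/// ```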
pub struct PerCpuLruMap {
    per_cpu_maps: PerCpuVar<LruMap>,
}

impl Debug for PerCpuLruMap {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("PerCpuLruMap")
            .field("maps", &self.per_cpu_maps)
            .finish()
    }
}

impl PerCpuLruMap {
    pub fn new(attr: &BpfMapMeta) -> Result<Self> {
        let num_cpus = PerCpu::MAX_CPU_NUM;
        let mut data = Vec::with_capacity(num_cpus as usize);
        for _ in 0..num_cpus {
            let lru_map = LruMap::new(attr)?;
            data.push(lru_map);
        }
        let per_cpu_maps = PerCpuVar::new(data).ok_or(SystemError::EINVAL)?;
        Ok(PerCpuLruMap { per_cpu_maps })
    }
}

impl BpfMapCommonOps for PerCpuLruMap {
    fn lookup_elem(&mut self, key: &[u8]) -> Result<Option<&[u8]>> {
        self.per_cpu_maps.get_mut().lookup_elem(key)
    }
    fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> {
        self.per_cpu_maps.get_mut().update_elem(key, value, flags)
    }
    fn delete_elem(&mut self, key: &[u8]) -> Result<()> {
        self.per_cpu_maps.get_mut().delete_elem(key)
    }
    fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result<u32> {
        self.per_cpu_maps.get_mut().for_each_elem(cb, ctx, flags)
    }
    fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> {
        self.per_cpu_maps
            .get_mut()
            .lookup_and_delete_elem(key, value)
    }
    fn lookup_percpu_elem(&mut self, key: &[u8], cpu: u32) -> Result<Option<&[u8]>> {
        unsafe {
            self.per_cpu_maps
                .force_get_mut(ProcessorId::new(cpu))
                .lookup_elem(key)
        }
    }
    fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> {
        self.per_cpu_maps.get_mut().get_next_key(key, next_key)
    }
}