xref: /DragonOS/kernel/src/mm/memblock.rs (revision 6994f6b113f6fea7b997ec07130a7bdaecfd67b7)
1 use system_error::SystemError;
2 
3 use crate::libs::spinlock::{SpinLock, SpinLockGuard};
4 
5 use super::{PhysAddr, PhysMemoryArea};
6 
/// Capacity of the boot-time region array: the maximum number of initial
/// memory regions the manager can track.
pub const INITIAL_MEMORY_REGIONS_NUM: usize = 128;

/// Global singleton recording the initial (boot-time) physical memory layout.
static MEM_BLOCK_MANAGER: MemBlockManager = MemBlockManager::new();
11 
12 #[inline(always)]
13 pub fn mem_block_manager() -> &'static MemBlockManager {
14     &MEM_BLOCK_MANAGER
15 }
16 
/// Boot-time physical memory region manager.
///
/// Keeps a sorted, non-overlapping array of [`PhysMemoryArea`] entries that
/// describe the memory layout discovered at kernel startup.
#[derive(Debug)]
pub struct MemBlockManager {
    // All mutable state lives behind a spinlock so the manager can be shared
    // as a global static.
    inner: SpinLock<InnerMemBlockManager>,
}
22 
/// Lock-protected state of [`MemBlockManager`].
#[derive(Debug)]
pub struct InnerMemBlockManager {
    /// Initial memory regions.
    ///
    /// Records the memory layout at kernel boot. Entries are kept in
    /// ascending order of base address and never overlap; only the first
    /// `initial_memory_regions_num` entries are populated.
    initial_memory_regions: [PhysMemoryArea; INITIAL_MEMORY_REGIONS_NUM],
    /// Number of populated entries in `initial_memory_regions`.
    initial_memory_regions_num: usize,
}
31 
32 impl MemBlockManager {
33     #[allow(dead_code)]
34     pub const MIN_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(0);
35     #[allow(dead_code)]
36     pub const MAX_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(usize::MAX);
37     const fn new() -> Self {
38         Self {
39             inner: SpinLock::new(InnerMemBlockManager {
40                 initial_memory_regions: [PhysMemoryArea::DEFAULT; INITIAL_MEMORY_REGIONS_NUM],
41                 initial_memory_regions_num: 0,
42             }),
43         }
44     }
45 
46     /// 添加内存区域
47     ///
48     /// 如果添加的区域与已有区域有重叠,会将重叠的区域合并
49     #[allow(dead_code)]
50     pub fn add_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
51         if size == 0 {
52             return Ok(());
53         }
54         let mut inner = self.inner.lock();
55         if inner.initial_memory_regions_num >= INITIAL_MEMORY_REGIONS_NUM {
56             panic!("Too many memory regions!");
57         }
58 
59         let block = PhysMemoryArea::new(base, size);
60         // 特判第一个区域
61         if inner.initial_memory_regions_num == 0 {
62             inner.initial_memory_regions[0] = block;
63             inner.initial_memory_regions_num += 1;
64             return Ok(());
65         }
66 
67         // 先计算需要添加的区域数量
68         let blocks_to_add = self
69             .do_add_block(&mut inner, block, false)
70             .expect("Failed to count blocks to add!");
71 
72         if inner.initial_memory_regions_num + blocks_to_add > INITIAL_MEMORY_REGIONS_NUM {
73             kerror!("Too many memory regions!");
74             return Err(SystemError::ENOMEM);
75         }
76 
77         // 然后添加区域
78         self.do_add_block(&mut inner, block, true)
79             .expect("Failed to add block!");
80 
81         return Ok(());
82     }
83 
84     fn do_add_block(
85         &self,
86         inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
87         block: PhysMemoryArea,
88         insert: bool,
89     ) -> Result<usize, SystemError> {
90         let mut base = block.base;
91         let end = block.base + block.size;
92         let mut i = 0;
93         let mut start_index = -1;
94         let mut end_index = -1;
95 
96         let mut num_to_add = 0;
97 
98         while i < inner.initial_memory_regions_num {
99             let range_base = inner.initial_memory_regions[i].base;
100             let range_end =
101                 inner.initial_memory_regions[i].base + inner.initial_memory_regions[i].size;
102 
103             if range_base >= end {
104                 break;
105             }
106             if range_end <= base {
107                 i += 1;
108                 continue;
109             }
110 
111             // 有重叠
112 
113             if range_base > base {
114                 num_to_add += 1;
115                 if insert {
116                     if start_index == -1 {
117                         start_index = i as isize;
118                     }
119                     end_index = (i + 1) as isize;
120                     self.do_insert_area(inner, i, base, range_base - base);
121                     i += 1;
122                 }
123             }
124 
125             i += 1;
126             base = core::cmp::min(range_end, end);
127         }
128 
129         if base < end {
130             num_to_add += 1;
131             if insert {
132                 if start_index == -1 {
133                     start_index = i as isize;
134                 }
135                 end_index = (i + 1) as isize;
136                 self.do_insert_area(inner, i, base, end - base);
137             }
138         }
139 
140         if num_to_add == 0 {
141             return Ok(0);
142         }
143 
144         if insert {
145             self.do_merge_blocks(inner, start_index, end_index);
146         }
147         return Ok(num_to_add);
148     }
149 
150     fn do_insert_area(
151         &self,
152         inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
153         index: usize,
154         base: PhysAddr,
155         size: usize,
156     ) {
157         let copy_elements = inner.initial_memory_regions_num - index;
158         inner
159             .initial_memory_regions
160             .copy_within(index..index + copy_elements, index + 1);
161         inner.initial_memory_regions[index] = PhysMemoryArea::new(base, size);
162         inner.initial_memory_regions_num += 1;
163     }
164 
165     fn do_merge_blocks(
166         &self,
167         inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
168         start_index: isize,
169         mut end_index: isize,
170     ) {
171         let mut i = 0;
172         if start_index > 0 {
173             i = start_index - 1;
174         }
175         end_index = core::cmp::min(end_index, inner.initial_memory_regions_num as isize - 1);
176 
177         while i < end_index {
178             {
179                 let next_base = inner.initial_memory_regions[(i + 1) as usize].base;
180                 let next_size = inner.initial_memory_regions[(i + 1) as usize].size;
181                 let this = &mut inner.initial_memory_regions[i as usize];
182 
183                 if this.base + this.size != next_base {
184                     // BUG_ON(this->base + this->size > next->base);
185                     i += 1;
186                     continue;
187                 }
188                 this.size += next_size;
189             }
190             // 移动后面的区域
191             let copy_elements = inner.initial_memory_regions_num - (i + 2) as usize;
192             inner.initial_memory_regions.copy_within(
193                 (i + 2) as usize..(i as usize + 2 + copy_elements),
194                 (i + 1) as usize,
195             );
196 
197             inner.initial_memory_regions_num -= 1;
198             end_index -= 1;
199         }
200     }
201 
202     /// 移除内存区域
203     ///
204     /// 如果移除的区域与已有区域有重叠,会将重叠的区域分割
205     #[allow(dead_code)]
206     pub fn remove_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
207         if size == 0 {
208             return Ok(());
209         }
210         let mut inner = self.inner.lock();
211         if inner.initial_memory_regions_num == 0 {
212             return Ok(());
213         }
214 
215         let (start_index, end_index) = self
216             .isolate_range(&mut inner, base, size)
217             .expect("Failed to isolate range!");
218 
219         for i in (start_index..end_index).rev() {
220             self.do_remove_region(&mut inner, i);
221         }
222         return Ok(());
223     }
224 
225     fn do_remove_region(&self, inner: &mut SpinLockGuard<'_, InnerMemBlockManager>, index: usize) {
226         let copy_elements = inner.initial_memory_regions_num - index - 1;
227         inner
228             .initial_memory_regions
229             .copy_within(index + 1..index + 1 + copy_elements, index);
230 
231         inner.initial_memory_regions_num -= 1;
232 
233         if inner.initial_memory_regions_num == 0 {
234             inner.initial_memory_regions[0].base = PhysAddr::new(0);
235             inner.initial_memory_regions[0].size = 0;
236         }
237     }
238 
239     fn isolate_range(
240         &self,
241         inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
242         base: PhysAddr,
243         size: usize,
244     ) -> Result<(usize, usize), SystemError> {
245         let end = base + size;
246 
247         let mut idx = 0;
248 
249         let mut start_index = 0;
250         let mut end_index = 0;
251 
252         if size == 0 {
253             return Ok((0, 0));
254         }
255 
256         while idx < inner.initial_memory_regions_num {
257             let range_base = inner.initial_memory_regions[idx].base;
258             let range_end = range_base + inner.initial_memory_regions[idx].size;
259 
260             if range_base >= end {
261                 break;
262             }
263             if range_end <= base {
264                 idx = idx.checked_add(1).unwrap_or(0);
265                 continue;
266             }
267 
268             if range_base < base {
269                 // regions[idx] intersects from below
270                 inner.initial_memory_regions[idx].base = base;
271                 inner.initial_memory_regions[idx].size -= base - range_base;
272                 self.do_insert_area(inner, idx, range_base, base - range_base);
273             } else if range_end > end {
274                 // regions[idx] intersects from above
275                 inner.initial_memory_regions[idx].base = end;
276                 inner.initial_memory_regions[idx].size -= end - range_base;
277 
278                 self.do_insert_area(inner, idx, range_base, end - range_base);
279                 if idx == 0 {
280                     idx = usize::MAX;
281                 } else {
282                     idx -= 1;
283                 }
284             } else {
285                 // regions[idx] is inside the range, record it
286                 if end_index == 0 {
287                     start_index = idx;
288                 }
289                 end_index = idx + 1;
290             }
291 
292             idx = idx.checked_add(1).unwrap_or(0);
293         }
294 
295         return Ok((start_index, end_index));
296     }
297 
298     /// 生成迭代器
299     pub fn to_iter(&self) -> MemBlockIter {
300         let inner = self.inner.lock();
301         return MemBlockIter { inner, index: 0 };
302     }
303 
304     /// 获取初始内存区域数量
305     pub fn total_initial_memory_regions(&self) -> usize {
306         let inner = self.inner.lock();
307         return inner.initial_memory_regions_num;
308     }
309 
310     /// 根据索引获取初始内存区域
311     pub fn get_initial_memory_region(&self, index: usize) -> Option<PhysMemoryArea> {
312         let inner = self.inner.lock();
313         return inner.initial_memory_regions.get(index).copied();
314     }
315 }
316 
/// Iterator over the boot-time memory regions.
///
/// Holds the manager's spinlock guard for its entire lifetime, so the region
/// array cannot change mid-iteration — do not keep it alive longer than
/// necessary.
pub struct MemBlockIter<'a> {
    // Lock guard over the manager state being iterated.
    inner: SpinLockGuard<'a, InnerMemBlockManager>,
    // Index of the next region that `next()` will yield.
    index: usize,
}
321 
322 #[allow(dead_code)]
323 impl<'a> MemBlockIter<'a> {
324     /// 获取内存区域数量
325     pub fn total_num(&self) -> usize {
326         self.inner.initial_memory_regions_num
327     }
328 
329     /// 获取指定索引的内存区域
330     pub fn get_area(&self, index: usize) -> &PhysMemoryArea {
331         &self.inner.initial_memory_regions[index]
332     }
333 
334     /// 获取当前索引
335     pub fn current_index(&self) -> usize {
336         self.index
337     }
338 }
339 
340 impl<'a> Iterator for MemBlockIter<'a> {
341     type Item = PhysMemoryArea;
342 
343     fn next(&mut self) -> Option<Self::Item> {
344         if self.index >= self.inner.initial_memory_regions_num {
345             return None;
346         }
347         let ret = self.inner.initial_memory_regions[self.index];
348         self.index += 1;
349         return Some(ret);
350     }
351 }
352