use core::intrinsics::unlikely;

use system_error::SystemError;

use crate::libs::spinlock::{SpinLock, SpinLockGuard};

use super::{PhysAddr, PhysMemoryArea};

pub const INITIAL_MEMORY_REGIONS_NUM: usize = 128;

/// Global manager of the initial memory regions
static MEM_BLOCK_MANAGER: MemBlockManager = MemBlockManager::new();

#[inline(always)]
pub fn mem_block_manager() -> &'static MemBlockManager {
    &MEM_BLOCK_MANAGER
}

/// Memory region manager
#[derive(Debug)]
pub struct MemBlockManager {
    inner: SpinLock<InnerMemBlockManager>,
}

#[derive(Debug)]
pub struct InnerMemBlockManager {
    /// Initial memory regions
    ///
    /// Records the memory layout at kernel boot time; the regions are kept in
    /// ascending order and non-overlapping.
    initial_memory_regions: [PhysMemoryArea; INITIAL_MEMORY_REGIONS_NUM],
    initial_memory_regions_num: usize,
}

impl MemBlockManager {
    #[allow(dead_code)]
    pub const MIN_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(0);
    #[allow(dead_code)]
    pub const MAX_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(usize::MAX);
    const fn new() -> Self {
        Self {
            inner: SpinLock::new(InnerMemBlockManager {
                initial_memory_regions: [PhysMemoryArea::DEFAULT; INITIAL_MEMORY_REGIONS_NUM],
                initial_memory_regions_num: 0,
            }),
        }
    }

    /// Add a memory region
    ///
    /// If the added region overlaps existing regions, the overlapping regions are merged.
    #[allow(dead_code)]
    pub fn add_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        return self.add_range(base, size, MemoryAreaAttr::empty());
    }

    /// Add a memory region
    ///
    /// If the added region overlaps existing regions, the overlapping regions are merged.
    fn add_range(
        &self,
        base: PhysAddr,
        size: usize,
        flags: MemoryAreaAttr,
    ) -> Result<(), SystemError> {
        if size == 0 {
            return Ok(());
        }
        let mut inner = self.inner.lock();
        if inner.initial_memory_regions_num >= INITIAL_MEMORY_REGIONS_NUM {
            panic!("Too many memory regions!");
        }

        let block = PhysMemoryArea::new(base, size, flags);
        // Special-case the very first region
        if inner.initial_memory_regions_num == 0 {
            inner.initial_memory_regions[0] = block;
            inner.initial_memory_regions_num += 1;
            return Ok(());
        }

        // First pass: count how many regions would have to be added
        let blocks_to_add = self
            .do_add_block(&mut inner, block, false, flags)
            .expect("Failed to count blocks to add!");

        if inner.initial_memory_regions_num + blocks_to_add > INITIAL_MEMORY_REGIONS_NUM {
            kerror!("Too many memory regions!");
            return Err(SystemError::ENOMEM);
        }

        // Second pass: actually insert the regions
        self.do_add_block(&mut inner, block, true, flags)
            .expect("Failed to add block!");

        return Ok(());
    }
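
    /// Walk the sorted region array and insert whatever parts of `block` do not
    /// yet overlap an existing region.
    ///
    /// With `insert == false` this only counts how many new regions the call
    /// would create; with `insert == true` it performs the insertions and then
    /// merges adjacent regions that have identical flags.
    ///
    /// Illustrative sketch (the addresses are made up): starting from a single
    /// region `[0x0, 0x2000)`, adding `base = 0x1000, size = 0x3000` with the
    /// same flags inserts only the non-overlapping tail `[0x2000, 0x4000)`, and
    /// the merge pass then leaves one region `[0x0, 0x4000)`.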
    fn do_add_block(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        block: PhysMemoryArea,
        insert: bool,
        flags: MemoryAreaAttr,
    ) -> Result<usize, SystemError> {
        let mut base = block.base;
        let end = block.base + block.size;
        let mut i = 0;
        let mut start_index = -1;
        let mut end_index = -1;

        let mut num_to_add = 0;

        while i < inner.initial_memory_regions_num {
            let range_base = inner.initial_memory_regions[i].base;
            let range_end =
                inner.initial_memory_regions[i].base + inner.initial_memory_regions[i].size;

            if range_base >= end {
                break;
            }
            if range_end <= base {
                i += 1;
                continue;
            }

            // The new block overlaps this region

            if range_base > base {
                num_to_add += 1;
                if insert {
                    if start_index == -1 {
                        start_index = i as isize;
                    }
                    end_index = (i + 1) as isize;
                    self.do_insert_area(inner, i, base, range_base - base, flags);
                    i += 1;
                }
            }

            i += 1;
            base = core::cmp::min(range_end, end);
        }

        if base < end {
            num_to_add += 1;
            if insert {
                if start_index == -1 {
                    start_index = i as isize;
                }
                end_index = (i + 1) as isize;
                self.do_insert_area(inner, i, base, end - base, flags);
            }
        }

        if num_to_add == 0 {
            return Ok(0);
        }

        if insert {
            self.do_merge_blocks(inner, start_index, end_index);
        }
        return Ok(num_to_add);
    }

    fn do_insert_area(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        index: usize,
        base: PhysAddr,
        size: usize,
        flags: MemoryAreaAttr,
    ) {
        let copy_elements = inner.initial_memory_regions_num - index;
        inner
            .initial_memory_regions
            .copy_within(index..index + copy_elements, index + 1);
        inner.initial_memory_regions[index] = PhysMemoryArea::new(base, size, flags);
        inner.initial_memory_regions_num += 1;
    }

    fn do_merge_blocks(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        start_index: isize,
        mut end_index: isize,
    ) {
        let mut i = 0;
        if start_index > 0 {
            i = start_index - 1;
        }
        end_index = core::cmp::min(end_index, inner.initial_memory_regions_num as isize - 1);

        while i < end_index {
            {
                let next_base = inner.initial_memory_regions[(i + 1) as usize].base;
                let next_size = inner.initial_memory_regions[(i + 1) as usize].size;
                let next_flags = inner.initial_memory_regions[(i + 1) as usize].flags;
                let this = &mut inner.initial_memory_regions[i as usize];

                if this.base + this.size != next_base || this.flags != next_flags {
                    if unlikely(this.base + this.size > next_base) {
                        kBUG!("this->base + this->size > next->base");
                    }
                    i += 1;
                    continue;
                }
                this.size += next_size;
            }
            // Shift the following regions forward to fill the slot of the merged region
            let copy_elements = inner.initial_memory_regions_num - (i + 2) as usize;
            inner.initial_memory_regions.copy_within(
                (i + 2) as usize..(i as usize + 2 + copy_elements),
                (i + 1) as usize,
            );

            inner.initial_memory_regions_num -= 1;
            end_index -= 1;
        }
    }

    /// Remove a memory region
    ///
    /// If the removed range overlaps existing regions, the overlapping regions are split.
    #[allow(dead_code)]
    pub fn remove_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        if size == 0 {
            return Ok(());
        }
        let mut inner = self.inner.lock();
        if inner.initial_memory_regions_num == 0 {
            return Ok(());
        }

        let (start_index, end_index) = self
            .isolate_range(&mut inner, base, size)
            .expect("Failed to isolate range!");

        for i in (start_index..end_index).rev() {
            self.do_remove_region(&mut inner, i);
        }
        return Ok(());
    }

    fn do_remove_region(&self, inner: &mut SpinLockGuard<'_, InnerMemBlockManager>, index: usize) {
        let copy_elements = inner.initial_memory_regions_num - index - 1;
        inner
            .initial_memory_regions
            .copy_within(index + 1..index + 1 + copy_elements, index);

        inner.initial_memory_regions_num -= 1;

        if inner.initial_memory_regions_num == 0 {
            inner.initial_memory_regions[0].base = PhysAddr::new(0);
            inner.initial_memory_regions[0].size = 0;
        }
    }
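
    // Illustrative sketch of how removal splits regions (the addresses are made
    // up): with a single region [0x0, 0x4000), remove_block(0x1000, 0x1000) first
    // has isolate_range() split it into [0x0, 0x1000), [0x1000, 0x2000) and
    // [0x2000, 0x4000), and then drops the isolated middle region, leaving
    // [0x0, 0x1000) and [0x2000, 0x4000).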
    /// Find the blocks of this memory block manager that fall within a physical
    /// address range and isolate the requested span into whole regions.
    ///
    /// ## Return value
    ///
    /// - Ok((start_index, end_index)) means a contiguous run of regions covering the requested size was found, where:
    ///     - start_index is the index of the first isolated region.
    ///     - end_index is the index of the last isolated region plus one; it is not itself part of the returned run, but marks where the next, possibly discontiguous, region begins.
    /// - Err(SystemError) means not enough space was found to satisfy the requested size, either because there are too few memory regions or because of another system error.
    fn isolate_range(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        base: PhysAddr,
        size: usize,
    ) -> Result<(usize, usize), SystemError> {
        let end = base + size;

        let mut idx = 0;

        let mut start_index = 0;
        let mut end_index = 0;

        if size == 0 {
            return Ok((0, 0));
        }

        while idx < inner.initial_memory_regions_num {
            let range_base = inner.initial_memory_regions[idx].base;
            let range_end = range_base + inner.initial_memory_regions[idx].size;

            if range_base >= end {
                break;
            }
            if range_end <= base {
                idx = idx.checked_add(1).unwrap_or(0);
                continue;
            }

            if range_base < base {
                // regions[idx] intersects from below
                inner.initial_memory_regions[idx].base = base;
                inner.initial_memory_regions[idx].size -= base - range_base;
                self.do_insert_area(
                    inner,
                    idx,
                    range_base,
                    base - range_base,
                    inner.initial_memory_regions[idx].flags,
                );
            } else if range_end > end {
                // regions[idx] intersects from above
                inner.initial_memory_regions[idx].base = end;
                inner.initial_memory_regions[idx].size -= end - range_base;

                self.do_insert_area(
                    inner,
                    idx,
                    range_base,
                    end - range_base,
                    inner.initial_memory_regions[idx].flags,
                );
                // Step back one slot so the freshly inserted lower part is
                // re-examined on the next iteration (usize::MAX wraps back to 0
                // through the checked_add below).
                if idx == 0 {
                    idx = usize::MAX;
                } else {
                    idx -= 1;
                }
            } else {
                // regions[idx] is inside the range, record it
                if end_index == 0 {
                    start_index = idx;
                }
                end_index = idx + 1;
            }

            idx = idx.checked_add(1).unwrap_or(0);
        }

        return Ok((start_index, end_index));
    }

    /// mark_nomap - mark a memory region with the `MemoryAreaAttr::NOMAP` flag
    ///
    /// ## Parameters
    ///
    /// - base: physical base address of the region
    /// - size: size of the region
    ///
    /// Memory regions marked with `MemoryAreaAttr::NOMAP` will not be added to the direct mapping
    /// of physical memory. They are still covered by the memory map, and the struct pages that
    /// represent NOMAP frames in the memory map will be PageReserved().
    /// Note: if memory marked `MemoryAreaAttr::NOMAP` was allocated from memblock, the caller must ignore that memory.
    pub fn mark_nomap(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        return self.set_or_clear_flags(base, size, true, MemoryAreaAttr::NOMAP);
    }

    fn set_or_clear_flags(
        &self,
        base: PhysAddr,
        size: usize,
        set: bool,
        flags: MemoryAreaAttr,
    ) -> Result<(), SystemError> {
        let mut inner = self.inner.lock();
        let (start_index, end_index) = self.isolate_range(&mut inner, base, size)?;
        for i in start_index..end_index {
            if set {
                inner.initial_memory_regions[i].flags |= flags;
            } else {
                inner.initial_memory_regions[i].flags &= !flags;
            }
        }

        let num = inner.initial_memory_regions_num as isize;
        self.do_merge_blocks(&mut inner, 0, num);
        return Ok(());
    }

    /// Mark a memory region as reserved
    pub fn reserve_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        return self.set_or_clear_flags(base, size, true, MemoryAreaAttr::RESERVED);
    }

    /// Build an iterator over the initial memory regions
    pub fn to_iter(&self) -> MemBlockIter {
        let inner = self.inner.lock();
        return MemBlockIter { inner, index: 0 };
    }

    /// Get the number of initial memory regions
    pub fn total_initial_memory_regions(&self) -> usize {
        let inner = self.inner.lock();
        return inner.initial_memory_regions_num;
    }

    /// Get an initial memory region by index
    pub fn get_initial_memory_region(&self, index: usize) -> Option<PhysMemoryArea> {
        let inner = self.inner.lock();
        return inner.initial_memory_regions.get(index).copied();
    }
}
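
/// Iterator over the initial memory regions.
///
/// The iterator keeps the manager's spinlock guard alive for its whole lifetime,
/// so it should be dropped before calling any other locking method on
/// `MemBlockManager`.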
pub struct MemBlockIter<'a> {
    inner: SpinLockGuard<'a, InnerMemBlockManager>,
    index: usize,
}

#[allow(dead_code)]
impl<'a> MemBlockIter<'a> {
    /// Get the number of memory regions
    pub fn total_num(&self) -> usize {
        self.inner.initial_memory_regions_num
    }

    /// Get the memory region at the given index
    pub fn get_area(&self, index: usize) -> &PhysMemoryArea {
        &self.inner.initial_memory_regions[index]
    }

    /// Get the current index
    pub fn current_index(&self) -> usize {
        self.index
    }
}

impl<'a> Iterator for MemBlockIter<'a> {
    type Item = PhysMemoryArea;

    fn next(&mut self) -> Option<Self::Item> {
        if self.index >= self.inner.initial_memory_regions_num {
            return None;
        }
        let ret = self.inner.initial_memory_regions[self.index];
        self.index += 1;
        return Some(ret);
    }
}

bitflags! {
    /// Memory region attributes
    pub struct MemoryAreaAttr: u32 {
        /// No special request
        const NONE = 0x0;
        /// Hotpluggable region
        const HOTPLUG = (1 << 0);
        /// Mirrored region
        const MIRROR = (1 << 1);
        /// Do not add to the kernel direct mapping
        const NOMAP = (1 << 2);
        /// Always detected via a driver
        const DRIVER_MANAGED = (1 << 3);
        /// Memory is reserved
        const RESERVED = (1 << 4);
    }
}
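
// Minimal usage sketch (illustrative only; the function name, addresses and sizes
// below are made up -- early boot code is expected to pass in the real layout
// reported by the bootloader):
//
// fn early_memblock_setup() -> Result<(), SystemError> {
//     let manager = mem_block_manager();
//     // Describe available RAM.
//     manager.add_block(PhysAddr::new(0x0010_0000), 0x4000_0000)?;
//     // Keep the kernel image out of the allocatable pool.
//     manager.reserve_block(PhysAddr::new(0x0010_0000), 0x0080_0000)?;
//     // Firmware-owned range that must stay out of the direct mapping.
//     manager.mark_nomap(PhysAddr::new(0x000E_0000), 0x2_0000)?;
//
//     // Iterating holds the manager's spinlock, so keep the loop body short.
//     for area in manager.to_iter() {
//         let _ = area;
//     }
//     Ok(())
// }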