use core::intrinsics::unlikely;

use log::error;
use system_error::SystemError;

use crate::libs::{
    align::{page_align_down, page_align_up},
    spinlock::{SpinLock, SpinLockGuard},
};

use super::{PhysAddr, PhysMemoryArea};

pub const INITIAL_MEMORY_REGIONS_NUM: usize = 128;

/// Manager of the initial (boot-time) memory regions
static MEM_BLOCK_MANAGER: MemBlockManager = MemBlockManager::new();

#[inline(always)]
pub fn mem_block_manager() -> &'static MemBlockManager {
    &MEM_BLOCK_MANAGER
}

/// Memory region manager
#[derive(Debug)]
pub struct MemBlockManager {
    inner: SpinLock<InnerMemBlockManager>,
}

#[derive(Debug)]
pub struct InnerMemBlockManager {
    /// Initial memory regions
    ///
    /// Records the memory layout at kernel boot time; the regions are kept in ascending order and do not overlap.
    initial_memory_regions: [PhysMemoryArea; INITIAL_MEMORY_REGIONS_NUM],
    initial_memory_regions_num: usize,
}

impl MemBlockManager {
    #[allow(dead_code)]
    pub const MIN_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(0);
    #[allow(dead_code)]
    pub const MAX_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(usize::MAX);
    const fn new() -> Self {
        Self {
            inner: SpinLock::new(InnerMemBlockManager {
                initial_memory_regions: [PhysMemoryArea::DEFAULT; INITIAL_MEMORY_REGIONS_NUM],
                initial_memory_regions_num: 0,
            }),
        }
    }

    /// Add a memory region.
    ///
    /// If the added region overlaps existing regions, the overlapping regions are merged.
    #[allow(dead_code)]
    pub fn add_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        let r = self.add_range(base, size, MemoryAreaAttr::empty());
        return r;
    }

    /// Add a memory region with the given attributes.
    ///
    /// If the added region overlaps existing regions, the overlapping regions are merged.
    fn add_range(
        &self,
        base: PhysAddr,
        size: usize,
        flags: MemoryAreaAttr,
    ) -> Result<(), SystemError> {
        if size == 0 {
            return Ok(());
        }
        let mut inner = self.inner.lock();
        if inner.initial_memory_regions_num >= INITIAL_MEMORY_REGIONS_NUM {
            panic!("Too many memory regions!");
        }

        let block = PhysMemoryArea::new(base, size, MemoryAreaAttr::empty());
        // Special-case the first region.
        if inner.initial_memory_regions_num == 0 {
            inner.initial_memory_regions[0] = block;
            inner.initial_memory_regions_num += 1;
            return Ok(());
        }

        // First pass: only count how many regions would have to be inserted.
        let blocks_to_add = self
            .do_add_block(&mut inner, block, false, flags)
            .expect("Failed to count blocks to add!");

        if inner.initial_memory_regions_num + blocks_to_add > INITIAL_MEMORY_REGIONS_NUM {
            error!("Too many memory regions!");
            return Err(SystemError::ENOMEM);
        }

        // Second pass: actually insert the regions.
        self.do_add_block(&mut inner, block, true, flags)
            .expect("Failed to add block!");

        return Ok(());
    }
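
    // Illustrative note (not part of the original source): add_block() merges overlapping or
    // adjacent regions that carry identical flags. For example, starting from an empty manager:
    //
    //     manager.add_block(PhysAddr::new(0x0000), 0x1000)?; // regions: [0x0000, 0x1000)
    //     manager.add_block(PhysAddr::new(0x0800), 0x1000)?; // regions: [0x0000, 0x1800)
    //
    // The second call only inserts the non-overlapping tail [0x1000, 0x1800), and the merge pass
    // then collapses the two adjacent entries into a single region.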

    fn do_add_block(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        block: PhysMemoryArea,
        insert: bool,
        flags: MemoryAreaAttr,
    ) -> Result<usize, SystemError> {
        let mut base = block.base;
        let end = block.base + block.size;
        let mut i = 0;
        let mut start_index = -1;
        let mut end_index = -1;

        let mut num_to_add = 0;

        while i < inner.initial_memory_regions_num {
            let range_base = inner.initial_memory_regions[i].base;
            let range_end =
                inner.initial_memory_regions[i].base + inner.initial_memory_regions[i].size;

            if range_base >= end {
                break;
            }
            if range_end <= base {
                i += 1;
                continue;
            }

            // The ranges overlap.

            if range_base > base {
                num_to_add += 1;
                if insert {
                    if start_index == -1 {
                        start_index = i as isize;
                    }
                    end_index = (i + 1) as isize;
                    self.do_insert_area(inner, i, base, range_base - base, flags);
                    i += 1;
                }
            }

            i += 1;
            base = core::cmp::min(range_end, end);
        }

        if base < end {
            num_to_add += 1;
            if insert {
                if start_index == -1 {
                    start_index = i as isize;
                }
                end_index = (i + 1) as isize;
                self.do_insert_area(inner, i, base, end - base, flags);
            }
        }

        if num_to_add == 0 {
            return Ok(0);
        }

        if insert {
            self.do_merge_blocks(inner, start_index, end_index);
        }
        return Ok(num_to_add);
    }

    fn do_insert_area(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        index: usize,
        base: PhysAddr,
        size: usize,
        flags: MemoryAreaAttr,
    ) {
        let copy_elements = inner.initial_memory_regions_num - index;
        inner
            .initial_memory_regions
            .copy_within(index..index + copy_elements, index + 1);
        inner.initial_memory_regions[index] = PhysMemoryArea::new(base, size, flags);
        inner.initial_memory_regions_num += 1;
    }

    fn do_merge_blocks(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        start_index: isize,
        mut end_index: isize,
    ) {
        let mut i = 0;
        if start_index > 0 {
            i = start_index - 1;
        }
        end_index = core::cmp::min(end_index, inner.initial_memory_regions_num as isize - 1);

        while i < end_index {
            {
                let next_base = inner.initial_memory_regions[(i + 1) as usize].base;
                let next_size = inner.initial_memory_regions[(i + 1) as usize].size;
                let next_flags = inner.initial_memory_regions[(i + 1) as usize].flags;
                let this = &mut inner.initial_memory_regions[i as usize];

                if this.base + this.size != next_base || this.flags != next_flags {
                    if unlikely(this.base + this.size > next_base) {
                        panic!("this->base + this->size > next->base");
                    }
                    i += 1;
                    continue;
                }
                this.size += next_size;
            }
            // Shift the following regions forward to fill the gap left by the merged entry.
            let copy_elements = inner.initial_memory_regions_num - (i + 2) as usize;
            inner.initial_memory_regions.copy_within(
                (i + 2) as usize..(i as usize + 2 + copy_elements),
                (i + 1) as usize,
            );

            inner.initial_memory_regions_num -= 1;
            end_index -= 1;
        }
    }

    /// Remove a memory region.
    ///
    /// If the removed range overlaps existing regions, the overlapping regions are split.
    #[allow(dead_code)]
    pub fn remove_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        if size == 0 {
            return Ok(());
        }
        let mut inner = self.inner.lock();
        if inner.initial_memory_regions_num == 0 {
            return Ok(());
        }

        let (start_index, end_index) = self
            .isolate_range(&mut inner, base, size)
            .expect("Failed to isolate range!");

        for i in (start_index..end_index).rev() {
            self.do_remove_region(&mut inner, i);
        }
        return Ok(());
    }

    fn do_remove_region(&self, inner: &mut SpinLockGuard<'_, InnerMemBlockManager>, index: usize) {
        let copy_elements = inner.initial_memory_regions_num - index - 1;
        inner
            .initial_memory_regions
            .copy_within(index + 1..index + 1 + copy_elements, index);

        inner.initial_memory_regions_num -= 1;

        if inner.initial_memory_regions_num == 0 {
            inner.initial_memory_regions[0].base = PhysAddr::new(0);
            inner.initial_memory_regions[0].size = 0;
        }
    }
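
    // Illustrative note (not part of the original source): isolate_range() splits regions that
    // straddle the requested range so that the range afterwards is covered only by whole entries.
    // For example, isolating [0x0800, 0x1000) out of a single region [0x0000, 0x2000) leaves the
    // three regions [0x0000, 0x0800), [0x0800, 0x1000) and [0x1000, 0x2000), and returns the
    // index pair selecting the middle one.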

    /// Find the regions covered by a physical address range and isolate
    /// exactly that range, splitting regions that straddle its boundaries.
    ///
    /// ## Return value
    ///
    /// - Ok((start_index, end_index)): a contiguous run of regions covering the requested size was found. Here:
    ///     - start_index is the index of the first region of the isolated range.
    ///     - end_index is the index one past the last region of the isolated range; it is not part of the returned run itself, but marks where the next (possibly discontiguous) region begins.
    /// - Err(SystemError): not enough room was found to satisfy the requested size, either because there are too few memory regions or because of another system error.
    fn isolate_range(
        &self,
        inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
        base: PhysAddr,
        size: usize,
    ) -> Result<(usize, usize), SystemError> {
        let end = base + size;

        let mut idx = 0;

        let mut start_index = 0;
        let mut end_index = 0;

        if size == 0 {
            return Ok((0, 0));
        }

        while idx < inner.initial_memory_regions_num {
            let range_base = inner.initial_memory_regions[idx].base;
            let range_end = range_base + inner.initial_memory_regions[idx].size;

            if range_base >= end {
                break;
            }
            if range_end <= base {
                idx = idx.checked_add(1).unwrap_or(0);
                continue;
            }

            if range_base < base {
                // regions[idx] intersects from below
                inner.initial_memory_regions[idx].base = base;
                inner.initial_memory_regions[idx].size -= base - range_base;
                self.do_insert_area(
                    inner,
                    idx,
                    range_base,
                    base - range_base,
                    inner.initial_memory_regions[idx].flags,
                );
            } else if range_end > end {
                // regions[idx] intersects from above
                inner.initial_memory_regions[idx].base = end;
                inner.initial_memory_regions[idx].size -= end - range_base;

                self.do_insert_area(
                    inner,
                    idx,
                    range_base,
                    end - range_base,
                    inner.initial_memory_regions[idx].flags,
                );
                // Step back so that the newly inserted lower part is revisited on the next
                // iteration; for idx == 0, usize::MAX wraps back to 0 via the checked_add below.
                if idx == 0 {
                    idx = usize::MAX;
                } else {
                    idx -= 1;
                }
            } else {
                // regions[idx] is inside the range, record it
                if end_index == 0 {
                    start_index = idx;
                }
                end_index = idx + 1;
            }

            idx = idx.checked_add(1).unwrap_or(0);
        }

        return Ok((start_index, end_index));
    }

    /// mark_nomap - Mark a memory region with the `MemoryAreaAttr::NOMAP` flag
    ///
    /// ## Parameters
    ///
    /// - base: physical base address of the region
    /// - size: size of the region
    ///
    /// Memory regions marked with `MemoryAreaAttr::NOMAP` will not be added to the direct mapping of physical memory. They are still covered by the memory map, and the struct pages representing NOMAP memory frames in the memory map will be marked PageReserved().
    /// Note: if memory marked `MemoryAreaAttr::NOMAP` was allocated from memblock, the caller must ignore that memory.
    pub fn mark_nomap(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        return self.set_or_clear_flags(base, size, true, MemoryAreaAttr::NOMAP);
    }

    /// Reference: https://code.dragonos.org.cn/xref/linux-6.1.9/mm/memblock.c?fi=memblock_mark_mirror#940
    pub fn mark_mirror(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        return self.set_or_clear_flags(base, size, true, MemoryAreaAttr::MIRROR);
    }

    fn set_or_clear_flags(
        &self,
        mut base: PhysAddr,
        mut size: usize,
        set: bool,
        flags: MemoryAreaAttr,
    ) -> Result<(), SystemError> {
        // Expand the range outward to page boundaries so the flags are applied to whole pages.
        let rsvd_base = PhysAddr::new(page_align_down(base.data()));
        size = page_align_up(size + base.data() - rsvd_base.data());
        base = rsvd_base;

        let mut inner = self.inner.lock();
        let (start_index, end_index) = self.isolate_range(&mut inner, base, size)?;
        for i in start_index..end_index {
            if set {
                inner.initial_memory_regions[i].flags |= flags;
            } else {
                inner.initial_memory_regions[i].flags &= !flags;
            }
        }

        let num = inner.initial_memory_regions_num as isize;
        self.do_merge_blocks(&mut inner, 0, num);
        return Ok(());
    }

    /// Mark a memory region as reserved.
    pub fn reserve_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
        return self.set_or_clear_flags(base, size, true, MemoryAreaAttr::RESERVED);
    }

    /// Check whether [base, base + size) overlaps any existing region.
    pub fn is_overlapped(&self, base: PhysAddr, size: usize) -> bool {
        let inner = self.inner.lock();
        return self.do_is_overlapped(base, size, false, &inner);
    }

    /// Check whether [base, base + size) overlaps any existing reserved region.
    pub fn is_overlapped_with_reserved(&self, base: PhysAddr, size: usize) -> bool {
        let inner = self.inner.lock();
        return self.do_is_overlapped(base, size, true, &inner);
    }

    fn do_is_overlapped(
        &self,
        base: PhysAddr,
        size: usize,
        require_reserved: bool,
        inner: &SpinLockGuard<'_, InnerMemBlockManager>,
    ) -> bool {
        let mut res = false;
        for i in 0..inner.initial_memory_regions_num {
            if require_reserved
                && !inner.initial_memory_regions[i]
                    .flags
                    .contains(MemoryAreaAttr::RESERVED)
            {
                // Skip non-reserved regions.
                continue;
            }

            let range_base = inner.initial_memory_regions[i].base;
            let range_end = range_base + inner.initial_memory_regions[i].size;
            if (base >= range_base && base < range_end)
                || (base + size > range_base && base + size <= range_end)
                || (base <= range_base && base + size >= range_end)
            {
                res = true;
                break;
            }
        }

        return res;
    }

    /// Create an iterator over all memory regions.
    pub fn to_iter(&self) -> MemBlockIter {
        let inner = self.inner.lock();
        return MemBlockIter {
            inner,
            index: 0,
            usable_only: false,
        };
    }

    /// Create an iterator over all usable physical memory regions.
    pub fn to_iter_available(&self) -> MemBlockIter {
        let inner = self.inner.lock();
        return MemBlockIter {
            inner,
            index: 0,
            usable_only: true,
        };
    }

    /// Get the number of initial memory regions.
    pub fn total_initial_memory_regions(&self) -> usize {
        let inner = self.inner.lock();
        return inner.initial_memory_regions_num;
    }

    /// Get the initial memory region at the given index.
    pub fn get_initial_memory_region(&self, index: usize) -> Option<PhysMemoryArea> {
        let inner = self.inner.lock();
        return inner.initial_memory_regions.get(index).copied();
    }
}

pub struct MemBlockIter<'a> {
    inner: SpinLockGuard<'a, InnerMemBlockManager>,
    index: usize,
    usable_only: bool,
}

#[allow(dead_code)]
impl<'a> MemBlockIter<'a> {
    /// Get the number of memory regions.
    pub fn total_num(&self) -> usize {
        self.inner.initial_memory_regions_num
    }

    /// Get the memory region at the given index.
    pub fn get_area(&self, index: usize) -> &PhysMemoryArea {
        &self.inner.initial_memory_regions[index]
    }

    /// Get the current index.
    pub fn current_index(&self) -> usize {
        self.index
    }
}

impl<'a> Iterator for MemBlockIter<'a> {
    type Item = PhysMemoryArea;

    fn next(&mut self) -> Option<Self::Item> {
        while self.index < self.inner.initial_memory_regions_num {
            if self.usable_only
                && !self.inner.initial_memory_regions[self.index]
                    .flags
                    .is_empty()
            {
                // When iterating usable memory only, skip regions that carry any attribute.
                self.index += 1;
                if self.index >= self.inner.initial_memory_regions_num {
                    return None;
                }
                continue;
            }
            break;
        }
        if self.index >= self.inner.initial_memory_regions_num {
            return None;
        }
        let ret = self.inner.initial_memory_regions[self.index];
        self.index += 1;
        return Some(ret);
    }
}

bitflags! {
    /// Memory area attributes
    #[allow(clippy::bad_bit_mask)]
    pub struct MemoryAreaAttr: u32 {
        /// No special request
        const NONE = 0x0;
        /// Hotpluggable region
        const HOTPLUG = (1 << 0);
        /// Mirrored region
        const MIRROR = (1 << 1);
        /// Do not add to the kernel direct mapping
        const NOMAP = (1 << 2);
        /// Always detected via a driver
        const DRIVER_MANAGED = (1 << 3);
        /// Memory is reserved
        const RESERVED = (1 << 4);
    }
}
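
// The module below is an illustrative usage sketch added for documentation purposes; it is not
// part of the original file. It assumes the PhysAddr / PhysMemoryArea APIs used above and a test
// harness able to run #[cfg(test)] code for this kernel crate, which may not exist in this build.
#[cfg(test)]
mod memblock_usage_sketch {
    use super::*;

    #[test]
    fn add_merge_and_remove() {
        // A fresh, local manager (MemBlockManager::new() is private but visible to this child module).
        let manager = MemBlockManager::new();

        // Adjacent regions with identical (empty) flags are merged into a single entry.
        manager.add_block(PhysAddr::new(0x0000), 0x1000).unwrap();
        manager.add_block(PhysAddr::new(0x1000), 0x1000).unwrap();
        assert_eq!(manager.total_initial_memory_regions(), 1);

        // Removing a hole in the middle splits the remaining memory into two entries.
        manager.remove_block(PhysAddr::new(0x0800), 0x0800).unwrap();
        assert_eq!(manager.total_initial_memory_regions(), 2);

        // Both remaining entries carry no attributes, so the "available" iterator yields both.
        assert_eq!(manager.to_iter_available().count(), 2);
    }
}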