pub mod barrier;
pub mod bump;
pub mod fault;
pub mod pkru;

use alloc::sync::Arc;
use alloc::vec::Vec;
use hashbrown::HashSet;
use x86::time::rdtsc;
use x86_64::registers::model_specific::EferFlags;

use crate::driver::serial::serial8250::send_to_default_serial8250_port;
use crate::include::bindings::bindings::{
    multiboot2_get_load_base, multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
    multiboot_tag_load_base_addr_t,
};
use crate::libs::align::page_align_up;
use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
use crate::libs::spinlock::SpinLock;

use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
use crate::mm::memblock::mem_block_manager;
use crate::mm::ucontext::LockedVMA;
use crate::{
    arch::MMArch,
    mm::allocator::{buddy::BuddyAllocator, bump::BumpAllocator},
};

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{PageEntry, PageFlags, PAGE_1G_SHIFT};
use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr};
use crate::{kdebug, kinfo, kwarn};
use system_error::SystemError;

use core::arch::asm;
use core::ffi::c_void;
use core::fmt::Debug;
use core::mem::{self};

use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};

use super::kvm::vmx::vmcs::VmcsFields;
use super::kvm::vmx::vmx_asm_wrapper::vmx_vmread;

pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

/// Initial value of the CR3 register, i.e. the location of the first kernel page
/// table created during memory-management initialization.
static mut INITIAL_CR3_VALUE: PhysAddr = PhysAddr::new(0);

static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);

#[derive(Clone, Copy, Debug)]
pub struct X86_64MMBootstrapInfo {
    kernel_load_base_paddr: usize,
    kernel_code_start: usize,
    kernel_code_end: usize,
    kernel_data_end: usize,
    kernel_rodata_end: usize,
    start_brk: usize,
}

pub(super) static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;

/// @brief The x86_64 memory-management architecture struct
#[derive(Debug, Clone, Copy, Hash)]
pub struct X86_64MMArch;

/// Whether the XD (execute-disable) bit is reserved
static XD_RESERVED: AtomicBool = AtomicBool::new(false);

impl MemoryManagementArch for X86_64MMArch {
    /// x86 currently supports page-fault handling
    const PAGE_FAULT_ENABLED: bool = true;
    /// 4K pages
    const PAGE_SHIFT: usize = 12;

    /// Each page-table entry is 8 bytes, so one table holds 512 entries
    const PAGE_ENTRY_SHIFT: usize = 9;

    /// Four levels of page tables (PML4T, PDPT, PDT, PT)
    const PAGE_LEVELS: usize = 4;

    /// Shift of the physical-address field in a page-table entry. On x86_64 an entry
    /// holds the frame address in bits [12, 51] (a 52-bit maximum physical address),
    /// so the address field ends at bit index 52.
    /// Note that bit 63 is the XD bit, which controls whether execution is allowed.
    const ENTRY_ADDRESS_SHIFT: usize = 52;

    const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_PRESENT: usize = 1 << 0;

    const ENTRY_FLAG_READONLY: usize = 0;

    const ENTRY_FLAG_WRITEABLE: usize = 1 << 1;
    const ENTRY_FLAG_READWRITE: usize = 1 << 1;

    const ENTRY_FLAG_USER: usize = 1 << 2;

    const ENTRY_FLAG_WRITE_THROUGH: usize = 1 << 3;

    const ENTRY_FLAG_CACHE_DISABLE: usize = 1 << 4;

    const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;
    /// x86_64 has no EXEC flag; it only has the NO_EXEC (XD) flag
    const ENTRY_FLAG_EXEC: usize = 0;

    const ENTRY_FLAG_ACCESSED: usize = 1 << 5;
    const ENTRY_FLAG_DIRTY: usize = 1 << 6;
    const ENTRY_FLAG_HUGE_PAGE: usize = 1 << 7;
    const ENTRY_FLAG_GLOBAL: usize = 1 << 8;
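
    // Illustrative note (not part of the original sources): these flag bits are meant to
    // be OR-ed together and combined with a frame address via `make_entry` below. For
    // example, a present, writable, user-accessible, non-executable 4K data page could be
    // encoded roughly as:
    //
    //     let flags = X86_64MMArch::ENTRY_FLAG_PRESENT
    //         | X86_64MMArch::ENTRY_FLAG_READWRITE
    //         | X86_64MMArch::ENTRY_FLAG_USER
    //         | X86_64MMArch::ENTRY_FLAG_NO_EXEC;
    //     let entry = X86_64MMArch::make_entry(PhysAddr::new(0x1000), flags);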

    /// Offset between physical and virtual addresses:
    /// 0xffff_8000_0000_0000
    const PHYS_OFFSET: usize = Self::PAGE_NEGATIVE_MASK + (Self::PAGE_ADDRESS_SIZE >> 1);
    const KERNEL_LINK_OFFSET: usize = 0x100000;

    // See https://code.dragonos.org.cn/xref/linux-6.1.9/arch/x86/include/asm/page_64_types.h#75
    const USER_END_VADDR: VirtAddr =
        VirtAddr::new((Self::PAGE_ADDRESS_SIZE >> 1) - Self::PAGE_SIZE);
    const USER_BRK_START: VirtAddr = VirtAddr::new(0x700000000000);
    const USER_STACK_START: VirtAddr = VirtAddr::new(0x6ffff0a00000);

    const FIXMAP_START_VADDR: VirtAddr = VirtAddr::new(0xffffb00000000000);
    /// The FIXMAP region is 1 MB (256 4K pages)
    const FIXMAP_SIZE: usize = 256 * 4096;

    const MMIO_BASE: VirtAddr = VirtAddr::new(0xffffa10000000000);
    const MMIO_SIZE: usize = 1 << PAGE_1G_SHIFT;

    /// @brief Initialize memory management and discover the physical memory areas
    unsafe fn init() {
        extern "C" {
            fn _text();
            fn _etext();
            fn _edata();
            fn _erodata();
            fn _end();
        }

        Self::init_xd_rsvd();
        let load_base_paddr = Self::get_load_base_paddr();

        let bootstrap_info = X86_64MMBootstrapInfo {
            kernel_load_base_paddr: load_base_paddr.data(),
            kernel_code_start: _text as usize,
            kernel_code_end: _etext as usize,
            kernel_data_end: _edata as usize,
            kernel_rodata_end: _erodata as usize,
            start_brk: _end as usize,
        };

        unsafe {
            BOOTSTRAP_MM_INFO = Some(bootstrap_info);
        }

        // Initialize the physical memory areas (obtained from multiboot2)
        Self::init_memory_area_from_multiboot2().expect("init memory area failed");

        kdebug!("bootstrap info: {:?}", unsafe { BOOTSTRAP_MM_INFO });
        kdebug!("phys[0]=virt[0x{:x}]", unsafe {
            MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()
        });

        // Initialize the memory allocator
        unsafe { allocator_init() };

        send_to_default_serial8250_port("x86 64 init done\n\0".as_bytes());
    }

    /// @brief Flush the TLB entry for the given virtual address
    unsafe fn invalidate_page(address: VirtAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("invlpg [{0}]", in(reg) address.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Flush all TLB entries
    unsafe fn invalidate_all() {
        compiler_fence(Ordering::SeqCst);
        // Reload CR3 to flush the entire TLB
        Self::set_table(PageTableKind::User, Self::table(PageTableKind::User));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Get the physical address of the top-level page table
    unsafe fn table(table_kind: PageTableKind) -> PhysAddr {
        match table_kind {
            PageTableKind::Kernel | PageTableKind::User => {
                compiler_fence(Ordering::SeqCst);
                let cr3 = x86::controlregs::cr3() as usize;
                compiler_fence(Ordering::SeqCst);
                return PhysAddr::new(cr3);
            }
            PageTableKind::EPT => {
                let eptp =
                    vmx_vmread(VmcsFields::CTRL_EPTP_PTR as u32).expect("Failed to read eptp");
                return PhysAddr::new(eptp as usize);
            }
        }
    }

    /// @brief Load the physical address of the top-level page table into the processor
    unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("mov cr3, {}", in(reg) table.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Check whether a virtual address is valid
    fn virt_is_valid(virt: VirtAddr) -> bool {
        return virt.is_canonical();
    }

    /// Get the address of the first kernel page table created during
    /// memory-management initialization
    fn initial_page_table() -> PhysAddr {
        unsafe {
            return INITIAL_CR3_VALUE;
        }
    }
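
    // Illustrative note (not part of the original sources): `table` and `set_table` above
    // read and write CR3 directly, which is why `invalidate_all` can flush the whole TLB
    // with a simple CR3 round-trip, roughly:
    //
    //     unsafe {
    //         let cur = X86_64MMArch::table(PageTableKind::Kernel);
    //         X86_64MMArch::set_table(PageTableKind::Kernel, cur); // reloading CR3 flushes the TLB
    //     }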

    /// @brief Create a new top-level page table
    ///
    /// This function creates the page table and copies the kernel mappings into it
    ///
    /// @return the new page table
    fn setup_new_usermapper() -> Result<crate::mm::ucontext::UserMapper, SystemError> {
        let new_umapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::User, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };

        let current_ktable: KernelMapper = KernelMapper::lock();
        let copy_mapping = |pml4_entry_no| unsafe {
            let entry: PageEntry<X86_64MMArch> = current_ktable
                .table()
                .entry(pml4_entry_no)
                .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
            new_umapper.table().set_entry(pml4_entry_no, entry)
        };

        // Copy the kernel mappings
        for pml4_entry_no in MMArch::PAGE_KERNEL_INDEX..MMArch::PAGE_ENTRY_NUM {
            copy_mapping(pml4_entry_no);
        }

        return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
    }

    const PAGE_SIZE: usize = 1 << Self::PAGE_SHIFT;

    const PAGE_OFFSET_MASK: usize = Self::PAGE_SIZE - 1;

    const PAGE_MASK: usize = !(Self::PAGE_OFFSET_MASK);

    const PAGE_ADDRESS_SHIFT: usize = Self::PAGE_LEVELS * Self::PAGE_ENTRY_SHIFT + Self::PAGE_SHIFT;

    const PAGE_ADDRESS_SIZE: usize = 1 << Self::PAGE_ADDRESS_SHIFT;

    const PAGE_ADDRESS_MASK: usize = Self::PAGE_ADDRESS_SIZE - Self::PAGE_SIZE;

    const PAGE_ENTRY_SIZE: usize = 1 << (Self::PAGE_SHIFT - Self::PAGE_ENTRY_SHIFT);

    const PAGE_ENTRY_NUM: usize = 1 << Self::PAGE_ENTRY_SHIFT;

    const PAGE_ENTRY_MASK: usize = Self::PAGE_ENTRY_NUM - 1;

    const PAGE_KERNEL_INDEX: usize = (Self::PHYS_OFFSET & Self::PAGE_ADDRESS_MASK)
        >> (Self::PAGE_ADDRESS_SHIFT - Self::PAGE_ENTRY_SHIFT);

    const PAGE_NEGATIVE_MASK: usize = !((Self::PAGE_ADDRESS_SIZE) - 1);

    const ENTRY_ADDRESS_SIZE: usize = 1 << Self::ENTRY_ADDRESS_SHIFT;

    const ENTRY_ADDRESS_MASK: usize = Self::ENTRY_ADDRESS_SIZE - Self::PAGE_SIZE;

    const ENTRY_FLAGS_MASK: usize = !Self::ENTRY_ADDRESS_MASK;

    unsafe fn read<T>(address: VirtAddr) -> T {
        return core::ptr::read(address.data() as *const T);
    }

    unsafe fn write<T>(address: VirtAddr, value: T) {
        core::ptr::write(address.data() as *mut T, value);
    }

    unsafe fn write_bytes(address: VirtAddr, value: u8, count: usize) {
        core::ptr::write_bytes(address.data() as *mut u8, value, count);
    }

    unsafe fn phys_2_virt(phys: PhysAddr) -> Option<VirtAddr> {
        if let Some(vaddr) = phys.data().checked_add(Self::PHYS_OFFSET) {
            return Some(VirtAddr::new(vaddr));
        } else {
            return None;
        }
    }

    unsafe fn virt_2_phys(virt: VirtAddr) -> Option<PhysAddr> {
        if let Some(paddr) = virt.data().checked_sub(Self::PHYS_OFFSET) {
            return Some(PhysAddr::new(paddr));
        } else {
            return None;
        }
    }

    #[inline(always)]
    fn make_entry(paddr: PhysAddr, page_flags: usize) -> usize {
        return paddr.data() | page_flags;
    }

    fn vma_access_permitted(
        vma: Arc<LockedVMA>,
        write: bool,
        execute: bool,
        foreign: bool,
    ) -> bool {
        if execute {
            return true;
        }
        if foreign | vma.is_foreign() {
            return true;
        }
        pkru::pkru_allows_pkey(pkru::vma_pkey(vma), write)
    }
}
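
// Illustrative sketch based on the constants above (not part of the original sources):
// with PHYS_OFFSET = 0xffff_8000_0000_0000, the direct-map conversions are plain offset
// arithmetic, e.g.
//
//     let p = PhysAddr::new(0x10_0000);
//     let v = unsafe { X86_64MMArch::phys_2_virt(p) }.unwrap(); // 0xffff_8000_0010_0000
//     assert_eq!(unsafe { X86_64MMArch::virt_2_phys(v) }, Some(p));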

impl X86_64MMArch {
    unsafe fn get_load_base_paddr() -> PhysAddr {
        let mut mb2_lb_info: [multiboot_tag_load_base_addr_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("get_load_base_paddr begin\n\0".as_bytes());

        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_load_base),
            &mut mb2_lb_info as *mut [multiboot_tag_load_base_addr_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );

        if mb2_count == 0 {
            send_to_default_serial8250_port(
                "get_load_base_paddr mb2_count == 0, default to 1MB\n\0".as_bytes(),
            );
            return PhysAddr::new(0x100000);
        }

        let phys = mb2_lb_info[0].load_base_addr as usize;

        return PhysAddr::new(phys);
    }

    unsafe fn init_memory_area_from_multiboot2() -> Result<usize, SystemError> {
        // This array holds the memory-area information (obtained from C)
        let mut mb2_mem_info: [multiboot_mmap_entry_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 begin\n\0".as_bytes());

        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_memory),
            &mut mb2_mem_info as *mut [multiboot_mmap_entry_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 2\n\0".as_bytes());

        let mb2_count = mb2_count as usize;
        let mut areas_count = 0usize;
        let mut total_mem_size = 0usize;
        for info_entry in mb2_mem_info.iter().take(mb2_count) {
            // Only use the memory area if its type is 1 (RAM)
            if info_entry.type_ == 1 {
                // Skip the memory area if its len is 0
                if info_entry.len == 0 {
                    continue;
                }

                total_mem_size += info_entry.len as usize;

                mem_block_manager()
                    .add_block(
                        PhysAddr::new(info_entry.addr as usize),
                        info_entry.len as usize,
                    )
                    .unwrap_or_else(|e| {
                        kwarn!(
                            "Failed to add memory block: base={:#x}, size={:#x}, error={:?}",
                            info_entry.addr,
                            info_entry.len,
                            e
                        );
                    });
                areas_count += 1;
            }
        }
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 end\n\0".as_bytes());
        kinfo!("Total memory size: {} MB, total areas from multiboot2: {mb2_count}, valid areas: {areas_count}", total_mem_size / 1024 / 1024);
        return Ok(areas_count);
    }

    fn init_xd_rsvd() {
        // Read the IA32_EFER register
        let efer: EferFlags = x86_64::registers::model_specific::Efer::read();
        if !efer.contains(EferFlags::NO_EXECUTE_ENABLE) {
            // NO_EXECUTE_ENABLE is false, so mark XD as reserved
            kdebug!("NO_EXECUTE_ENABLE is false, set XD_RESERVED to true");
            XD_RESERVED.store(true, Ordering::Relaxed);
        }
        compiler_fence(Ordering::SeqCst);
    }

    /// Check whether the XD flag is reserved
    pub fn is_xd_reserved() -> bool {
        // return XD_RESERVED.load(Ordering::Relaxed);

        // Execute-disable is not supported yet, so always return true.
        // The reason is that the page-level XD bit is apparently not being set correctly yet,
        // which triggers page faults.
        return true;
    }
}

impl VirtAddr {
    /// @brief Check whether the virtual address is canonical
    #[inline(always)]
    pub fn is_canonical(self) -> bool {
        let x = self.data() & X86_64MMArch::PHYS_OFFSET;
        // If x is 0, the high bits of the address are all 0 and it is a valid user address.
        // If x equals PHYS_OFFSET, the high bits are all 1 and it is a valid kernel address.
        return x == 0 || x == X86_64MMArch::PHYS_OFFSET;
    }
}
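
// Illustrative sketch (not part of the original sources): under the check above, the low
// and high canonical halves of the 48-bit address space are both accepted, e.g.
//
//     assert!(VirtAddr::new(0x0000_7fff_ffff_f000).is_canonical());  // user half
//     assert!(VirtAddr::new(0xffff_8000_0010_0000).is_canonical());  // kernel half
//     assert!(!VirtAddr::new(0x0000_8000_0000_0000).is_canonical()); // non-canonical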

unsafe fn allocator_init() {
    let virt_offset = VirtAddr::new(page_align_up(BOOTSTRAP_MM_INFO.unwrap().start_brk));

    let phy_offset = unsafe { MMArch::virt_2_phys(virt_offset) }.unwrap();

    mem_block_manager()
        .reserve_block(PhysAddr::new(0), phy_offset.data())
        .expect("Failed to reserve block");
    let mut bump_allocator = BumpAllocator::<X86_64MMArch>::new(phy_offset.data());
    kdebug!(
        "BumpAllocator created, offset={:?}",
        bump_allocator.offset()
    );

    // Stash the address of the initial page table that head.S set up. Whether it should be
    // handed back to the buddy allocator's free space is left for later. It is not returned
    // for now because of security concerns: these initial page tables live in the kernel
    // data segment, and returning them to the buddy allocator could carry some risk (some
    // code may perform security checks based on virtual addresses).
    let _old_page_table = MMArch::table(PageTableKind::Kernel);

    let new_page_table: PhysAddr;
    // Use the bump allocator to map every physical page into the page table
    {
        // Create the new page table with the bump allocator
        let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =
            crate::mm::page::PageMapper::<MMArch, _>::create(
                PageTableKind::Kernel,
                &mut bump_allocator,
            )
            .expect("Failed to create page mapper");
        new_page_table = mapper.table().phys();
        kdebug!("PageMapper created");

        // Remove the initial mappings that head.S set up (without flushing the TLB yet)
        {
            let table = mapper.table();
            let empty_entry = PageEntry::<MMArch>::from_usize(0);
            for i in 0..MMArch::PAGE_ENTRY_NUM {
                table
                    .set_entry(i, empty_entry)
                    .expect("Failed to empty page table entry");
            }
        }
        kdebug!("Successfully emptied page table");

        let total_num = mem_block_manager().total_initial_memory_regions();
        for i in 0..total_num {
            let area = mem_block_manager().get_initial_memory_region(i).unwrap();
            // kdebug!("area: base={:?}, size={:#x}, end={:?}", area.base, area.size, area.base + area.size);
            for i in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {
                let paddr = area.base.add(i * MMArch::PAGE_SIZE);
                let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
                let flags = kernel_page_flags::<MMArch>(vaddr);

                let flusher = mapper
                    .map_phys(vaddr, paddr, flags)
                    .expect("Failed to map frame");
                // Don't flush the TLB yet
                flusher.ignore();
            }
        }
    }

    unsafe {
        INITIAL_CR3_VALUE = new_page_table;
    }
    kdebug!(
        "After mapping all physical memory, DragonOS used: {} KB",
        bump_allocator.offset() / 1024
    );

    // Initialize the buddy allocator
    let buddy_allocator = unsafe { BuddyAllocator::<X86_64MMArch>::new(bump_allocator).unwrap() };
    // Install the global page-frame allocator
    unsafe { set_inner_allocator(buddy_allocator) };
    kinfo!("Successfully initialized buddy allocator");
    // Disable output to the screen window
    scm_disable_put_to_window();

    // Make the new page table current
    {
        let mut binding = INNER_ALLOCATOR.lock();
        let mut allocator_guard = binding.as_mut().unwrap();
        kdebug!("To enable new page table.");
        compiler_fence(Ordering::SeqCst);
        let mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
            PageTableKind::Kernel,
            new_page_table,
            &mut allocator_guard,
        );
        compiler_fence(Ordering::SeqCst);
        mapper.make_current();
        compiler_fence(Ordering::SeqCst);
        kdebug!("New page table enabled");
    }
    kdebug!("Successfully enabled new page table");
}

#[no_mangle]
pub extern "C" fn rs_test_buddy() {
    test_buddy();
}

pub fn test_buddy() {
    // Allocate memory, write data into it, then free it.
    // Allocate 200 MB in total.
    const TOTAL_SIZE: usize = 200 * 1024 * 1024;

    for i in 0..10 {
        kdebug!("Test buddy, round: {i}");
        // Holds the allocated blocks
        let mut v: Vec<(PhysAddr, PageFrameCount)> = Vec::with_capacity(60 * 1024);
        // Holds the addresses of the allocated blocks (used to detect duplicates)
        let mut addr_set: HashSet<PhysAddr> = HashSet::new();

        let mut allocated = 0usize;

        let mut free_count = 0usize;

        while allocated < TOTAL_SIZE {
            let mut random_size = 0u64;
            unsafe { x86::random::rdrand64(&mut random_size) };
            // Allocate at most 4 MB at a time
            random_size %= 1024 * 4096;
            if random_size == 0 {
                continue;
            }
            let random_size =
                core::cmp::min(page_align_up(random_size as usize), TOTAL_SIZE - allocated);
            let random_size = PageFrameCount::from_bytes(random_size.next_power_of_two()).unwrap();
            // Allocate the frames
            let (paddr, allocated_frame_count) =
                unsafe { LockedFrameAllocator.allocate(random_size).unwrap() };
            assert!(allocated_frame_count.data().is_power_of_two());
            assert!(paddr.data() % MMArch::PAGE_SIZE == 0);
            unsafe {
                assert!(MMArch::phys_2_virt(paddr)
                    .as_ref()
                    .unwrap()
                    .check_aligned(allocated_frame_count.data() * MMArch::PAGE_SIZE));
            }
            allocated += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            v.push((paddr, allocated_frame_count));
            assert!(addr_set.insert(paddr), "duplicate address: {:?}", paddr);

            // Write data
            let vaddr = unsafe { MMArch::phys_2_virt(paddr).unwrap() };
            let slice = unsafe {
                core::slice::from_raw_parts_mut(
                    vaddr.data() as *mut u8,
                    allocated_frame_count.data() * MMArch::PAGE_SIZE,
                )
            };
            for (i, item) in slice.iter_mut().enumerate() {
                *item = ((i + unsafe { rdtsc() } as usize) % 256) as u8;
            }

            // Randomly free one block
            if !v.is_empty() {
                let mut random_index = 0u64;
                unsafe { x86::random::rdrand64(&mut random_index) };
                // Free with roughly 80% probability (skip when the remainder is 8 or 9)
                if random_index % 10 > 7 {
                    continue;
                }
                random_index %= v.len() as u64;
                let random_index = random_index as usize;
                let (paddr, allocated_frame_count) = v.remove(random_index);
                assert!(addr_set.remove(&paddr));
                unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
                free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            }
        }

        kdebug!(
            "Allocated {} MB memory, released: {} MB, not released: {} bytes",
            allocated / 1024 / 1024,
            free_count / 1024 / 1024,
            (allocated - free_count)
        );

        kdebug!("Now, to release buddy memory");
        // Free all remaining memory
        for (paddr, allocated_frame_count) in v {
            unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
            assert!(addr_set.remove(&paddr));
            free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
        }

        kdebug!("release done!, allocated: {allocated}, free_count: {free_count}");
    }
}

/// The global page-frame allocator
#[derive(Debug, Clone, Copy, Hash)]
pub struct LockedFrameAllocator;

impl FrameAllocator for LockedFrameAllocator {
    unsafe fn allocate(&mut self, mut count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
        count = count.next_power_of_two();
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.allocate(count);
        } else {
            return None;
        }
    }

    unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
        assert!(count.data().is_power_of_two());
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.free(address, count);
        }
    }

    unsafe fn usage(&self) -> PageFrameUsage {
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.usage();
        } else {
            panic!("usage error");
        }
    }
}
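
// Illustrative usage sketch (not part of the original sources): once the buddy allocator
// has been installed by `set_inner_allocator`, physical frames are obtained and returned
// through the global `LockedFrameAllocator`, roughly:
//
//     let count = PageFrameCount::from_bytes(4 * MMArch::PAGE_SIZE).unwrap();
//     if let Some((paddr, allocated)) = unsafe { LockedFrameAllocator.allocate(count) } {
//         // ... use the frames at `paddr` ...
//         unsafe { LockedFrameAllocator.free(paddr, allocated) };
//     }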

/// Get the default page flags for a kernel address
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> PageFlags<A> {
    let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.unwrap();

    if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
        // Remap kernel code as executable
        return PageFlags::new().set_execute(true).set_write(true);
    } else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
        // Remap kernel rodata as read only
        return PageFlags::new().set_execute(true);
    } else {
        return PageFlags::new().set_write(true).set_execute(true);
    }
}

unsafe fn set_inner_allocator(allocator: BuddyAllocator<MMArch>) {
    static FLAG: AtomicBool = AtomicBool::new(false);
    if FLAG
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_err()
    {
        panic!("Cannot set inner allocator twice!");
    }
    *INNER_ALLOCATOR.lock() = Some(allocator);
}

/// Manager for the low-address remapping
///
/// Before SMP initialization completes, the low-address identity mapping is still needed,
/// so this mapping has to be removed once SMP initialization is done.
pub struct LowAddressRemapping;

impl LowAddressRemapping {
    // Map 64 MB
    const REMAP_SIZE: usize = 64 * 1024 * 1024;

    pub unsafe fn remap_at_low_address(mapper: &mut PageMapper) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let paddr = PhysAddr::new(i * MMArch::PAGE_SIZE);
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let flags = kernel_page_flags::<MMArch>(vaddr);

            let flusher = mapper
                .map_phys(vaddr, paddr, flags)
                .expect("Failed to map frame");
            // Don't flush the TLB yet
            flusher.ignore();
        }
    }

    /// Remove the low-address mapping
    pub unsafe fn unmap_at_low_address(mapper: &mut PageMapper, flush: bool) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let (_, _, flusher) = mapper
                .unmap_phys(vaddr, true)
                .expect("Failed to unmap frame");
            if !flush {
                flusher.ignore();
            }
        }
    }
}
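
// Illustrative call-order sketch (assumption, not in the original sources): the low
// identity mapping is expected to be created before the application processors are
// started and torn down once SMP bring-up has finished, roughly:
//
//     // `kmapper: &mut PageMapper` is whatever kernel mapper the caller already holds.
//     unsafe { LowAddressRemapping::remap_at_low_address(kmapper) };
//     // ... start the application processors ...
//     unsafe { LowAddressRemapping::unmap_at_low_address(kmapper, true) };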