pub mod barrier;

use alloc::vec::Vec;
use hashbrown::HashSet;
use x86::time::rdtsc;
use x86_64::registers::model_specific::EferFlags;

use crate::driver::tty::serial::serial8250::send_to_default_serial8250_port;
use crate::include::bindings::bindings::{
    multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
};
use crate::libs::align::page_align_up;
use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
use crate::libs::printk::PrintkWriter;
use crate::libs::spinlock::SpinLock;

use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
use crate::mm::mmio_buddy::mmio_init;
use crate::{
    arch::MMArch,
    mm::allocator::{buddy::BuddyAllocator, bump::BumpAllocator},
};

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{PageEntry, PageFlags};
use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, PhysMemoryArea, VirtAddr};
use crate::syscall::SystemError;
use crate::{kdebug, kinfo};

use core::arch::asm;
use core::ffi::c_void;
use core::fmt::{Debug, Write};
use core::mem::{self};

use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};

use super::kvm::vmx::vmcs::VmcsFields;
use super::kvm::vmx::vmx_asm_wrapper::vmx_vmread;

pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

/// @brief Array used to store the physical memory areas
static mut PHYS_MEMORY_AREAS: [PhysMemoryArea; 512] = [PhysMemoryArea {
    base: PhysAddr::new(0),
    size: 0,
}; 512];

/// Initial value of the CR3 register, i.e. the location of the first kernel page table
/// created during memory-management initialization
static mut INITIAL_CR3_VALUE: PhysAddr = PhysAddr::new(0);

/// Index of the first kernel page table entry in the PML4.
/// Entries [256, 512) of the top-level page table belong to the kernel.
static KERNEL_PML4E_NO: usize = (X86_64MMArch::PHYS_OFFSET & ((1 << 48) - 1)) >> 39;

static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);

#[derive(Clone, Copy)]
pub struct X86_64MMBootstrapInfo {
    kernel_code_start: usize,
    kernel_code_end: usize,
    kernel_data_end: usize,
    kernel_rodata_end: usize,
    start_brk: usize,
}

impl Debug for X86_64MMBootstrapInfo {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        write!(
            f,
            "kernel_code_start: {:x}, kernel_code_end: {:x}, kernel_data_end: {:x}, kernel_rodata_end: {:x}, start_brk: {:x}",
            self.kernel_code_start, self.kernel_code_end, self.kernel_data_end, self.kernel_rodata_end, self.start_brk)
    }
}

pub static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;

/// @brief Memory-management architecture struct for x86_64
#[derive(Debug, Clone, Copy, Hash)]
pub struct X86_64MMArch;

/// Whether the XD flag bit is reserved (i.e. must not be set)
static XD_RESERVED: AtomicBool = AtomicBool::new(false);

impl MemoryManagementArch for X86_64MMArch {
    /// 4K pages
    const PAGE_SHIFT: usize = 12;

    /// Each page-table entry is 8 bytes, so each table holds 512 entries
    const PAGE_ENTRY_SHIFT: usize = 9;

    /// Four-level page tables (PML4T, PDPT, PDT, PT)
    const PAGE_LEVELS: usize = 4;

    /// Index of the valid bits in a page-table entry. On x86_64, bits [0, 47] of an entry
    /// hold the address and flags, and bits [48, 51] are reserved, so the index of the
    /// valid bits is 52.
    /// Note that bit 63 is the XD bit, which controls whether execution is allowed.
    const ENTRY_ADDRESS_SHIFT: usize = 52;

    const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_PRESENT: usize = 1 << 0;

    const ENTRY_FLAG_READONLY: usize = 0;

    const ENTRY_FLAG_READWRITE: usize = 1 << 1;

    const ENTRY_FLAG_USER: usize = 1 << 2;

    const ENTRY_FLAG_WRITE_THROUGH: usize = 1 << 3;

    const ENTRY_FLAG_CACHE_DISABLE: usize = 1 << 4;

    const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;

    /// x86_64 has no EXEC flag bit, only the NO_EXEC (XD) bit
    const ENTRY_FLAG_EXEC: usize = 0;
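
    // For example, a present, writable page that must not be executable combines
    // ENTRY_FLAG_PRESENT | ENTRY_FLAG_READWRITE | ENTRY_FLAG_NO_EXEC
    //   = (1 << 0) | (1 << 1) | (1 << 63) = 0x8000_0000_0000_0003.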

    /// Offset between physical and virtual addresses
    /// 0xffff_8000_0000_0000
    const PHYS_OFFSET: usize = Self::PAGE_NEGATIVE_MASK + (Self::PAGE_ADDRESS_SIZE >> 1);

    const USER_END_VADDR: VirtAddr = VirtAddr::new(0x0000_7eff_ffff_ffff);
    const USER_BRK_START: VirtAddr = VirtAddr::new(0x0000_7000_0000_0000);
    const USER_STACK_START: VirtAddr = VirtAddr::new(0x0000_6fff_f0a0_0000);

    /// @brief Get the physical memory areas
    unsafe fn init() -> &'static [crate::mm::PhysMemoryArea] {
        extern "C" {
            fn _text();
            fn _etext();
            fn _edata();
            fn _erodata();
            fn _end();
        }

        Self::init_xd_rsvd();

        let bootstrap_info = X86_64MMBootstrapInfo {
            kernel_code_start: _text as usize,
            kernel_code_end: _etext as usize,
            kernel_data_end: _edata as usize,
            kernel_rodata_end: _erodata as usize,
            start_brk: _end as usize,
        };
        unsafe {
            BOOTSTRAP_MM_INFO = Some(bootstrap_info);
        }

        // Initialize the physical memory areas (obtained from multiboot2)
        let areas_count =
            Self::init_memory_area_from_multiboot2().expect("init memory area failed");
        send_to_default_serial8250_port("x86 64 init end\n\0".as_bytes());

        return &PHYS_MEMORY_AREAS[0..areas_count];
    }

    /// @brief Flush the TLB entry for the given virtual address
    unsafe fn invalidate_page(address: VirtAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("invlpg [{0}]", in(reg) address.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Flush all entries in the TLB
    unsafe fn invalidate_all() {
        compiler_fence(Ordering::SeqCst);
        // Reload CR3 to flush the entire TLB
        Self::set_table(PageTableKind::User, Self::table(PageTableKind::User));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Get the physical address of the top-level page table
    unsafe fn table(table_kind: PageTableKind) -> PhysAddr {
        match table_kind {
            PageTableKind::Kernel | PageTableKind::User => {
                let paddr: usize;
                compiler_fence(Ordering::SeqCst);
                asm!("mov {}, cr3", out(reg) paddr, options(nomem, nostack, preserves_flags));
                compiler_fence(Ordering::SeqCst);
                return PhysAddr::new(paddr);
            }
            PageTableKind::EPT => {
                let eptp =
                    vmx_vmread(VmcsFields::CTRL_EPTP_PTR as u32).expect("Failed to read eptp");
                return PhysAddr::new(eptp as usize);
            }
        }
    }

    /// @brief Load the physical address of the top-level page table into the processor
    unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("mov cr3, {}", in(reg) table.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Check whether a virtual address is valid
    fn virt_is_valid(virt: VirtAddr) -> bool {
        return virt.is_canonical();
    }

    /// Get the address of the first kernel page table created during
    /// memory-management initialization
    fn initial_page_table() -> PhysAddr {
        unsafe {
            return INITIAL_CR3_VALUE;
        }
    }

    /// @brief Create a new top-level page table
    ///
    /// This function creates the page table and copies the kernel mappings into it
    ///
    /// @return The new page table
    fn setup_new_usermapper() -> Result<crate::mm::ucontext::UserMapper, SystemError> {
        let new_umapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::User, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };

        let current_ktable: KernelMapper = KernelMapper::lock();
        let copy_mapping = |pml4_entry_no| unsafe {
            let entry: PageEntry<X86_64MMArch> = current_ktable
                .table()
                .entry(pml4_entry_no)
                .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
            new_umapper.table().set_entry(pml4_entry_no, entry)
        };

        // Copy the kernel mappings
        for pml4_entry_no in KERNEL_PML4E_NO..512 {
            copy_mapping(pml4_entry_no);
        }

        return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
    }
}
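
// Note on `setup_new_usermapper` above: with PHYS_OFFSET = 0xffff_8000_0000_0000,
// KERNEL_PML4E_NO evaluates to (0x0000_8000_0000_0000 >> 39) = 256, so the loop copies PML4
// entries [256, 512). Only the top-level entries are duplicated; every address space
// therefore points at the same lower-level kernel page tables, and the kernel mapping is
// shared rather than copied per process.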

impl X86_64MMArch {
    unsafe fn init_memory_area_from_multiboot2() -> Result<usize, SystemError> {
        // This array holds the memory-area information (obtained from the C side)
        let mut mb2_mem_info: [multiboot_mmap_entry_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 begin\n\0".as_bytes());

        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_memory),
            &mut mb2_mem_info as *mut [multiboot_mmap_entry_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 2\n\0".as_bytes());

        let mb2_count = mb2_count as usize;
        let mut areas_count = 0usize;
        let mut total_mem_size = 0usize;
        for i in 0..mb2_count {
            // Only use the memory area if its type is 1 (RAM)
            if mb2_mem_info[i].type_ == 1 {
                // Skip the memory area if its len is 0
                if mb2_mem_info[i].len == 0 {
                    continue;
                }
                total_mem_size += mb2_mem_info[i].len as usize;
                PHYS_MEMORY_AREAS[areas_count].base = PhysAddr::new(mb2_mem_info[i].addr as usize);
                PHYS_MEMORY_AREAS[areas_count].size = mb2_mem_info[i].len as usize;
                areas_count += 1;
            }
        }
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 end\n\0".as_bytes());
        kinfo!("Total memory size: {} MB, total areas from multiboot2: {mb2_count}, valid areas: {areas_count}", total_mem_size / 1024 / 1024);

        return Ok(areas_count);
    }

    fn init_xd_rsvd() {
        // Read the IA32_EFER register
        let efer: EferFlags = x86_64::registers::model_specific::Efer::read();
        if !efer.contains(EferFlags::NO_EXECUTE_ENABLE) {
            // NO_EXECUTE_ENABLE is false, so set XD_RESERVED to true
            kdebug!("NO_EXECUTE_ENABLE is false, set XD_RESERVED to true");
            XD_RESERVED.store(true, Ordering::Relaxed);
        }
        compiler_fence(Ordering::SeqCst);
    }

    /// Check whether the XD flag bit is reserved
    pub fn is_xd_reserved() -> bool {
        return XD_RESERVED.load(Ordering::Relaxed);
    }
}

impl VirtAddr {
    /// @brief Check whether the virtual address is valid
    #[inline(always)]
    pub fn is_canonical(self) -> bool {
        let x = self.data() & X86_64MMArch::PHYS_OFFSET;
        // If x is 0, the upper bits of the address are all 0 and it is a valid user address.
        // If x equals PHYS_OFFSET, the upper bits are all 1 and it is a valid kernel address.
        return x == 0 || x == X86_64MMArch::PHYS_OFFSET;
    }
}
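
// Illustration of the canonical check above: with PHYS_OFFSET = 0xffff_8000_0000_0000 the
// mask covers bits 47..=63, so e.g.
//   VirtAddr::new(0x0000_7fff_ffff_f000).is_canonical() == true   (user half, upper bits all 0)
//   VirtAddr::new(0xffff_8000_0000_1000).is_canonical() == true   (kernel half, upper bits all 1)
//   VirtAddr::new(0x0000_8000_0000_0000).is_canonical() == false  (non-canonical hole)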

/// @brief Initialize the memory-management module
pub fn mm_init() {
    send_to_default_serial8250_port("mm_init\n\0".as_bytes());
    PrintkWriter
        .write_fmt(format_args!("mm_init() called\n"))
        .unwrap();
    // printk_color!(GREEN, BLACK, "mm_init() called\n");
    static _CALL_ONCE: AtomicBool = AtomicBool::new(false);
    if _CALL_ONCE
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_err()
    {
        send_to_default_serial8250_port("mm_init err\n\0".as_bytes());
        panic!("mm_init() can only be called once");
    }

    unsafe { X86_64MMArch::init() };
    kdebug!("bootstrap info: {:?}", unsafe { BOOTSTRAP_MM_INFO });
    kdebug!("phys[0]=virt[0x{:x}]", unsafe {
        MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()
    });

    // Initialize the memory allocator
    unsafe { allocator_init() };
    // Enable MMIO
    mmio_init();
}

/// Build the kernel's final page table and frame allocator.
///
/// A `BumpAllocator` over the multiboot2 memory map is used to create a fresh kernel page
/// table that maps all physical memory at `PHYS_OFFSET` (plus a temporary low-address
/// mapping); the bump allocator is then turned into the buddy allocator backing
/// `LockedFrameAllocator`, and CR3 is finally switched to the new page table.
unsafe fn allocator_init() {
    let virt_offset = BOOTSTRAP_MM_INFO.unwrap().start_brk;
    let phy_offset =
        unsafe { MMArch::virt_2_phys(VirtAddr::new(page_align_up(virt_offset))) }.unwrap();

    kdebug!("PhysArea[0..10] = {:?}", &PHYS_MEMORY_AREAS[0..10]);
    let mut bump_allocator =
        BumpAllocator::<X86_64MMArch>::new(&PHYS_MEMORY_AREAS, phy_offset.data());
    kdebug!(
        "BumpAllocator created, offset={:?}",
        bump_allocator.offset()
    );

    // Keep the address of the page table that was set up in head.S; whether it should be
    // handed back to the buddy allocator is left for later. It is not returned for now
    // because of a possible security concern: these initial page tables live in the kernel's
    // data segment, and releasing them to the buddy allocator might be risky (some code may
    // perform safety checks based on virtual addresses).
    let _old_page_table = MMArch::table(PageTableKind::Kernel);

    let new_page_table: PhysAddr;
    // Use the bump allocator to map every physical page into the page table
    {
        // Create the new page table with the bump allocator
        let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =
            crate::mm::page::PageMapper::<MMArch, _>::create(
                PageTableKind::Kernel,
                &mut bump_allocator,
            )
            .expect("Failed to create page mapper");
        new_page_table = mapper.table().phys();
        kdebug!("PageMapper created");

        // Drop the initial mappings set up in head.S (without flushing the TLB yet)
        {
            let table = mapper.table();
            let empty_entry = PageEntry::<MMArch>::new(0);
            for i in 0..MMArch::PAGE_ENTRY_NUM {
                table
                    .set_entry(i, empty_entry)
                    .expect("Failed to empty page table entry");
            }
        }
        kdebug!("Successfully emptied page table");

        for area in PHYS_MEMORY_AREAS.iter() {
            // kdebug!("area: base={:?}, size={:#x}, end={:?}", area.base, area.size, area.base + area.size);
            for i in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {
                let paddr = area.base.add(i * MMArch::PAGE_SIZE);
                let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
                let flags = kernel_page_flags::<MMArch>(vaddr);

                let flusher = mapper
                    .map_phys(vaddr, paddr, flags)
                    .expect("Failed to map frame");
                // Do not flush the TLB yet
                flusher.ignore();
            }
        }

        // Add the low-address mapping (it is needed until SMP initialization finishes and
        // must be removed afterwards)
        LowAddressRemapping::remap_at_low_address(&mut mapper);
    }

    unsafe {
        INITIAL_CR3_VALUE = new_page_table;
    }
    kdebug!(
        "After mapping all physical memory, DragonOS used: {} KB",
        bump_allocator.offset() / 1024
    );

    // Initialize the buddy allocator
    let buddy_allocator = unsafe { BuddyAllocator::<X86_64MMArch>::new(bump_allocator).unwrap() };
    // Install it as the global page-frame allocator
    unsafe { set_inner_allocator(buddy_allocator) };
    kinfo!("Successfully initialized buddy allocator");
    // Disable output to the screen window
    scm_disable_put_to_window();

    // Make the new page table current
    {
        let mut binding = INNER_ALLOCATOR.lock();
        let mut allocator_guard = binding.as_mut().unwrap();
        kdebug!("To enable new page table.");
        compiler_fence(Ordering::SeqCst);
        let mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
            PageTableKind::Kernel,
            new_page_table,
            &mut allocator_guard,
        );
        compiler_fence(Ordering::SeqCst);
        mapper.make_current();
        compiler_fence(Ordering::SeqCst);
        kdebug!("New page table enabled");
    }
    kdebug!("Successfully enabled new page table");
}
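
/// Minimal usage sketch of the global frame allocator (illustrative only; the kernel itself
/// does not call this function). It mirrors the allocate/write/free pattern exercised by
/// `test_buddy()` below.
#[allow(dead_code)]
fn example_frame_round_trip() {
    // Request 4 pages; the buddy allocator only serves power-of-two frame counts.
    let count = PageFrameCount::from_bytes(4 * MMArch::PAGE_SIZE).unwrap();
    if let Some((paddr, allocated)) = unsafe { LockedFrameAllocator.allocate(count) } {
        // All physical memory is mapped at PHYS_OFFSET, so the frames can be accessed
        // through their linear-mapping virtual address.
        let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
        unsafe {
            core::ptr::write_bytes(
                vaddr.data() as *mut u8,
                0,
                allocated.data() * MMArch::PAGE_SIZE,
            );
            LockedFrameAllocator.free(paddr, allocated);
        }
    }
}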

#[no_mangle]
pub extern "C" fn rs_test_buddy() {
    test_buddy();
}

pub fn test_buddy() {
    // Allocate memory, write data into it, then free it.
    // Each round allocates 200 MB of memory in total.
    const TOTAL_SIZE: usize = 200 * 1024 * 1024;

    for i in 0..10 {
        kdebug!("Test buddy, round: {i}");
        // Holds the allocated memory blocks
        let mut v: Vec<(PhysAddr, PageFrameCount)> = Vec::with_capacity(60 * 1024);
        // Holds the addresses of the allocated blocks (used to check for duplicates)
        let mut addr_set: HashSet<PhysAddr> = HashSet::new();

        let mut allocated = 0usize;

        let mut free_count = 0usize;

        while allocated < TOTAL_SIZE {
            let mut random_size = 0u64;
            unsafe { x86::random::rdrand64(&mut random_size) };
            // Allocate at most 4 MB at a time
            random_size = random_size % (1024 * 4096);
            if random_size == 0 {
                continue;
            }
            let random_size =
                core::cmp::min(page_align_up(random_size as usize), TOTAL_SIZE - allocated);
            let random_size = PageFrameCount::from_bytes(random_size.next_power_of_two()).unwrap();
            // Allocate the frames
            let (paddr, allocated_frame_count) =
                unsafe { LockedFrameAllocator.allocate(random_size).unwrap() };
            assert!(allocated_frame_count.data().is_power_of_two());
            assert!(paddr.data() % MMArch::PAGE_SIZE == 0);
            unsafe {
                assert!(MMArch::phys_2_virt(paddr)
                    .as_ref()
                    .unwrap()
                    .check_aligned(allocated_frame_count.data() * MMArch::PAGE_SIZE));
            }
            allocated += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            v.push((paddr, allocated_frame_count));
            assert!(addr_set.insert(paddr), "duplicate address: {:?}", paddr);

            // Write data into the block
            let vaddr = unsafe { MMArch::phys_2_virt(paddr).unwrap() };
            let slice = unsafe {
                core::slice::from_raw_parts_mut(
                    vaddr.data() as *mut u8,
                    allocated_frame_count.data() * MMArch::PAGE_SIZE,
                )
            };
            for i in 0..slice.len() {
                slice[i] = ((i + unsafe { rdtsc() } as usize) % 256) as u8;
            }

            // Randomly free one memory block
            if !v.is_empty() {
                let mut random_index = 0u64;
                unsafe { x86::random::rdrand64(&mut random_index) };
                // Skip freeing roughly 20% of the time (i.e. free with ~80% probability)
                if random_index % 10 > 7 {
                    continue;
                }
                random_index = random_index % v.len() as u64;
                let random_index = random_index as usize;
                let (paddr, allocated_frame_count) = v.remove(random_index);
                assert!(addr_set.remove(&paddr));
                unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
                free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            }
        }

        kdebug!(
            "Allocated {} MB memory, release: {} MB, no release: {} bytes",
            allocated / 1024 / 1024,
            free_count / 1024 / 1024,
            (allocated - free_count)
        );

        kdebug!("Now, to release buddy memory");
        // Free all remaining memory
        for (paddr, allocated_frame_count) in v {
            unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
            assert!(addr_set.remove(&paddr));
            free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
        }

        kdebug!("release done!, allocated: {allocated}, free_count: {free_count}");
    }
}

/// The global page-frame allocator
#[derive(Debug, Clone, Copy, Hash)]
pub struct LockedFrameAllocator;

impl FrameAllocator for LockedFrameAllocator {
    unsafe fn allocate(&mut self, count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.allocate(count);
        } else {
            return None;
        }
    }

    unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
        assert!(count.data().is_power_of_two());
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.free(address, count);
        }
    }

    unsafe fn usage(&self) -> PageFrameUsage {
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.usage();
        } else {
            panic!("usage error");
        }
    }
}

impl LockedFrameAllocator {
    pub fn get_usage(&self) -> PageFrameUsage {
        unsafe { self.usage() }
    }
}

/// Get the default page flags for a kernel address
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> PageFlags<A> {
    let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.clone().unwrap();

    if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
        // Remap kernel code as executable
        return PageFlags::new().set_execute(true).set_write(true);
    } else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
        // Remap kernel rodata as read only
        return PageFlags::new().set_execute(true);
    } else {
        return PageFlags::new().set_write(true).set_execute(true);
    }
}

unsafe fn set_inner_allocator(allocator: BuddyAllocator<MMArch>) {
    static FLAG: AtomicBool = AtomicBool::new(false);
    if FLAG
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_err()
    {
        panic!("Cannot set inner allocator twice!");
    }
    *INNER_ALLOCATOR.lock() = Some(allocator);
}

/// Manager for the low-address remapping
///
/// The low-address mapping is required until SMP initialization has finished; once SMP is
/// initialized, this mapping must be removed.
pub struct LowAddressRemapping;

impl LowAddressRemapping {
    // Map 32 MB
    const REMAP_SIZE: usize = 32 * 1024 * 1024;

    pub unsafe fn remap_at_low_address(
        mapper: &mut crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>>,
    ) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let paddr = PhysAddr::new(i * MMArch::PAGE_SIZE);
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let flags = kernel_page_flags::<MMArch>(vaddr);

            let flusher = mapper
                .map_phys(vaddr, paddr, flags)
                .expect("Failed to map frame");
            // Do not flush the TLB yet
            flusher.ignore();
        }
    }

    /// Remove the low-address mapping
    pub unsafe fn unmap_at_low_address(flush: bool) {
        let mut mapper = KernelMapper::lock();
        assert!(mapper.as_mut().is_some());
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let (_, _, flusher) = mapper
                .as_mut()
                .unwrap()
                .unmap_phys(vaddr, true)
                .expect("Failed to unmap frame");
            if !flush {
                flusher.ignore();
            }
        }
    }
}

#[no_mangle]
pub extern "C" fn rs_mm_init() {
    mm_init();
}
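
// Expected lifecycle of the low-address mapping above (sketch; the actual call sites live in
// allocator_init() and the SMP bring-up code, not here):
//
//     // inside allocator_init(), while the BumpAllocator-backed mapper is still alive:
//     LowAddressRemapping::remap_at_low_address(&mut mapper);
//     // ... SMP initialization runs, using the identity-mapped low 32 MB ...
//     unsafe { LowAddressRemapping::unmap_at_low_address(true) };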