xref: /DragonOS/kernel/src/arch/x86_64/mm/mod.rs (revision 886ce28516f9e3e5940840d1ae64ec3e9c8875fa)
1 pub mod barrier;
2 pub mod bump;
3 pub mod fault;
4 pub mod pkru;
5 
6 use alloc::sync::Arc;
7 use alloc::vec::Vec;
8 use hashbrown::HashSet;
9 use log::{debug, info};
10 use x86::time::rdtsc;
11 use x86_64::registers::model_specific::EferFlags;
12 
13 use crate::driver::serial::serial8250::send_to_default_serial8250_port;
14 
15 use crate::init::boot::boot_callbacks;
16 use crate::libs::align::page_align_up;
17 use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
18 use crate::libs::spinlock::SpinLock;
19 
20 use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
21 use crate::mm::memblock::mem_block_manager;
22 use crate::mm::ucontext::LockedVMA;
23 use crate::{
24     arch::MMArch,
25     mm::allocator::{buddy::BuddyAllocator, bump::BumpAllocator},
26 };
27 
28 use crate::mm::kernel_mapper::KernelMapper;
29 use crate::mm::page::{EntryFlags, PageEntry, PAGE_1G_SHIFT};
30 use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr, VmFlags};
31 
32 use system_error::SystemError;
33 
34 use core::arch::asm;
35 use core::fmt::Debug;
36 
37 use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};
38 
39 use super::kvm::vmx::vmcs::VmcsFields;
40 use super::kvm::vmx::vmx_asm_wrapper::vmx_vmread;
41 
/// Page mapper type used on x86_64: the generic page mapper specialized to
/// this architecture and the global buddy-backed frame allocator.
pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

/// Initial value of the CR3 register, i.e. the location of the first kernel
/// page table created during memory-management initialization.
static mut INITIAL_CR3_VALUE: PhysAddr = PhysAddr::new(0);

/// Global buddy allocator backing `LockedFrameAllocator`; installed exactly
/// once by `set_inner_allocator` during `allocator_init`.
static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);
49 
/// Kernel layout information captured during early boot, used when building
/// the initial kernel mappings.
#[derive(Clone, Copy, Debug)]
pub struct X86_64MMBootstrapInfo {
    // Physical address at which the kernel image was loaded.
    kernel_load_base_paddr: usize,
    // Virtual address of the start of the kernel code section (`_text`).
    kernel_code_start: usize,
    // Virtual address of the end of the kernel code section (`_etext`).
    kernel_code_end: usize,
    // Virtual address of the end of the kernel data section (`_edata`).
    kernel_data_end: usize,
    // Virtual address of the end of the kernel rodata section (`_erodata`).
    kernel_rodata_end: usize,
    // Virtual address of the end of the kernel image (`_end`); the kernel
    // program break starts here.
    start_brk: usize,
}

/// Bootstrap memory-management info; written once in `MemoryManagementArch::init`.
pub(super) static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;
61 
62 pub(super) fn x86_64_set_kernel_load_base_paddr(paddr: PhysAddr) {
63     unsafe {
64         BOOTSTRAP_MM_INFO.as_mut().unwrap().kernel_load_base_paddr = paddr.data();
65     }
66 }
67 
/// @brief Marker type implementing the x86_64 memory-management architecture.
#[derive(Debug, Clone, Copy, Hash)]
pub struct X86_64MMArch;

/// Whether the XD (execute-disable) bit is reserved and must not be used;
/// decided in `init_xd_rsvd` from the EFER.NXE flag.
static XD_RESERVED: AtomicBool = AtomicBool::new(false);
74 
impl MemoryManagementArch for X86_64MMArch {
    /// x86_64 currently supports page-fault handling.
    const PAGE_FAULT_ENABLED: bool = true;
    /// 4KiB pages.
    const PAGE_SHIFT: usize = 12;

    /// Each page-table entry is 8 bytes, so one table holds 512 entries.
    const PAGE_ENTRY_SHIFT: usize = 9;

    /// Four-level paging (PML4T, PDPT, PDT, PT).
    const PAGE_LEVELS: usize = 4;

    /// Index of the first bit past the valid entry address. On x86_64, bits
    /// [0, 47] of an entry hold the address and flags, and bits [48, 51] are
    /// reserved, so the boundary index is 52.
    /// Note that bit 63 is the XD bit, which controls whether execution is allowed.
    const ENTRY_ADDRESS_SHIFT: usize = 52;

    const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_PRESENT: usize = 1 << 0;

    const ENTRY_FLAG_READONLY: usize = 0;

    const ENTRY_FLAG_WRITEABLE: usize = 1 << 1;
    const ENTRY_FLAG_READWRITE: usize = 1 << 1;

    const ENTRY_FLAG_USER: usize = 1 << 2;

    const ENTRY_FLAG_WRITE_THROUGH: usize = 1 << 3;

    const ENTRY_FLAG_CACHE_DISABLE: usize = 1 << 4;

    const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;
    /// x86_64 has no EXEC flag, only the NO_EXEC (XD) flag.
    const ENTRY_FLAG_EXEC: usize = 0;

    const ENTRY_FLAG_ACCESSED: usize = 1 << 5;
    const ENTRY_FLAG_DIRTY: usize = 1 << 6;
    const ENTRY_FLAG_HUGE_PAGE: usize = 1 << 7;
    const ENTRY_FLAG_GLOBAL: usize = 1 << 8;

    /// Offset between physical and direct-mapped virtual addresses:
    /// 0xffff_8000_0000_0000
    const PHYS_OFFSET: usize = Self::PAGE_NEGATIVE_MASK + (Self::PAGE_ADDRESS_SIZE >> 1);
    const KERNEL_LINK_OFFSET: usize = 0x100000;

    // See https://code.dragonos.org.cn/xref/linux-6.1.9/arch/x86/include/asm/page_64_types.h#75
    const USER_END_VADDR: VirtAddr =
        VirtAddr::new((Self::PAGE_ADDRESS_SIZE >> 1) - Self::PAGE_SIZE);
    const USER_BRK_START: VirtAddr = VirtAddr::new(0x700000000000);
    const USER_STACK_START: VirtAddr = VirtAddr::new(0x6ffff0a00000);

    const FIXMAP_START_VADDR: VirtAddr = VirtAddr::new(0xffffb00000000000);
    /// FIXMAP region size: 16MiB (256 * 4096 * 16 bytes).
    const FIXMAP_SIZE: usize = 256 * 4096 * 16;

    const MMIO_BASE: VirtAddr = VirtAddr::new(0xffffa10000000000);
    const MMIO_SIZE: usize = 1 << PAGE_1G_SHIFT;

    /// @brief Discover the physical memory regions and bring up the early
    /// memory allocators.
    unsafe fn init() {
        // Linker-provided section boundary symbols.
        extern "C" {
            fn _text();
            fn _etext();
            fn _edata();
            fn _erodata();
            fn _end();
        }

        Self::init_xd_rsvd();

        let bootstrap_info = X86_64MMBootstrapInfo {
            kernel_load_base_paddr: 0,
            kernel_code_start: _text as usize,
            kernel_code_end: _etext as usize,
            kernel_data_end: _edata as usize,
            kernel_rodata_end: _erodata as usize,
            start_brk: _end as usize,
        };

        unsafe {
            BOOTSTRAP_MM_INFO = Some(bootstrap_info);
        }

        // Initialize the physical memory regions.
        boot_callbacks()
            .early_init_memory_blocks()
            .expect("init memory area failed");

        debug!("bootstrap info: {:?}", unsafe { BOOTSTRAP_MM_INFO });
        debug!("phys[0]=virt[0x{:x}]", unsafe {
            MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()
        });

        // Initialize the memory manager (bump -> buddy allocator, new kernel page table).
        unsafe { allocator_init() };

        send_to_default_serial8250_port("x86 64 mm init done\n\0".as_bytes());
    }

    /// @brief Flush the TLB entry for the given virtual address.
    unsafe fn invalidate_page(address: VirtAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("invlpg [{0}]", in(reg) address.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Flush every TLB entry.
    unsafe fn invalidate_all() {
        compiler_fence(Ordering::SeqCst);
        // Reloading CR3 flushes the whole TLB.
        Self::set_table(PageTableKind::User, Self::table(PageTableKind::User));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Return the physical address of the top-level page table.
    unsafe fn table(table_kind: PageTableKind) -> PhysAddr {
        match table_kind {
            // Kernel and user address spaces share CR3 here.
            PageTableKind::Kernel | PageTableKind::User => {
                compiler_fence(Ordering::SeqCst);
                let cr3 = x86::controlregs::cr3() as usize;
                compiler_fence(Ordering::SeqCst);
                return PhysAddr::new(cr3);
            }
            PageTableKind::EPT => {
                let eptp =
                    vmx_vmread(VmcsFields::CTRL_EPTP_PTR as u32).expect("Failed to read eptp");
                return PhysAddr::new(eptp as usize);
            }
        }
    }

    /// @brief Install the top-level page table into the processor (writes CR3).
    unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("mov cr3, {}", in(reg) table.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Check whether a virtual address is valid (canonical).
    fn virt_is_valid(virt: VirtAddr) -> bool {
        return virt.is_canonical();
    }

    /// Return the address of the first kernel page table created during
    /// memory-management initialization.
    fn initial_page_table() -> PhysAddr {
        unsafe {
            return INITIAL_CR3_VALUE;
        }
    }

    /// @brief Create a new top-level page table.
    ///
    /// The function creates the table and copies the kernel mappings into it.
    ///
    /// @return the new user mapper
    fn setup_new_usermapper() -> Result<crate::mm::ucontext::UserMapper, SystemError> {
        let new_umapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::User, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };

        let current_ktable: KernelMapper = KernelMapper::lock();
        // Copy one PML4 entry from the current kernel table into the new table.
        let copy_mapping = |pml4_entry_no| unsafe {
            let entry: PageEntry<X86_64MMArch> = current_ktable
                .table()
                .entry(pml4_entry_no)
                .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
            new_umapper.table().set_entry(pml4_entry_no, entry)
        };

        // Copy the kernel mappings (the PML4 entries covering the kernel half).
        for pml4_entry_no in MMArch::PAGE_KERNEL_INDEX..MMArch::PAGE_ENTRY_NUM {
            copy_mapping(pml4_entry_no);
        }

        return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
    }

    const PAGE_SIZE: usize = 1 << Self::PAGE_SHIFT;

    const PAGE_OFFSET_MASK: usize = Self::PAGE_SIZE - 1;

    const PAGE_MASK: usize = !(Self::PAGE_OFFSET_MASK);

    const PAGE_ADDRESS_SHIFT: usize = Self::PAGE_LEVELS * Self::PAGE_ENTRY_SHIFT + Self::PAGE_SHIFT;

    const PAGE_ADDRESS_SIZE: usize = 1 << Self::PAGE_ADDRESS_SHIFT;

    const PAGE_ADDRESS_MASK: usize = Self::PAGE_ADDRESS_SIZE - Self::PAGE_SIZE;

    const PAGE_ENTRY_SIZE: usize = 1 << (Self::PAGE_SHIFT - Self::PAGE_ENTRY_SHIFT);

    const PAGE_ENTRY_NUM: usize = 1 << Self::PAGE_ENTRY_SHIFT;

    const PAGE_ENTRY_MASK: usize = Self::PAGE_ENTRY_NUM - 1;

    const PAGE_KERNEL_INDEX: usize = (Self::PHYS_OFFSET & Self::PAGE_ADDRESS_MASK)
        >> (Self::PAGE_ADDRESS_SHIFT - Self::PAGE_ENTRY_SHIFT);

    const PAGE_NEGATIVE_MASK: usize = !((Self::PAGE_ADDRESS_SIZE) - 1);

    const ENTRY_ADDRESS_SIZE: usize = 1 << Self::ENTRY_ADDRESS_SHIFT;

    const ENTRY_ADDRESS_MASK: usize = Self::ENTRY_ADDRESS_SIZE - Self::PAGE_SIZE;

    const ENTRY_FLAGS_MASK: usize = !Self::ENTRY_ADDRESS_MASK;

    /// Read a `T` from the given virtual address.
    unsafe fn read<T>(address: VirtAddr) -> T {
        return core::ptr::read(address.data() as *const T);
    }

    /// Write a `T` to the given virtual address.
    unsafe fn write<T>(address: VirtAddr, value: T) {
        core::ptr::write(address.data() as *mut T, value);
    }

    /// Fill `count` bytes at the given virtual address with `value`.
    unsafe fn write_bytes(address: VirtAddr, value: u8, count: usize) {
        core::ptr::write_bytes(address.data() as *mut u8, value, count);
    }

    /// Translate a physical address to its direct-mapped virtual address,
    /// or `None` on overflow.
    unsafe fn phys_2_virt(phys: PhysAddr) -> Option<VirtAddr> {
        if let Some(vaddr) = phys.data().checked_add(Self::PHYS_OFFSET) {
            return Some(VirtAddr::new(vaddr));
        } else {
            return None;
        }
    }

    /// Translate a direct-mapped virtual address back to its physical
    /// address, or `None` if it lies below the direct-mapping base.
    unsafe fn virt_2_phys(virt: VirtAddr) -> Option<PhysAddr> {
        if let Some(paddr) = virt.data().checked_sub(Self::PHYS_OFFSET) {
            return Some(PhysAddr::new(paddr));
        } else {
            return None;
        }
    }

    /// Combine a physical address and flag bits into a raw page-table entry.
    #[inline(always)]
    fn make_entry(paddr: PhysAddr, page_flags: usize) -> usize {
        return paddr.data() | page_flags;
    }

    /// Decide whether an access to `vma` is permitted.
    ///
    /// Execute accesses and foreign VMAs are always permitted; otherwise the
    /// protection-key (PKRU) check decides based on the VMA's pkey and
    /// whether the access is a write.
    fn vma_access_permitted(
        vma: Arc<LockedVMA>,
        write: bool,
        execute: bool,
        foreign: bool,
    ) -> bool {
        if execute {
            return true;
        }
        if foreign | vma.is_foreign() {
            return true;
        }
        pkru::pkru_allows_pkey(pkru::vma_pkey(vma), write)
    }

    const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();

    // The PAGE_* combinations below mirror Linux's protection constants.
    const PAGE_NONE: usize =
        Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_ACCESSED | Self::ENTRY_FLAG_GLOBAL;

    const PAGE_SHARED: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_READWRITE
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;

    const PAGE_SHARED_EXEC: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_READWRITE
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED;

    const PAGE_COPY_NOEXEC: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;

    const PAGE_COPY_EXEC: usize =
        Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_USER | Self::ENTRY_FLAG_ACCESSED;

    const PAGE_COPY: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;

    const PAGE_READONLY: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;

    const PAGE_READONLY_EXEC: usize =
        Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_USER | Self::ENTRY_FLAG_ACCESSED;

    const PAGE_READ: usize = 0;
    const PAGE_READ_EXEC: usize = 0;
    const PAGE_WRITE: usize = 0;
    const PAGE_WRITE_EXEC: usize = 0;
    const PAGE_EXEC: usize = 0;
}
376 
/// Build the protection-flag mapping table.
///
/// Each combination of `VM_READ`/`VM_WRITE`/`VM_EXEC`/`VM_SHARED` (used as a
/// 4-bit index) is mapped to the corresponding page-entry flags.
///
/// ## Returns
/// - `[EntryFlags<MMArch>; 16]`: mapping table of length 16
const fn protection_map() -> [EntryFlags<MMArch>; 16] {
    let mut map = [unsafe { EntryFlags::from_data(0) }; 16];
    unsafe {
        map[VmFlags::VM_NONE.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
        map[VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY);
        map[VmFlags::VM_WRITE.bits()] = EntryFlags::from_data(MMArch::PAGE_COPY);
        map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_COPY);
        map[VmFlags::VM_EXEC.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
            EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
        map[VmFlags::VM_SHARED.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] =
            EntryFlags::from_data(MMArch::PAGE_SHARED);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_SHARED);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
            EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
        map[VmFlags::VM_SHARED.bits()
            | VmFlags::VM_EXEC.bits()
            | VmFlags::VM_WRITE.bits()
            | VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
    }
    // if X86_64MMArch::is_xd_reserved() {
    //     map.iter_mut().for_each(|x| *x &= !Self::ENTRY_FLAG_NO_EXEC)
    // }
    map
}
420 
impl X86_64MMArch {
    /// Decide whether the XD bit must be treated as reserved, based on EFER.NXE.
    fn init_xd_rsvd() {
        // Read the IA32_EFER register.
        let efer: EferFlags = x86_64::registers::model_specific::Efer::read();
        if !efer.contains(EferFlags::NO_EXECUTE_ENABLE) {
            // NO_EXECUTE_ENABLE is false, so mark XD_RESERVED as true.
            debug!("NO_EXECUTE_ENABLE is false, set XD_RESERVED to true");
            XD_RESERVED.store(true, Ordering::Relaxed);
        }
        compiler_fence(Ordering::SeqCst);
    }

    /// Whether the XD flag is reserved (and must not be set in entries).
    pub fn is_xd_reserved() -> bool {
        // return XD_RESERVED.load(Ordering::Relaxed);

        // Execute-disable is not supported for now, so always return true.
        // Reason: the page-level XD bit does not yet seem to be set
        // correctly, which would trigger page faults.
        return true;
    }
}
442 
443 impl VirtAddr {
444     /// @brief 判断虚拟地址是否合法
445     #[inline(always)]
446     pub fn is_canonical(self) -> bool {
447         let x = self.data() & X86_64MMArch::PHYS_OFFSET;
448         // 如果x为0,说明虚拟地址的高位为0,是合法的用户地址
449         // 如果x为PHYS_OFFSET,说明虚拟地址的高位全为1,是合法的内核地址
450         return x == 0 || x == X86_64MMArch::PHYS_OFFSET;
451     }
452 }
453 
/// Bring up the kernel's physical-memory allocators.
///
/// Reserves the memory occupied by the kernel image, builds a bump allocator,
/// creates a new kernel page table mapping all physical memory, hands the
/// bump allocator's remaining memory to the buddy allocator, and finally
/// switches CR3 to the new table.
unsafe fn allocator_init() {
    // Everything below the page-aligned kernel brk belongs to the kernel image.
    let virt_offset = VirtAddr::new(page_align_up(BOOTSTRAP_MM_INFO.unwrap().start_brk));

    let phy_offset = unsafe { MMArch::virt_2_phys(virt_offset) }.unwrap();

    mem_block_manager()
        .reserve_block(PhysAddr::new(0), phy_offset.data())
        .expect("Failed to reserve block");
    let mut bump_allocator = BumpAllocator::<X86_64MMArch>::new(phy_offset.data());
    debug!(
        "BumpAllocator created, offset={:?}",
        bump_allocator.offset()
    );

    // Keep the address of the initial page table set up in head.S; whether to
    // add it to the buddy allocator's free space is deferred. It is not
    // returned now out of caution about security: those initial tables live
    // in the kernel data segment, and handing them back to the buddy
    // allocator could be risky (some code may perform safety checks based on
    // virtual addresses).
    let _old_page_table = MMArch::table(PageTableKind::Kernel);

    let new_page_table: PhysAddr;
    // Use the bump allocator to map every physical page into the page table.
    {
        // Create the new page table with the bump allocator.
        let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =
            crate::mm::page::PageMapper::<MMArch, _>::create(
                PageTableKind::Kernel,
                &mut bump_allocator,
            )
            .expect("Failed to create page mapper");
        new_page_table = mapper.table().phys();
        debug!("PageMapper created");

        // Drop the initial mappings from head.S (without flushing the TLB yet).
        {
            let table = mapper.table();
            let empty_entry = PageEntry::<MMArch>::from_usize(0);
            for i in 0..MMArch::PAGE_ENTRY_NUM {
                table
                    .set_entry(i, empty_entry)
                    .expect("Failed to empty page table entry");
            }
        }
        debug!("Successfully emptied page table");

        // Map every page of every initial memory region into the new table.
        let total_num = mem_block_manager().total_initial_memory_regions();
        for i in 0..total_num {
            let area = mem_block_manager().get_initial_memory_region(i).unwrap();
            // debug!("area: base={:?}, size={:#x}, end={:?}", area.base, area.size, area.base + area.size);
            for i in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {
                let paddr = area.base.add(i * MMArch::PAGE_SIZE);
                let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
                let flags = kernel_page_flags::<MMArch>(vaddr);

                let flusher = mapper
                    .map_phys(vaddr, paddr, flags)
                    .expect("Failed to map frame");
                // Do not flush the TLB yet.
                flusher.ignore();
            }
        }
    }

    unsafe {
        INITIAL_CR3_VALUE = new_page_table;
    }
    debug!(
        "After mapping all physical memory, DragonOS used: {} KB",
        bump_allocator.offset() / 1024
    );

    // Initialize the buddy allocator from the bump allocator's remaining memory.
    let buddy_allocator = unsafe { BuddyAllocator::<X86_64MMArch>::new(bump_allocator).unwrap() };
    // Install it as the global page-frame allocator.
    unsafe { set_inner_allocator(buddy_allocator) };
    info!("Successfully initialized buddy allocator");
    // Disable on-screen output.
    scm_disable_put_to_window();

    // make the new page table current
    {
        let mut binding = INNER_ALLOCATOR.lock();
        let mut allocator_guard = binding.as_mut().unwrap();
        debug!("To enable new page table.");
        compiler_fence(Ordering::SeqCst);
        let mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
            PageTableKind::Kernel,
            new_page_table,
            &mut allocator_guard,
        );
        compiler_fence(Ordering::SeqCst);
        mapper.make_current();
        compiler_fence(Ordering::SeqCst);
        debug!("New page table enabled");
    }
    debug!("Successfully enabled new page table");
}
550 
/// C-callable entry point for the buddy-allocator stress test.
#[no_mangle]
pub extern "C" fn rs_test_buddy() {
    test_buddy();
}
/// Stress test for the buddy allocator.
///
/// Runs 10 rounds; each round allocates random-sized frame runs until 200MiB
/// has been requested, writes a byte pattern into every allocation, randomly
/// frees blocks along the way, and finally releases everything.
pub fn test_buddy() {
    // Allocate memory, write data into it, then free it.
    // A total of 200MiB is requested per round.
    const TOTAL_SIZE: usize = 200 * 1024 * 1024;

    for i in 0..10 {
        debug!("Test buddy, round: {i}");
        // Holds the allocated blocks (base address + frame count).
        let mut v: Vec<(PhysAddr, PageFrameCount)> = Vec::with_capacity(60 * 1024);
        // Addresses currently allocated (used to detect duplicates).
        let mut addr_set: HashSet<PhysAddr> = HashSet::new();

        // Total bytes allocated so far this round.
        let mut allocated = 0usize;

        // Total bytes freed so far this round.
        let mut free_count = 0usize;

        while allocated < TOTAL_SIZE {
            let mut random_size = 0u64;
            unsafe { x86::random::rdrand64(&mut random_size) };
            // At most 4MiB per request.
            random_size %= 1024 * 4096;
            if random_size == 0 {
                continue;
            }
            let random_size =
                core::cmp::min(page_align_up(random_size as usize), TOTAL_SIZE - allocated);
            let random_size = PageFrameCount::from_bytes(random_size.next_power_of_two()).unwrap();
            // Grab the frames.
            let (paddr, allocated_frame_count) =
                unsafe { LockedFrameAllocator.allocate(random_size).unwrap() };
            assert!(allocated_frame_count.data().is_power_of_two());
            assert!(paddr.data() % MMArch::PAGE_SIZE == 0);
            unsafe {
                assert!(MMArch::phys_2_virt(paddr)
                    .as_ref()
                    .unwrap()
                    .check_aligned(allocated_frame_count.data() * MMArch::PAGE_SIZE));
            }
            allocated += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            v.push((paddr, allocated_frame_count));
            assert!(addr_set.insert(paddr), "duplicate address: {:?}", paddr);

            // Fill the block with a TSC-derived byte pattern.
            let vaddr = unsafe { MMArch::phys_2_virt(paddr).unwrap() };
            let slice = unsafe {
                core::slice::from_raw_parts_mut(
                    vaddr.data() as *mut u8,
                    allocated_frame_count.data() * MMArch::PAGE_SIZE,
                )
            };
            for (i, item) in slice.iter_mut().enumerate() {
                *item = ((i + unsafe { rdtsc() } as usize) % 256) as u8;
            }

            // Randomly free one block.
            if !v.is_empty() {
                let mut random_index = 0u64;
                unsafe { x86::random::rdrand64(&mut random_index) };
                // Free with 80% probability (skip when `random_index % 10` is 8 or 9).
                if random_index % 10 > 7 {
                    continue;
                }
                random_index %= v.len() as u64;
                let random_index = random_index as usize;
                let (paddr, allocated_frame_count) = v.remove(random_index);
                assert!(addr_set.remove(&paddr));
                unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
                free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            }
        }

        debug!(
            "Allocated {} MB memory, release: {} MB, no release: {} bytes",
            allocated / 1024 / 1024,
            free_count / 1024 / 1024,
            (allocated - free_count)
        );

        debug!("Now, to release buddy memory");
        // Free everything that is still allocated.
        for (paddr, allocated_frame_count) in v {
            unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
            assert!(addr_set.remove(&paddr));
            free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
        }

        debug!("release done!, allocated: {allocated}, free_count: {free_count}");
    }
}
644 
/// Global page-frame allocator: a zero-sized handle around `INNER_ALLOCATOR`.
#[derive(Debug, Clone, Copy, Hash)]
pub struct LockedFrameAllocator;
648 
649 impl FrameAllocator for LockedFrameAllocator {
650     unsafe fn allocate(&mut self, mut count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
651         count = count.next_power_of_two();
652         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
653             return allocator.allocate(count);
654         } else {
655             return None;
656         }
657     }
658 
659     unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
660         assert!(count.data().is_power_of_two());
661         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
662             return allocator.free(address, count);
663         }
664     }
665 
666     unsafe fn usage(&self) -> PageFrameUsage {
667         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
668             return allocator.usage();
669         } else {
670             panic!("usage error");
671         }
672     }
673 }
674 
/// Return the default page-entry flags for a kernel virtual address,
/// chosen by which kernel section (code / rodata / other) it falls in.
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> EntryFlags<A> {
    let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.unwrap();

    if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
        // Kernel code: executable. NOTE(review): write is also granted here,
        // so the code section is mapped writable — confirm this is intended.
        return EntryFlags::new().set_execute(true).set_write(true);
    } else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
        // Kernel rodata: no write flag. NOTE(review): execute is granted even
        // though this range is described as read-only data — confirm whether
        // that is intentional (XD is currently treated as reserved, see
        // `is_xd_reserved`).
        return EntryFlags::new().set_execute(true);
    } else {
        // Everything else (data, heap, direct mapping): writable + executable.
        return EntryFlags::new().set_write(true).set_execute(true);
    }
}
689 
690 unsafe fn set_inner_allocator(allocator: BuddyAllocator<MMArch>) {
691     static FLAG: AtomicBool = AtomicBool::new(false);
692     if FLAG
693         .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
694         .is_err()
695     {
696         panic!("Cannot set inner allocator twice!");
697     }
698     *INNER_ALLOCATOR.lock() = Some(allocator);
699 }
700 
/// Manager for the low-address identity remapping.
///
/// Before SMP initialization completes, the kernel needs the low-address
/// mapping; once SMP is up, this mapping must be removed.
pub struct LowAddressRemapping;
705 
impl LowAddressRemapping {
    // Remap 64MiB of low memory.
    const REMAP_SIZE: usize = 64 * 1024 * 1024;

    /// Identity-map the first `REMAP_SIZE` bytes (vaddr == paddr), one page
    /// at a time, without flushing the TLB.
    pub unsafe fn remap_at_low_address(mapper: &mut PageMapper) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let paddr = PhysAddr::new(i * MMArch::PAGE_SIZE);
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let flags = kernel_page_flags::<MMArch>(vaddr);

            let flusher = mapper
                .map_phys(vaddr, paddr, flags)
                .expect("Failed to map frame");
            // Do not flush the TLB yet.
            flusher.ignore();
        }
    }

    /// Remove the low-address mapping.
    ///
    /// If `flush` is false, the per-page TLB flush is skipped.
    pub unsafe fn unmap_at_low_address(mapper: &mut PageMapper, flush: bool) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let (_, _, flusher) = mapper
                .unmap_phys(vaddr, true)
                .expect("Failed to unmap frame");
            if !flush {
                flusher.ignore();
            }
        }
    }
}
737