xref: /DragonOS/kernel/src/arch/x86_64/mm/mod.rs (revision 2eab6dd743e94a86a685f1f3c01e599adf86610a)
pub mod barrier;
pub mod bump;
pub mod fault;
pub mod pkru;

use alloc::sync::Arc;
use alloc::vec::Vec;
use hashbrown::HashSet;
use log::{debug, info, warn};
use x86::time::rdtsc;
use x86_64::registers::model_specific::EferFlags;

use crate::driver::serial::serial8250::send_to_default_serial8250_port;
use crate::include::bindings::bindings::{
    multiboot2_get_load_base, multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
    multiboot_tag_load_base_addr_t,
};
use crate::libs::align::page_align_up;
use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
use crate::libs::spinlock::SpinLock;

use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
use crate::mm::memblock::mem_block_manager;
use crate::mm::ucontext::LockedVMA;
use crate::{
    arch::MMArch,
    mm::allocator::{buddy::BuddyAllocator, bump::BumpAllocator},
};

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{PageEntry, PageFlags, PAGE_1G_SHIFT};
use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr};

use system_error::SystemError;

use core::arch::asm;
use core::ffi::c_void;
use core::fmt::Debug;
use core::mem::{self};

use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};

use super::kvm::vmx::vmcs::VmcsFields;
use super::kvm::vmx::vmx_asm_wrapper::vmx_vmread;

pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

/// Initial value of the CR3 register, i.e. the location of the first kernel page table
/// created during memory-management initialization
static mut INITIAL_CR3_VALUE: PhysAddr = PhysAddr::new(0);

static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);

#[derive(Clone, Copy, Debug)]
pub struct X86_64MMBootstrapInfo {
    kernel_load_base_paddr: usize,
    kernel_code_start: usize,
    kernel_code_end: usize,
    kernel_data_end: usize,
    kernel_rodata_end: usize,
    start_brk: usize,
}

pub(super) static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;

/// @brief The memory-management architecture struct for x86_64
#[derive(Debug, Clone, Copy, Hash)]
pub struct X86_64MMArch;

/// Whether the XD (execute-disable) flag bit is reserved
static XD_RESERVED: AtomicBool = AtomicBool::new(false);

impl MemoryManagementArch for X86_64MMArch {
    /// x86 currently supports page-fault handling
    const PAGE_FAULT_ENABLED: bool = true;
    /// 4 KiB pages
    const PAGE_SHIFT: usize = 12;

    /// Each page-table entry is 8 bytes; each table holds 512 entries
    const PAGE_ENTRY_SHIFT: usize = 9;

    /// Four-level page tables (PML4T, PDPT, PDT, PT)
    const PAGE_LEVELS: usize = 4;

    /// Index of the valid bits in a page-table entry. On x86_64, bits [0, 47] of an entry
    /// hold the address and flags, and bits [48, 51] are reserved, so the valid-bit index is 52.
    /// Note that bit 63 is the XD bit, which controls whether execution is allowed.
    const ENTRY_ADDRESS_SHIFT: usize = 52;

    const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_PRESENT: usize = 1 << 0;

    const ENTRY_FLAG_READONLY: usize = 0;

    const ENTRY_FLAG_WRITEABLE: usize = 1 << 1;
    const ENTRY_FLAG_READWRITE: usize = 1 << 1;

    const ENTRY_FLAG_USER: usize = 1 << 2;

    const ENTRY_FLAG_WRITE_THROUGH: usize = 1 << 3;

    const ENTRY_FLAG_CACHE_DISABLE: usize = 1 << 4;

    const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;
    /// x86_64 has no EXEC flag bit, only the NO_EXEC (XD) flag bit
    const ENTRY_FLAG_EXEC: usize = 0;

    const ENTRY_FLAG_ACCESSED: usize = 1 << 5;
    const ENTRY_FLAG_DIRTY: usize = 1 << 6;
    const ENTRY_FLAG_HUGE_PAGE: usize = 1 << 7;
    const ENTRY_FLAG_GLOBAL: usize = 1 << 8;

    /// Offset between physical and virtual addresses
    /// (0xffff_8000_0000_0000)
    const PHYS_OFFSET: usize = Self::PAGE_NEGATIVE_MASK + (Self::PAGE_ADDRESS_SIZE >> 1);
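    // Worked derivation (editor's note): with PAGE_LEVELS = 4, PAGE_ENTRY_SHIFT = 9 and
    // PAGE_SHIFT = 12, PAGE_ADDRESS_SIZE = 1 << 48, so PAGE_NEGATIVE_MASK = 0xffff_0000_0000_0000
    // and PHYS_OFFSET = 0xffff_0000_0000_0000 + (1 << 47) = 0xffff_8000_0000_0000, which matches
    // the value stated in the doc comment above.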
    const KERNEL_LINK_OFFSET: usize = 0x100000;

    // See https://code.dragonos.org.cn/xref/linux-6.1.9/arch/x86/include/asm/page_64_types.h#75
    const USER_END_VADDR: VirtAddr =
        VirtAddr::new((Self::PAGE_ADDRESS_SIZE >> 1) - Self::PAGE_SIZE);
    const USER_BRK_START: VirtAddr = VirtAddr::new(0x700000000000);
    const USER_STACK_START: VirtAddr = VirtAddr::new(0x6ffff0a00000);

    const FIXMAP_START_VADDR: VirtAddr = VirtAddr::new(0xffffb00000000000);
    /// The FIXMAP region is 1 MiB in size
    const FIXMAP_SIZE: usize = 256 * 4096;

    const MMIO_BASE: VirtAddr = VirtAddr::new(0xffffa10000000000);
    const MMIO_SIZE: usize = 1 << PAGE_1G_SHIFT;

    /// @brief Obtain the physical memory regions and initialize memory management
    unsafe fn init() {
        extern "C" {
            fn _text();
            fn _etext();
            fn _edata();
            fn _erodata();
            fn _end();
        }

        Self::init_xd_rsvd();
        let load_base_paddr = Self::get_load_base_paddr();

        let bootstrap_info = X86_64MMBootstrapInfo {
            kernel_load_base_paddr: load_base_paddr.data(),
            kernel_code_start: _text as usize,
            kernel_code_end: _etext as usize,
            kernel_data_end: _edata as usize,
            kernel_rodata_end: _erodata as usize,
            start_brk: _end as usize,
        };

        unsafe {
            BOOTSTRAP_MM_INFO = Some(bootstrap_info);
        }

        // Initialize the physical memory regions (obtained from multiboot2)
        Self::init_memory_area_from_multiboot2().expect("init memory area failed");

        debug!("bootstrap info: {:?}", unsafe { BOOTSTRAP_MM_INFO });
        debug!("phys[0]=virt[0x{:x}]", unsafe {
            MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()
        });

        // Initialize the memory allocator
        unsafe { allocator_init() };

        send_to_default_serial8250_port("x86 64 init done\n\0".as_bytes());
    }

    /// @brief Flush the TLB entry for the given virtual address
    unsafe fn invalidate_page(address: VirtAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("invlpg [{0}]", in(reg) address.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Flush all TLB entries
    unsafe fn invalidate_all() {
        compiler_fence(Ordering::SeqCst);
        // Reload the CR3 register to flush the entire TLB
        Self::set_table(PageTableKind::User, Self::table(PageTableKind::User));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Get the physical address of the top-level page table
    unsafe fn table(table_kind: PageTableKind) -> PhysAddr {
        match table_kind {
            PageTableKind::Kernel | PageTableKind::User => {
                compiler_fence(Ordering::SeqCst);
                let cr3 = x86::controlregs::cr3() as usize;
                compiler_fence(Ordering::SeqCst);
                return PhysAddr::new(cr3);
            }
            PageTableKind::EPT => {
                let eptp =
                    vmx_vmread(VmcsFields::CTRL_EPTP_PTR as u32).expect("Failed to read eptp");
                return PhysAddr::new(eptp as usize);
            }
        }
    }

    /// @brief Load the physical address of the top-level page table into the processor
    unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("mov cr3, {}", in(reg) table.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Check whether a virtual address is valid
    fn virt_is_valid(virt: VirtAddr) -> bool {
        return virt.is_canonical();
    }

    /// Get the address of the first kernel page table created during memory-management initialization
    fn initial_page_table() -> PhysAddr {
        unsafe {
            return INITIAL_CR3_VALUE;
        }
    }

    /// @brief Create a new top-level page table
    ///
    /// This function creates the page table and copies the kernel mappings into it
    ///
    /// @return the new page table
    fn setup_new_usermapper() -> Result<crate::mm::ucontext::UserMapper, SystemError> {
        let new_umapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::User, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };

        let current_ktable: KernelMapper = KernelMapper::lock();
        let copy_mapping = |pml4_entry_no| unsafe {
            let entry: PageEntry<X86_64MMArch> = current_ktable
                .table()
                .entry(pml4_entry_no)
                .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
            new_umapper.table().set_entry(pml4_entry_no, entry)
        };

        // Copy the kernel mappings (the upper-half PML4 entries)
        for pml4_entry_no in MMArch::PAGE_KERNEL_INDEX..MMArch::PAGE_ENTRY_NUM {
            copy_mapping(pml4_entry_no);
        }

        return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
    }
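
    // Usage sketch (editor's note; the call site is hypothetical, for illustration only):
    //
    //     let user_mapper = MMArch::setup_new_usermapper()?;
    //
    // The returned `UserMapper` already shares the kernel's upper-half PML4 entries copied above,
    // so the kernel stays mapped after switching to the new user page table.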

    const PAGE_SIZE: usize = 1 << Self::PAGE_SHIFT;

    const PAGE_OFFSET_MASK: usize = Self::PAGE_SIZE - 1;

    const PAGE_MASK: usize = !(Self::PAGE_OFFSET_MASK);

    const PAGE_ADDRESS_SHIFT: usize = Self::PAGE_LEVELS * Self::PAGE_ENTRY_SHIFT + Self::PAGE_SHIFT;

    const PAGE_ADDRESS_SIZE: usize = 1 << Self::PAGE_ADDRESS_SHIFT;

    const PAGE_ADDRESS_MASK: usize = Self::PAGE_ADDRESS_SIZE - Self::PAGE_SIZE;

    const PAGE_ENTRY_SIZE: usize = 1 << (Self::PAGE_SHIFT - Self::PAGE_ENTRY_SHIFT);

    const PAGE_ENTRY_NUM: usize = 1 << Self::PAGE_ENTRY_SHIFT;

    const PAGE_ENTRY_MASK: usize = Self::PAGE_ENTRY_NUM - 1;

    const PAGE_KERNEL_INDEX: usize = (Self::PHYS_OFFSET & Self::PAGE_ADDRESS_MASK)
        >> (Self::PAGE_ADDRESS_SHIFT - Self::PAGE_ENTRY_SHIFT);
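    // Worked derivation (editor's note): PHYS_OFFSET & PAGE_ADDRESS_MASK == 1 << 47, and shifting
    // right by PAGE_ADDRESS_SHIFT - PAGE_ENTRY_SHIFT = 48 - 9 = 39 gives PAGE_KERNEL_INDEX = 256,
    // i.e. the kernel half occupies PML4 entries 256..512, exactly the range that
    // `setup_new_usermapper` copies above.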

    const PAGE_NEGATIVE_MASK: usize = !((Self::PAGE_ADDRESS_SIZE) - 1);

    const ENTRY_ADDRESS_SIZE: usize = 1 << Self::ENTRY_ADDRESS_SHIFT;

    const ENTRY_ADDRESS_MASK: usize = Self::ENTRY_ADDRESS_SIZE - Self::PAGE_SIZE;

    const ENTRY_FLAGS_MASK: usize = !Self::ENTRY_ADDRESS_MASK;

    unsafe fn read<T>(address: VirtAddr) -> T {
        return core::ptr::read(address.data() as *const T);
    }

    unsafe fn write<T>(address: VirtAddr, value: T) {
        core::ptr::write(address.data() as *mut T, value);
    }

    unsafe fn write_bytes(address: VirtAddr, value: u8, count: usize) {
        core::ptr::write_bytes(address.data() as *mut u8, value, count);
    }

    unsafe fn phys_2_virt(phys: PhysAddr) -> Option<VirtAddr> {
        if let Some(vaddr) = phys.data().checked_add(Self::PHYS_OFFSET) {
            return Some(VirtAddr::new(vaddr));
        } else {
            return None;
        }
    }

    unsafe fn virt_2_phys(virt: VirtAddr) -> Option<PhysAddr> {
        if let Some(paddr) = virt.data().checked_sub(Self::PHYS_OFFSET) {
            return Some(PhysAddr::new(paddr));
        } else {
            return None;
        }
    }

    #[inline(always)]
    fn make_entry(paddr: PhysAddr, page_flags: usize) -> usize {
        return paddr.data() | page_flags;
    }
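
    // Illustrative example (editor's note, not part of the original source): composing an entry
    // for physical frame 0x1000 with ENTRY_FLAG_PRESENT | ENTRY_FLAG_WRITEABLE gives
    // make_entry(PhysAddr::new(0x1000), 0b11) == 0x1003, i.e. the frame address in the upper bits
    // and the flag bits in the low bits.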

    fn vma_access_permitted(
        vma: Arc<LockedVMA>,
        write: bool,
        execute: bool,
        foreign: bool,
    ) -> bool {
        if execute {
            return true;
        }
        if foreign | vma.is_foreign() {
            return true;
        }
        pkru::pkru_allows_pkey(pkru::vma_pkey(vma), write)
    }
}
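
// Editor's sketch (not in the original file): compile-time sanity checks of the derived layout
// constants above, assuming the 4-level, 48-bit paging configuration they encode.
const _: () = assert!(X86_64MMArch::PAGE_ADDRESS_SHIFT == 48);
const _: () = assert!(X86_64MMArch::PHYS_OFFSET == 0xffff_8000_0000_0000);
const _: () = assert!(X86_64MMArch::PAGE_KERNEL_INDEX == 256);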

impl X86_64MMArch {
    unsafe fn get_load_base_paddr() -> PhysAddr {
        let mut mb2_lb_info: [multiboot_tag_load_base_addr_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("get_load_base_paddr begin\n\0".as_bytes());

        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_load_base),
            &mut mb2_lb_info as *mut [multiboot_tag_load_base_addr_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );

        if mb2_count == 0 {
            send_to_default_serial8250_port(
                "get_load_base_paddr mb2_count == 0, default to 1MB\n\0".as_bytes(),
            );
            return PhysAddr::new(0x100000);
        }

        let phys = mb2_lb_info[0].load_base_addr as usize;

        return PhysAddr::new(phys);
    }

    unsafe fn init_memory_area_from_multiboot2() -> Result<usize, SystemError> {
        // This array holds the memory-region information (obtained from the C side)
        let mut mb2_mem_info: [multiboot_mmap_entry_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 begin\n\0".as_bytes());

        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_memory),
            &mut mb2_mem_info as *mut [multiboot_mmap_entry_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 2\n\0".as_bytes());

        let mb2_count = mb2_count as usize;
        let mut areas_count = 0usize;
        let mut total_mem_size = 0usize;
        for info_entry in mb2_mem_info.iter().take(mb2_count) {
            // Only use the memory area if its type is 1 (RAM)
            if info_entry.type_ == 1 {
                // Skip the memory area if its len is 0
                if info_entry.len == 0 {
                    continue;
                }

                total_mem_size += info_entry.len as usize;

                mem_block_manager()
                    .add_block(
                        PhysAddr::new(info_entry.addr as usize),
                        info_entry.len as usize,
                    )
                    .unwrap_or_else(|e| {
                        warn!(
                            "Failed to add memory block: base={:#x}, size={:#x}, error={:?}",
                            info_entry.addr, info_entry.len, e
                        );
                    });
                areas_count += 1;
            }
        }
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 end\n\0".as_bytes());
        info!("Total memory size: {} MB, total areas from multiboot2: {mb2_count}, valid areas: {areas_count}", total_mem_size / 1024 / 1024);
        return Ok(areas_count);
    }

    fn init_xd_rsvd() {
        // Read the IA32_EFER register
        let efer: EferFlags = x86_64::registers::model_specific::Efer::read();
        if !efer.contains(EferFlags::NO_EXECUTE_ENABLE) {
            // NO_EXECUTE_ENABLE is false, so mark the XD bit as reserved
            debug!("NO_EXECUTE_ENABLE is false, set XD_RESERVED to true");
            XD_RESERVED.store(true, Ordering::Relaxed);
        }
        compiler_fence(Ordering::SeqCst);
    }

    /// Check whether the XD flag bit is reserved
    pub fn is_xd_reserved() -> bool {
        // return XD_RESERVED.load(Ordering::Relaxed);

        // Execute-disable is not supported yet, so always return true.
        // The reason it is unsupported: the page-level XD bit does not seem to be set correctly
        // yet, which triggers page faults.
        return true;
    }
}

impl VirtAddr {
    /// @brief Check whether a virtual address is canonical
    #[inline(always)]
    pub fn is_canonical(self) -> bool {
        let x = self.data() & X86_64MMArch::PHYS_OFFSET;
        // If x is 0, the high bits of the virtual address are all 0, so it is a valid user address.
        // If x equals PHYS_OFFSET, the high bits are all 1, so it is a valid kernel address.
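        // Worked examples (editor's note, assuming the 48-bit layout above):
        //   0x0000_7fff_ffff_f000 & PHYS_OFFSET == 0                     -> canonical user address
        //   0xffff_8000_0000_0000 & PHYS_OFFSET == PHYS_OFFSET           -> canonical kernel address
        //   0x0000_8000_0000_0000 & PHYS_OFFSET == 0x0000_8000_0000_0000 -> neither, so rejected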
        return x == 0 || x == X86_64MMArch::PHYS_OFFSET;
    }
}

unsafe fn allocator_init() {
    let virt_offset = VirtAddr::new(page_align_up(BOOTSTRAP_MM_INFO.unwrap().start_brk));

    let phy_offset = unsafe { MMArch::virt_2_phys(virt_offset) }.unwrap();

    mem_block_manager()
        .reserve_block(PhysAddr::new(0), phy_offset.data())
        .expect("Failed to reserve block");
    let mut bump_allocator = BumpAllocator::<X86_64MMArch>::new(phy_offset.data());
    debug!(
        "BumpAllocator created, offset={:?}",
        bump_allocator.offset()
    );

    // Stash the address of the initial page table set up in head.S; whether to hand it back to the
    // buddy allocator's free space is left for later. It is not returned for now out of security
    // concerns: these initial page tables live in the kernel data segment, and returning them to
    // the buddy allocator could pose a risk (some code may perform safety checks based on virtual
    // addresses).
    let _old_page_table = MMArch::table(PageTableKind::Kernel);

    let new_page_table: PhysAddr;
    // Use the bump allocator to map every physical memory page into the page table
    {
        // Create the new page table with the bump allocator
        let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =
            crate::mm::page::PageMapper::<MMArch, _>::create(
                PageTableKind::Kernel,
                &mut bump_allocator,
            )
            .expect("Failed to create page mapper");
        new_page_table = mapper.table().phys();
        debug!("PageMapper created");

        // Remove the initial mappings set up in head.S (without flushing the TLB for now)
        {
            let table = mapper.table();
            let empty_entry = PageEntry::<MMArch>::from_usize(0);
            for i in 0..MMArch::PAGE_ENTRY_NUM {
                table
                    .set_entry(i, empty_entry)
                    .expect("Failed to empty page table entry");
            }
        }
        debug!("Successfully emptied page table");

        let total_num = mem_block_manager().total_initial_memory_regions();
        for i in 0..total_num {
            let area = mem_block_manager().get_initial_memory_region(i).unwrap();
            // debug!("area: base={:?}, size={:#x}, end={:?}", area.base, area.size, area.base + area.size);
            for page_idx in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {
                let paddr = area.base.add(page_idx * MMArch::PAGE_SIZE);
                let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
                let flags = kernel_page_flags::<MMArch>(vaddr);

                let flusher = mapper
                    .map_phys(vaddr, paddr, flags)
                    .expect("Failed to map frame");
                // Do not flush the TLB for now
                flusher.ignore();
            }
        }
    }

    unsafe {
        INITIAL_CR3_VALUE = new_page_table;
    }
    debug!(
        "After mapping all physical memory, DragonOS used: {} KB",
        bump_allocator.offset() / 1024
    );

    // Initialize the buddy allocator
    let buddy_allocator = unsafe { BuddyAllocator::<X86_64MMArch>::new(bump_allocator).unwrap() };
    // Install it as the global page-frame allocator
    unsafe { set_inner_allocator(buddy_allocator) };
    info!("Successfully initialized buddy allocator");
    // Disable screen output
    scm_disable_put_to_window();

    // Make the new page table current
    {
        let mut binding = INNER_ALLOCATOR.lock();
        let mut allocator_guard = binding.as_mut().unwrap();
        debug!("To enable new page table.");
        compiler_fence(Ordering::SeqCst);
        let mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
            PageTableKind::Kernel,
            new_page_table,
            &mut allocator_guard,
        );
        compiler_fence(Ordering::SeqCst);
        mapper.make_current();
        compiler_fence(Ordering::SeqCst);
        debug!("New page table enabled");
    }
    debug!("Successfully enabled new page table");
}

#[no_mangle]
pub extern "C" fn rs_test_buddy() {
    test_buddy();
}

pub fn test_buddy() {
    // Allocate memory, write data into it, then free it.
    // A total of 200 MB is allocated in each round.
    const TOTAL_SIZE: usize = 200 * 1024 * 1024;

    for i in 0..10 {
        debug!("Test buddy, round: {i}");
        // Holds the allocated memory blocks
        let mut v: Vec<(PhysAddr, PageFrameCount)> = Vec::with_capacity(60 * 1024);
        // Holds the addresses of blocks already allocated (used to detect duplicates)
        let mut addr_set: HashSet<PhysAddr> = HashSet::new();

        let mut allocated = 0usize;

        let mut free_count = 0usize;

        while allocated < TOTAL_SIZE {
            let mut random_size = 0u64;
            unsafe { x86::random::rdrand64(&mut random_size) };
            // Allocate at most 4 MiB at a time
            random_size %= 1024 * 4096;
            if random_size == 0 {
                continue;
            }
            let random_size =
                core::cmp::min(page_align_up(random_size as usize), TOTAL_SIZE - allocated);
            let random_size = PageFrameCount::from_bytes(random_size.next_power_of_two()).unwrap();
            // Allocate the frames
            let (paddr, allocated_frame_count) =
                unsafe { LockedFrameAllocator.allocate(random_size).unwrap() };
            assert!(allocated_frame_count.data().is_power_of_two());
            assert!(paddr.data() % MMArch::PAGE_SIZE == 0);
            unsafe {
                assert!(MMArch::phys_2_virt(paddr)
                    .as_ref()
                    .unwrap()
                    .check_aligned(allocated_frame_count.data() * MMArch::PAGE_SIZE));
            }
            allocated += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            v.push((paddr, allocated_frame_count));
            assert!(addr_set.insert(paddr), "duplicate address: {:?}", paddr);

            // Write data into the allocated block
            let vaddr = unsafe { MMArch::phys_2_virt(paddr).unwrap() };
            let slice = unsafe {
                core::slice::from_raw_parts_mut(
                    vaddr.data() as *mut u8,
                    allocated_frame_count.data() * MMArch::PAGE_SIZE,
                )
            };
            for (i, item) in slice.iter_mut().enumerate() {
                *item = ((i + unsafe { rdtsc() } as usize) % 256) as u8;
            }

            // Randomly free one memory block
            if !v.is_empty() {
                let mut random_index = 0u64;
                unsafe { x86::random::rdrand64(&mut random_index) };
                // Free with 80% probability (skip when random_index % 10 is 8 or 9)
                if random_index % 10 > 7 {
                    continue;
                }
                random_index %= v.len() as u64;
                let random_index = random_index as usize;
                let (paddr, allocated_frame_count) = v.remove(random_index);
                assert!(addr_set.remove(&paddr));
                unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
                free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            }
        }

        debug!(
            "Allocated {} MB of memory, released: {} MB, not yet released: {} bytes",
            allocated / 1024 / 1024,
            free_count / 1024 / 1024,
            (allocated - free_count)
        );

        debug!("Now releasing the remaining buddy memory");
        // Free all remaining memory
        for (paddr, allocated_frame_count) in v {
            unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
            assert!(addr_set.remove(&paddr));
            free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
        }

        debug!("release done! allocated: {allocated}, free_count: {free_count}");
    }
}

/// The global page-frame allocator
#[derive(Debug, Clone, Copy, Hash)]
pub struct LockedFrameAllocator;

impl FrameAllocator for LockedFrameAllocator {
    unsafe fn allocate(&mut self, mut count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
        count = count.next_power_of_two();
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.allocate(count);
        } else {
            return None;
        }
    }

    unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
        assert!(count.data().is_power_of_two());
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.free(address, count);
        }
    }

    unsafe fn usage(&self) -> PageFrameUsage {
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.usage();
        } else {
            panic!("usage error");
        }
    }
}
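
// Usage sketch (editor's note; a minimal illustration, not part of the original file):
//
//     // Request one page worth of frames. The buddy allocator rounds the count up to a power
//     // of two, and `allocate` returns the count that was actually handed out.
//     let count = PageFrameCount::from_bytes(MMArch::PAGE_SIZE).unwrap();
//     if let Some((paddr, real_count)) = unsafe { LockedFrameAllocator.allocate(count) } {
//         // ... use the frames starting at `paddr` ...
//         unsafe { LockedFrameAllocator.free(paddr, real_count) };
//     }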

/// Get the default page flags for a kernel address
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> PageFlags<A> {
    let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.unwrap();

    if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
        // Remap kernel code as executable (write is currently also enabled)
        return PageFlags::new().set_execute(true).set_write(true);
    } else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
        // Remap kernel rodata as read-only (but still executable)
        return PageFlags::new().set_execute(true);
    } else {
        return PageFlags::new().set_write(true).set_execute(true);
    }
}

unsafe fn set_inner_allocator(allocator: BuddyAllocator<MMArch>) {
    static FLAG: AtomicBool = AtomicBool::new(false);
    if FLAG
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_err()
    {
        panic!("Cannot set inner allocator twice!");
    }
    *INNER_ALLOCATOR.lock() = Some(allocator);
}

/// Manager for the low-address remapping
///
/// The low-address identity mapping is needed before SMP initialization completes, so it must be
/// removed once SMP initialization is done.
pub struct LowAddressRemapping;

impl LowAddressRemapping {
    // Map 64 MiB
    const REMAP_SIZE: usize = 64 * 1024 * 1024;

    pub unsafe fn remap_at_low_address(mapper: &mut PageMapper) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let paddr = PhysAddr::new(i * MMArch::PAGE_SIZE);
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let flags = kernel_page_flags::<MMArch>(vaddr);

            let flusher = mapper
                .map_phys(vaddr, paddr, flags)
                .expect("Failed to map frame");
            // Do not flush the TLB for now
            flusher.ignore();
        }
    }

    /// Remove the low-address mapping
    pub unsafe fn unmap_at_low_address(mapper: &mut PageMapper, flush: bool) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let (_, _, flusher) = mapper
                .unmap_phys(vaddr, true)
                .expect("Failed to unmap frame");
            if !flush {
                flusher.ignore();
            }
        }
    }
}
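
// Usage sketch (editor's note; the call sites shown here are hypothetical, for illustration only):
//
//     // During early boot, before the APs are started, keep the low 64 MiB identity-mapped:
//     unsafe { LowAddressRemapping::remap_at_low_address(&mut kernel_mapper) };
//     // ...bring up the other CPUs...
//     // Once SMP initialization is finished, drop the identity mapping and flush the TLB:
//     unsafe { LowAddressRemapping::unmap_at_low_address(&mut kernel_mapper, true) };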