xref: /DragonOS/kernel/src/arch/x86_64/mm/mod.rs (revision 3e3c6316aaac5a8a2932bd1746ec8b900dc5e2c6)
1 pub mod barrier;
2 pub mod bump;
3 mod c_adapter;
4 
5 use alloc::vec::Vec;
6 use hashbrown::HashSet;
7 use x86::time::rdtsc;
8 use x86_64::registers::model_specific::EferFlags;
9 
10 use crate::driver::tty::serial::serial8250::send_to_default_serial8250_port;
11 use crate::include::bindings::bindings::{
12     multiboot2_get_load_base, multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
13     multiboot_tag_load_base_addr_t,
14 };
15 use crate::libs::align::page_align_up;
16 use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
17 use crate::libs::printk::PrintkWriter;
18 use crate::libs::spinlock::SpinLock;
19 
20 use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
21 use crate::mm::memblock::mem_block_manager;
22 use crate::mm::mmio_buddy::mmio_init;
23 use crate::{
24     arch::MMArch,
25     mm::allocator::{buddy::BuddyAllocator, bump::BumpAllocator},
26 };
27 
28 use crate::mm::kernel_mapper::KernelMapper;
29 use crate::mm::page::{PageEntry, PageFlags};
30 use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr};
31 use crate::{kdebug, kinfo, kwarn};
32 use system_error::SystemError;
33 
34 use core::arch::asm;
35 use core::ffi::c_void;
36 use core::fmt::{Debug, Write};
37 use core::mem::{self};
38 
39 use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};
40 
41 use super::kvm::vmx::vmcs::VmcsFields;
42 use super::kvm::vmx::vmx_asm_wrapper::vmx_vmread;
43 
44 pub type PageMapper =
45     crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;
46 
47 /// Initial value of the CR3 register, i.e. the location of the first kernel page table created during memory management initialization
48 static mut INITIAL_CR3_VALUE: PhysAddr = PhysAddr::new(0);
49 
50 /// Index in the PML4 of the kernel's first page table
51 /// Entries [256, 512) of the top-level page table belong to the kernel
52 static KERNEL_PML4E_NO: usize = (X86_64MMArch::PHYS_OFFSET & ((1 << 48) - 1)) >> 39;
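// For example: 0xffff_8000_0000_0000 & ((1 << 48) - 1) == 0x8000_0000_0000, and
// 0x8000_0000_0000 >> 39 == 256, so the kernel owns PML4 entries [256, 512).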
53 
54 static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);
55 
56 #[derive(Clone, Copy, Debug)]
57 pub struct X86_64MMBootstrapInfo {
58     kernel_load_base_paddr: usize,
59     kernel_code_start: usize,
60     kernel_code_end: usize,
61     kernel_data_end: usize,
62     kernel_rodata_end: usize,
63     start_brk: usize,
64 }
65 
66 pub(super) static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;
67 
68 /// @brief Memory management architecture struct for x86_64
69 #[derive(Debug, Clone, Copy, Hash)]
70 pub struct X86_64MMArch;
71 
72 /// Whether the XD flag bit is reserved
73 static XD_RESERVED: AtomicBool = AtomicBool::new(false);
74 
75 impl MemoryManagementArch for X86_64MMArch {
76     /// 4K pages
77     const PAGE_SHIFT: usize = 12;
78 
79     /// Each page table entry is 8 bytes; each table holds 512 entries
80     const PAGE_ENTRY_SHIFT: usize = 9;
81 
82     /// Four levels of page tables (PML4T, PDPT, PDT, PT)
83     const PAGE_LEVELS: usize = 4;
84 
85     /// Index of the valid bits in a page table entry. On x86_64, bits [0, 47] of an entry
86     /// hold the address and flags, and bits [48, 51] are reserved, so the valid-bit index is 52.
87     /// Note that bit 63 is the XD bit, which controls whether execution is allowed.
88     const ENTRY_ADDRESS_SHIFT: usize = 52;
89 
90     const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT;
91 
92     const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;
93 
94     const ENTRY_FLAG_PRESENT: usize = 1 << 0;
95 
96     const ENTRY_FLAG_READONLY: usize = 0;
97 
98     const ENTRY_FLAG_READWRITE: usize = 1 << 1;
99 
100     const ENTRY_FLAG_USER: usize = 1 << 2;
101 
102     const ENTRY_FLAG_WRITE_THROUGH: usize = 1 << 3;
103 
104     const ENTRY_FLAG_CACHE_DISABLE: usize = 1 << 4;
105 
106     const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;
107     /// x86_64 has no EXEC flag; there is only the NO_EXEC (XD) flag
108     const ENTRY_FLAG_EXEC: usize = 0;
109 
110     /// Offset between physical addresses and virtual addresses
111     /// 0xffff_8000_0000_0000
112     const PHYS_OFFSET: usize = Self::PAGE_NEGATIVE_MASK + (Self::PAGE_ADDRESS_SIZE >> 1);
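    // For example: PAGE_NEGATIVE_MASK == 0xffff_0000_0000_0000 and PAGE_ADDRESS_SIZE >> 1 ==
    // 0x0000_8000_0000_0000 (with PAGE_ADDRESS_SIZE == 1 << 48), which sum to 0xffff_8000_0000_0000.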
113 
114     const USER_END_VADDR: VirtAddr = VirtAddr::new(0x0000_7eff_ffff_ffff);
115     const USER_BRK_START: VirtAddr = VirtAddr::new(0x0000_7000_0000_0000);
116     const USER_STACK_START: VirtAddr = VirtAddr::new(0x0000_6fff_f0a0_0000);
117 
118     const FIXMAP_START_VADDR: VirtAddr = VirtAddr::new(0xffff_b000_0000_0000);
119     /// Set the FIXMAP region size to 1 MiB (256 pages of 4 KiB)
120     const FIXMAP_SIZE: usize = 256 * 4096;
121 
122     /// @brief Obtain the physical memory regions and record the kernel bootstrap layout
123     unsafe fn init() {
124         extern "C" {
125             fn _text();
126             fn _etext();
127             fn _edata();
128             fn _erodata();
129             fn _end();
130         }
131 
132         Self::init_xd_rsvd();
133         let load_base_paddr = Self::get_load_base_paddr();
134 
135         let bootstrap_info = X86_64MMBootstrapInfo {
136             kernel_load_base_paddr: load_base_paddr.data(),
137             kernel_code_start: _text as usize,
138             kernel_code_end: _etext as usize,
139             kernel_data_end: _edata as usize,
140             kernel_rodata_end: _erodata as usize,
141             start_brk: _end as usize,
142         };
143 
144         unsafe {
145             BOOTSTRAP_MM_INFO = Some(bootstrap_info);
146         }
147 
148         // 初始化物理内存区域(从multiboot2中获取)
149         Self::init_memory_area_from_multiboot2().expect("init memory area failed");
150 
151         send_to_default_serial8250_port("x86 64 init end\n\0".as_bytes());
152     }
153 
154     /// @brief Flush the TLB entry for the given virtual address
155     unsafe fn invalidate_page(address: VirtAddr) {
156         compiler_fence(Ordering::SeqCst);
157         asm!("invlpg [{0}]", in(reg) address.data(), options(nostack, preserves_flags));
158         compiler_fence(Ordering::SeqCst);
159     }
160 
161     /// @brief Flush all TLB entries
162     unsafe fn invalidate_all() {
163         compiler_fence(Ordering::SeqCst);
164         // Flush the entire TLB by reloading the CR3 register
165         Self::set_table(PageTableKind::User, Self::table(PageTableKind::User));
166         compiler_fence(Ordering::SeqCst);
167     }
168 
169     /// @brief Get the physical address of the top-level page table
170     unsafe fn table(table_kind: PageTableKind) -> PhysAddr {
171         match table_kind {
172             PageTableKind::Kernel | PageTableKind::User => {
173                 let paddr: usize;
174                 compiler_fence(Ordering::SeqCst);
175                 asm!("mov {}, cr3", out(reg) paddr, options(nomem, nostack, preserves_flags));
176                 compiler_fence(Ordering::SeqCst);
177                 return PhysAddr::new(paddr);
178             }
179             PageTableKind::EPT => {
180                 let eptp =
181                     vmx_vmread(VmcsFields::CTRL_EPTP_PTR as u32).expect("Failed to read eptp");
182                 return PhysAddr::new(eptp as usize);
183             }
184         }
185     }
186 
187     /// @brief Load the physical address of the top-level page table into the processor
188     unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
189         compiler_fence(Ordering::SeqCst);
190         asm!("mov cr3, {}", in(reg) table.data(), options(nostack, preserves_flags));
191         compiler_fence(Ordering::SeqCst);
192     }
193 
194     /// @brief Check whether a virtual address is valid (canonical)
195     fn virt_is_valid(virt: VirtAddr) -> bool {
196         return virt.is_canonical();
197     }
198 
199     /// Get the address of the first kernel page table created during memory management initialization
200     fn initial_page_table() -> PhysAddr {
201         unsafe {
202             return INITIAL_CR3_VALUE;
203         }
204     }
205 
206     /// @brief Create a new top-level page table
207     ///
208     /// This function creates a page table and copies the kernel mappings into it
209     ///
210     /// @return The new page table
211     fn setup_new_usermapper() -> Result<crate::mm::ucontext::UserMapper, SystemError> {
212         let new_umapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
213             PageMapper::create(PageTableKind::User, LockedFrameAllocator)
214                 .ok_or(SystemError::ENOMEM)?
215         };
216 
217         let current_ktable: KernelMapper = KernelMapper::lock();
218         let copy_mapping = |pml4_entry_no| unsafe {
219             let entry: PageEntry<X86_64MMArch> = current_ktable
220                 .table()
221                 .entry(pml4_entry_no)
222                 .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
223             new_umapper.table().set_entry(pml4_entry_no, entry)
224         };
225 
226         // Copy the kernel mappings
227         for pml4_entry_no in KERNEL_PML4E_NO..512 {
228             copy_mapping(pml4_entry_no);
229         }
230 
231         return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
232     }
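    // Illustrative use (a sketch; this function is not called from this file): a new user address
    // space is obtained via `MMArch::setup_new_usermapper()?`, which starts from a fresh top-level
    // table and shares PML4 entries [KERNEL_PML4E_NO, 512) with the current kernel table.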
233 
234     const PAGE_SIZE: usize = 1 << Self::PAGE_SHIFT;
235 
236     const PAGE_OFFSET_MASK: usize = Self::PAGE_SIZE - 1;
237 
238     const PAGE_MASK: usize = !(Self::PAGE_OFFSET_MASK);
239 
240     const PAGE_ADDRESS_SHIFT: usize = Self::PAGE_LEVELS * Self::PAGE_ENTRY_SHIFT + Self::PAGE_SHIFT;
241 
242     const PAGE_ADDRESS_SIZE: usize = 1 << Self::PAGE_ADDRESS_SHIFT;
243 
244     const PAGE_ADDRESS_MASK: usize = Self::PAGE_ADDRESS_SIZE - Self::PAGE_SIZE;
245 
246     const PAGE_ENTRY_SIZE: usize = 1 << (Self::PAGE_SHIFT - Self::PAGE_ENTRY_SHIFT);
247 
248     const PAGE_ENTRY_NUM: usize = 1 << Self::PAGE_ENTRY_SHIFT;
249 
250     const PAGE_ENTRY_MASK: usize = Self::PAGE_ENTRY_NUM - 1;
251 
252     const PAGE_NEGATIVE_MASK: usize = !((Self::PAGE_ADDRESS_SIZE) - 1);
253 
254     const ENTRY_ADDRESS_SIZE: usize = 1 << Self::ENTRY_ADDRESS_SHIFT;
255 
256     const ENTRY_ADDRESS_MASK: usize = Self::ENTRY_ADDRESS_SIZE - Self::PAGE_SIZE;
257 
258     const ENTRY_FLAGS_MASK: usize = !Self::ENTRY_ADDRESS_MASK;
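    // Derived values for the constants above: PAGE_ADDRESS_SHIFT = 4 * 9 + 12 = 48,
    // PAGE_ENTRY_SIZE = 1 << (12 - 9) = 8 bytes per entry, and PAGE_ENTRY_NUM = 1 << 9 = 512
    // entries per table, consistent with the comments at the top of this impl.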
259 
260     unsafe fn read<T>(address: VirtAddr) -> T {
261         return core::ptr::read(address.data() as *const T);
262     }
263 
264     unsafe fn write<T>(address: VirtAddr, value: T) {
265         core::ptr::write(address.data() as *mut T, value);
266     }
267 
268     unsafe fn write_bytes(address: VirtAddr, value: u8, count: usize) {
269         core::ptr::write_bytes(address.data() as *mut u8, value, count);
270     }
271 
272     unsafe fn phys_2_virt(phys: PhysAddr) -> Option<VirtAddr> {
273         if let Some(vaddr) = phys.data().checked_add(Self::PHYS_OFFSET) {
274             return Some(VirtAddr::new(vaddr));
275         } else {
276             return None;
277         }
278     }
279 
280     unsafe fn virt_2_phys(virt: VirtAddr) -> Option<PhysAddr> {
281         if let Some(paddr) = virt.data().checked_sub(Self::PHYS_OFFSET) {
282             return Some(PhysAddr::new(paddr));
283         } else {
284             return None;
285         }
286     }
287 }
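// Round-trip example for the direct mapping above: phys_2_virt(PhysAddr::new(0x10_0000)) yields
// Some(VirtAddr::new(0xffff_8000_0010_0000)), and virt_2_phys() of that address returns
// Some(PhysAddr::new(0x10_0000)); a virtual address below PHYS_OFFSET yields None instead.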
288 
289 impl X86_64MMArch {
290     unsafe fn get_load_base_paddr() -> PhysAddr {
291         let mut mb2_lb_info: [multiboot_tag_load_base_addr_t; 512] = mem::zeroed();
292         send_to_default_serial8250_port("get_load_base_paddr begin\n\0".as_bytes());
293 
294         let mut mb2_count: u32 = 0;
295         multiboot2_iter(
296             Some(multiboot2_get_load_base),
297             &mut mb2_lb_info as *mut [multiboot_tag_load_base_addr_t; 512] as usize as *mut c_void,
298             &mut mb2_count,
299         );
300 
301         if mb2_count == 0 {
302             send_to_default_serial8250_port(
303                 "get_load_base_paddr mb2_count == 0, default to 1MB\n\0".as_bytes(),
304             );
305             return PhysAddr::new(0x100000);
306         }
307 
308         let phys = mb2_lb_info[0].load_base_addr as usize;
309 
310         return PhysAddr::new(phys);
311     }
312     unsafe fn init_memory_area_from_multiboot2() -> Result<usize, SystemError> {
313         // This array stores the memory region information (obtained from C)
314         let mut mb2_mem_info: [multiboot_mmap_entry_t; 512] = mem::zeroed();
315         send_to_default_serial8250_port("init_memory_area_from_multiboot2 begin\n\0".as_bytes());
316 
317         let mut mb2_count: u32 = 0;
318         multiboot2_iter(
319             Some(multiboot2_get_memory),
320             &mut mb2_mem_info as *mut [multiboot_mmap_entry_t; 512] as usize as *mut c_void,
321             &mut mb2_count,
322         );
323         send_to_default_serial8250_port("init_memory_area_from_multiboot2 2\n\0".as_bytes());
324 
325         let mb2_count = mb2_count as usize;
326         let mut areas_count = 0usize;
327         let mut total_mem_size = 0usize;
328         for i in 0..mb2_count {
329             // Only use the memory area if its type is 1 (RAM)
330             if mb2_mem_info[i].type_ == 1 {
331                 // Skip the memory area if its len is 0
332                 if mb2_mem_info[i].len == 0 {
333                     continue;
334                 }
335 
336                 total_mem_size += mb2_mem_info[i].len as usize;
337                 // PHYS_MEMORY_AREAS[areas_count].base = PhysAddr::new(mb2_mem_info[i].addr as usize);
338                 // PHYS_MEMORY_AREAS[areas_count].size = mb2_mem_info[i].len as usize;
339 
340                 mem_block_manager()
341                     .add_block(
342                         PhysAddr::new(mb2_mem_info[i].addr as usize),
343                         mb2_mem_info[i].len as usize,
344                     )
345                     .unwrap_or_else(|e| {
346                         kwarn!(
347                             "Failed to add memory block: base={:#x}, size={:#x}, error={:?}",
348                             mb2_mem_info[i].addr,
349                             mb2_mem_info[i].len,
350                             e
351                         );
352                     });
353                 areas_count += 1;
354             }
355         }
356         send_to_default_serial8250_port("init_memory_area_from_multiboot2 end\n\0".as_bytes());
357         kinfo!("Total memory size: {} MB, total areas from multiboot2: {mb2_count}, valid areas: {areas_count}", total_mem_size / 1024 / 1024);
358         return Ok(areas_count);
359     }
360 
361     fn init_xd_rsvd() {
362         // Read the value of the IA32_EFER register
363         let efer: EferFlags = x86_64::registers::model_specific::Efer::read();
364         if !efer.contains(EferFlags::NO_EXECUTE_ENABLE) {
365             // NO_EXECUTE_ENABLE is false, so set xd_reserved to true
366             kdebug!("NO_EXECUTE_ENABLE is false, set XD_RESERVED to true");
367             XD_RESERVED.store(true, Ordering::Relaxed);
368         }
369         compiler_fence(Ordering::SeqCst);
370     }
371 
372     /// Check whether the XD flag bit is reserved
373     pub fn is_xd_reserved() -> bool {
374         // return XD_RESERVED.load(Ordering::Relaxed);
375 
376         // Execute-disable is not supported yet, so simply return true.
377         // Reason: the page-level XD bit does not seem to be set correctly yet, which triggers page faults
378         return true;
379     }
380 }
381 
382 impl VirtAddr {
383     /// @brief Check whether the virtual address is canonical
384     #[inline(always)]
385     pub fn is_canonical(self) -> bool {
386         let x = self.data() & X86_64MMArch::PHYS_OFFSET;
387         // If x is 0, the high bits of the virtual address are all 0: a valid user address
388         // If x equals PHYS_OFFSET, the high bits of the virtual address are all 1: a valid kernel address
389         return x == 0 || x == X86_64MMArch::PHYS_OFFSET;
390     }
391 }
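// Examples of the canonical check above: 0x0000_7fff_ffff_ffff and 0xffff_8000_0000_1000 are
// canonical (bits [47, 63] all clear or all set), while 0x0000_8000_0000_0000 is not, because
// bit 47 is set but bits [48, 63] are clear.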
392 
393 /// @brief Initialize the memory management module
394 pub fn mm_init() {
395     send_to_default_serial8250_port("mm_init\n\0".as_bytes());
396     PrintkWriter
397         .write_fmt(format_args!("mm_init() called\n"))
398         .unwrap();
399     // printk_color!(GREEN, BLACK, "mm_init() called\n");
400     static _CALL_ONCE: AtomicBool = AtomicBool::new(false);
401     if _CALL_ONCE
402         .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
403         .is_err()
404     {
405         send_to_default_serial8250_port("mm_init err\n\0".as_bytes());
406         panic!("mm_init() can only be called once");
407     }
408 
409     unsafe { X86_64MMArch::init() };
410     kdebug!("bootstrap info: {:?}", unsafe { BOOTSTRAP_MM_INFO });
411     kdebug!("phys[0]=virt[0x{:x}]", unsafe {
412         MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()
413     });
414 
415     // Initialize the memory allocator
416     unsafe { allocator_init() };
417     // enable mmio
418     mmio_init();
419 }
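// Initialization order as wired up in this file: rs_mm_init() -> mm_init() -> X86_64MMArch::init()
// (kernel layout + multiboot2 memory map) -> allocator_init() (bump-to-buddy handoff and switch to
// the newly built kernel page table) -> mmio_init().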
420 
421 unsafe fn allocator_init() {
422     let virt_offset = BOOTSTRAP_MM_INFO.unwrap().start_brk;
423     let phy_offset =
424         unsafe { MMArch::virt_2_phys(VirtAddr::new(page_align_up(virt_offset))) }.unwrap();
425 
426     let mut bump_allocator = BumpAllocator::<X86_64MMArch>::new(phy_offset.data());
427     kdebug!(
428         "BumpAllocator created, offset={:?}",
429         bump_allocator.offset()
430     );
431 
432     // Keep the address of the initial page table set up in head.S; whether to hand it back to the buddy allocator is decided later.
433     // It is not added for now out of a security concern: these initial page tables live in the kernel data segment, and returning
434     // them to the buddy allocator could pose a risk (some code may perform safety checks based on virtual addresses).
435     let _old_page_table = MMArch::table(PageTableKind::Kernel);
436 
437     let new_page_table: PhysAddr;
438     // Use the bump allocator to map every physical memory page into the page table
439     {
440         // Create the new page table with the bump allocator
441         let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =
442             crate::mm::page::PageMapper::<MMArch, _>::create(
443                 PageTableKind::Kernel,
444                 &mut bump_allocator,
445             )
446             .expect("Failed to create page mapper");
447         new_page_table = mapper.table().phys();
448         kdebug!("PageMapper created");
449 
450         // Remove the mappings that were set up in head.S at boot (without flushing the TLB yet)
451         {
452             let table = mapper.table();
453             let empty_entry = PageEntry::<MMArch>::new(0);
454             for i in 0..MMArch::PAGE_ENTRY_NUM {
455                 table
456                     .set_entry(i, empty_entry)
457                     .expect("Failed to empty page table entry");
458             }
459         }
460         kdebug!("Successfully emptied page table");
461 
462         let total_num = mem_block_manager().total_initial_memory_regions();
463         for i in 0..total_num {
464             let area = mem_block_manager().get_initial_memory_region(i).unwrap();
465             // kdebug!("area: base={:?}, size={:#x}, end={:?}", area.base, area.size, area.base + area.size);
466             for i in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {
467                 let paddr = area.base.add(i * MMArch::PAGE_SIZE);
468                 let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
469                 let flags = kernel_page_flags::<MMArch>(vaddr);
470 
471                 let flusher = mapper
472                     .map_phys(vaddr, paddr, flags)
473                     .expect("Failed to map frame");
474                 // Don't flush the TLB yet
475                 flusher.ignore();
476             }
477         }
478 
479         // Add the low-address mapping (needed until SMP initialization completes; it must be removed afterwards)
480         LowAddressRemapping::remap_at_low_address(&mut mapper);
481     }
482 
483     unsafe {
484         INITIAL_CR3_VALUE = new_page_table;
485     }
486     kdebug!(
487         "After mapping all physical memory, DragonOS used: {} KB",
488         bump_allocator.offset() / 1024
489     );
490 
491     // Initialize the buddy allocator
492     let buddy_allocator = unsafe { BuddyAllocator::<X86_64MMArch>::new(bump_allocator).unwrap() };
493     // Install the global page frame allocator
494     unsafe { set_inner_allocator(buddy_allocator) };
495     kinfo!("Successfully initialized buddy allocator");
496     // Disable output to the screen window
497     scm_disable_put_to_window();
498 
499     // make the new page table current
500     {
501         let mut binding = INNER_ALLOCATOR.lock();
502         let mut allocator_guard = binding.as_mut().unwrap();
503         kdebug!("To enable new page table.");
504         compiler_fence(Ordering::SeqCst);
505         let mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
506             PageTableKind::Kernel,
507             new_page_table,
508             &mut allocator_guard,
509         );
510         compiler_fence(Ordering::SeqCst);
511         mapper.make_current();
512         compiler_fence(Ordering::SeqCst);
513         kdebug!("New page table enabled");
514     }
515     kdebug!("Successfully enabled new page table");
516 }
517 
518 #[no_mangle]
519 pub extern "C" fn rs_test_buddy() {
520     test_buddy();
521 }
522 pub fn test_buddy() {
523     // Allocate memory, write data into it, then free it
524     // Allocate 200 MB in total
525     const TOTAL_SIZE: usize = 200 * 1024 * 1024;
526 
527     for i in 0..10 {
528         kdebug!("Test buddy, round: {i}");
529         // Holds the allocated memory blocks
530         let mut v: Vec<(PhysAddr, PageFrameCount)> = Vec::with_capacity(60 * 1024);
531         // Holds the addresses of blocks already allocated (used to detect duplicates)
532         let mut addr_set: HashSet<PhysAddr> = HashSet::new();
533 
534         let mut allocated = 0usize;
535 
536         let mut free_count = 0usize;
537 
538         while allocated < TOTAL_SIZE {
539             let mut random_size = 0u64;
540             unsafe { x86::random::rdrand64(&mut random_size) };
541             // Allocate at most 4 MB at a time
542             random_size = random_size % (1024 * 4096);
543             if random_size == 0 {
544                 continue;
545             }
546             let random_size =
547                 core::cmp::min(page_align_up(random_size as usize), TOTAL_SIZE - allocated);
548             let random_size = PageFrameCount::from_bytes(random_size.next_power_of_two()).unwrap();
549             // Allocate the page frames
550             let (paddr, allocated_frame_count) =
551                 unsafe { LockedFrameAllocator.allocate(random_size).unwrap() };
552             assert!(allocated_frame_count.data().is_power_of_two());
553             assert!(paddr.data() % MMArch::PAGE_SIZE == 0);
554             unsafe {
555                 assert!(MMArch::phys_2_virt(paddr)
556                     .as_ref()
557                     .unwrap()
558                     .check_aligned(allocated_frame_count.data() * MMArch::PAGE_SIZE));
559             }
560             allocated += allocated_frame_count.data() * MMArch::PAGE_SIZE;
561             v.push((paddr, allocated_frame_count));
562             assert!(addr_set.insert(paddr), "duplicate address: {:?}", paddr);
563 
564             // Write data into the pages
565             let vaddr = unsafe { MMArch::phys_2_virt(paddr).unwrap() };
566             let slice = unsafe {
567                 core::slice::from_raw_parts_mut(
568                     vaddr.data() as *mut u8,
569                     allocated_frame_count.data() * MMArch::PAGE_SIZE,
570                 )
571             };
572             for i in 0..slice.len() {
573                 slice[i] = ((i + unsafe { rdtsc() } as usize) % 256) as u8;
574             }
575 
576             // Randomly free one memory block
577             if !v.is_empty() {
578                 let mut random_index = 0u64;
579                 unsafe { x86::random::rdrand64(&mut random_index) };
580                 // Free with 80% probability (skip the free when random_index % 10 is 8 or 9)
581                 if random_index % 10 > 7 {
582                     continue;
583                 }
584                 random_index = random_index % v.len() as u64;
585                 let random_index = random_index as usize;
586                 let (paddr, allocated_frame_count) = v.remove(random_index);
587                 assert!(addr_set.remove(&paddr));
588                 unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
589                 free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
590             }
591         }
592 
593         kdebug!(
594             "Allocated {} MB memory, release: {} MB, no release: {} bytes",
595             allocated / 1024 / 1024,
596             free_count / 1024 / 1024,
597             (allocated - free_count)
598         );
599 
600         kdebug!("Now, to release buddy memory");
601         // 释放所有的内存
602         for (paddr, allocated_frame_count) in v {
603             unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
604             assert!(addr_set.remove(&paddr));
605             free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
606         }
607 
608         kdebug!("release done!, allocated: {allocated}, free_count: {free_count}");
609     }
610 }
611 
612 /// The global page frame allocator
613 #[derive(Debug, Clone, Copy, Hash)]
614 pub struct LockedFrameAllocator;
615 
616 impl FrameAllocator for LockedFrameAllocator {
617     unsafe fn allocate(&mut self, count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
618         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
619             return allocator.allocate(count);
620         } else {
621             return None;
622         }
623     }
624 
625     unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
626         assert!(count.data().is_power_of_two());
627         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
628             return allocator.free(address, count);
629         }
630     }
631 
632     unsafe fn usage(&self) -> PageFrameUsage {
633         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
634             return allocator.usage();
635         } else {
636             panic!("usage error");
637         }
638     }
639 }
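// Minimal allocation/free sketch with this allocator (the same pattern test_buddy() exercises):
//     let count = PageFrameCount::from_bytes(MMArch::PAGE_SIZE).unwrap();
//     let (paddr, allocated) = unsafe { LockedFrameAllocator.allocate(count).unwrap() };
//     unsafe { LockedFrameAllocator.free(paddr, allocated) };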
640 
641 /// Get the default page flags for a kernel address
642 pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> PageFlags<A> {
643     let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.clone().unwrap();
644 
645     if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
646         // Remap kernel code as executable
647         return PageFlags::new().set_execute(true).set_write(true);
648     } else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
649         // Remap kernel rodata as read-only
650         return PageFlags::new().set_execute(true);
651     } else {
652         return PageFlags::new().set_write(true).set_execute(true);
653     }
654 }
655 
656 unsafe fn set_inner_allocator(allocator: BuddyAllocator<MMArch>) {
657     static FLAG: AtomicBool = AtomicBool::new(false);
658     if FLAG
659         .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
660         .is_err()
661     {
662         panic!("Cannot set inner allocator twice!");
663     }
664     *INNER_ALLOCATOR.lock() = Some(allocator);
665 }
666 
667 /// Manager for the low-address remapping
668 ///
669 /// The low-address mapping is required until SMP initialization completes, so it must be removed once SMP initialization is done
670 pub struct LowAddressRemapping;
671 
672 impl LowAddressRemapping {
673     // Map 32 MiB
674     const REMAP_SIZE: usize = 32 * 1024 * 1024;
675 
676     pub unsafe fn remap_at_low_address(
677         mapper: &mut crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>>,
678     ) {
679         for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
680             let paddr = PhysAddr::new(i * MMArch::PAGE_SIZE);
681             let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
682             let flags = kernel_page_flags::<MMArch>(vaddr);
683 
684             let flusher = mapper
685                 .map_phys(vaddr, paddr, flags)
686                 .expect("Failed to map frame");
687             // Don't flush the TLB yet
688             flusher.ignore();
689         }
690     }
691 
692     /// Remove the low-address mapping
693     pub unsafe fn unmap_at_low_address(flush: bool) {
694         let mut mapper = KernelMapper::lock();
695         assert!(mapper.as_mut().is_some());
696         for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
697             let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
698             let (_, _, flusher) = mapper
699                 .as_mut()
700                 .unwrap()
701                 .unmap_phys(vaddr, true)
702                 .expect("Failed to unmap frame");
703             if !flush {
704                 flusher.ignore();
705             }
706         }
707     }
708 }
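// Illustrative call order (a sketch, based on the doc comment above; the exact call site is not
// in this file): once SMP initialization completes, the low mapping is removed with
//     unsafe { LowAddressRemapping::unmap_at_low_address(true) };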
709 #[no_mangle]
710 pub extern "C" fn rs_mm_init() {
711     mm_init();
712 }
713