pub mod barrier;
pub mod bump;
mod c_adapter;

use alloc::vec::Vec;
use hashbrown::HashSet;
use x86::time::rdtsc;
use x86_64::registers::model_specific::EferFlags;

use crate::driver::serial::serial8250::send_to_default_serial8250_port;
use crate::include::bindings::bindings::{
    multiboot2_get_load_base, multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
    multiboot_tag_load_base_addr_t,
};
use crate::libs::align::page_align_up;
use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
use crate::libs::spinlock::SpinLock;

use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
use crate::mm::memblock::mem_block_manager;
use crate::{
    arch::MMArch,
    mm::allocator::{buddy::BuddyAllocator, bump::BumpAllocator},
};

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{PageEntry, PageFlags};
use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr};
use crate::{kdebug, kinfo, kwarn};
use system_error::SystemError;

use core::arch::asm;
use core::ffi::c_void;
use core::fmt::Debug;
use core::mem::{self};

use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};

use super::kvm::vmx::vmcs::VmcsFields;
use super::kvm::vmx::vmx_asm_wrapper::vmx_vmread;

pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

/// Initial value of the CR3 register, i.e. the physical location of the first
/// kernel page table created during memory management initialization.
static mut INITIAL_CR3_VALUE: PhysAddr = PhysAddr::new(0);

/// Index in the PML4 of the kernel's first page table entry.
/// Entries [256, 512) of the top-level page table belong to the kernel.
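/// With PHYS_OFFSET = 0xffff_8000_0000_0000, masking to 48 bits gives
/// 0x8000_0000_0000, and shifting right by 39 yields index 256.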
static KERNEL_PML4E_NO: usize = (X86_64MMArch::PHYS_OFFSET & ((1 << 48) - 1)) >> 39;

static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);

#[derive(Clone, Copy, Debug)]
pub struct X86_64MMBootstrapInfo {
    kernel_load_base_paddr: usize,
    kernel_code_start: usize,
    kernel_code_end: usize,
    kernel_data_end: usize,
    kernel_rodata_end: usize,
    start_brk: usize,
}

pub(super) static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;

/// @brief The x86_64 memory management architecture struct
#[derive(Debug, Clone, Copy, Hash)]
pub struct X86_64MMArch;

/// Whether the XD (execute-disable) flag bit is reserved
static XD_RESERVED: AtomicBool = AtomicBool::new(false);

impl MemoryManagementArch for X86_64MMArch {
    /// 4 KiB pages
    const PAGE_SHIFT: usize = 12;

    /// Each page table entry occupies 8 bytes, so each table holds 512 entries
    const PAGE_ENTRY_SHIFT: usize = 9;

    /// Four levels of page tables (PML4T, PDPT, PDT, PT)
    const PAGE_LEVELS: usize = 4;

    /// Index of the highest valid bit in a page table entry. On x86_64, bits [0, 47]
    /// of an entry hold the address and flags, and bits [48, 51] are reserved,
    /// so the valid-bit index is 52.
    /// Note that bit 63 is the XD bit, which controls whether execution is allowed.
    const ENTRY_ADDRESS_SHIFT: usize = 52;

    const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;

    const ENTRY_FLAG_PRESENT: usize = 1 << 0;

    const ENTRY_FLAG_READONLY: usize = 0;

    const ENTRY_FLAG_READWRITE: usize = 1 << 1;

    const ENTRY_FLAG_USER: usize = 1 << 2;

    const ENTRY_FLAG_WRITE_THROUGH: usize = 1 << 3;

    const ENTRY_FLAG_CACHE_DISABLE: usize = 1 << 4;

    const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;
    /// x86_64 has no EXEC flag; only the NO_EXEC (XD) flag exists
    const ENTRY_FLAG_EXEC: usize = 0;

    const ENTRY_FLAG_ACCESSED: usize = 0;
    const ENTRY_FLAG_DIRTY: usize = 0;

    /// Offset between physical and virtual addresses:
    /// 0xffff_8000_0000_0000
    const PHYS_OFFSET: usize = Self::PAGE_NEGATIVE_MASK + (Self::PAGE_ADDRESS_SIZE >> 1);
    const KERNEL_LINK_OFFSET: usize = 0x100000;

    // Reference: https://code.dragonos.org.cn/xref/linux-6.1.9/arch/x86/include/asm/page_64_types.h#75
    const USER_END_VADDR: VirtAddr =
        VirtAddr::new((Self::PAGE_ADDRESS_SIZE >> 1) - Self::PAGE_SIZE);
    const USER_BRK_START: VirtAddr = VirtAddr::new(0x700000000000);
    const USER_STACK_START: VirtAddr = VirtAddr::new(0x6ffff0a00000);

    const FIXMAP_START_VADDR: VirtAddr = VirtAddr::new(0xffffb00000000000);
    /// The FIXMAP area is 1 MiB (256 pages) in size
    const FIXMAP_SIZE: usize = 256 * 4096;

    /// @brief Obtain the physical memory areas and initialize memory management
    unsafe fn init() {
        extern "C" {
            fn _text();
            fn _etext();
            fn _edata();
            fn _erodata();
            fn _end();
        }

        Self::init_xd_rsvd();
        let load_base_paddr = Self::get_load_base_paddr();

        let bootstrap_info = X86_64MMBootstrapInfo {
            kernel_load_base_paddr: load_base_paddr.data(),
            kernel_code_start: _text as usize,
            kernel_code_end: _etext as usize,
            kernel_data_end: _edata as usize,
            kernel_rodata_end: _erodata as usize,
            start_brk: _end as usize,
        };

        unsafe {
            BOOTSTRAP_MM_INFO = Some(bootstrap_info);
        }

        // Initialize the physical memory areas (obtained from multiboot2)
        Self::init_memory_area_from_multiboot2().expect("init memory area failed");

        kdebug!("bootstrap info: {:?}", unsafe { BOOTSTRAP_MM_INFO });
        kdebug!("phys[0]=virt[0x{:x}]", unsafe {
            MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()
        });

        // Initialize the memory allocators
        unsafe { allocator_init() };
        send_to_default_serial8250_port("x86 64 init done\n\0".as_bytes());
    }

    /// @brief Flush the TLB entry for the given virtual address
    unsafe fn invalidate_page(address: VirtAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("invlpg [{0}]", in(reg) address.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Flush all entries in the TLB
    unsafe fn invalidate_all() {
        compiler_fence(Ordering::SeqCst);
        // Reloading CR3 flushes the entire TLB
        Self::set_table(PageTableKind::User, Self::table(PageTableKind::User));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Get the physical address of the top-level page table
    unsafe fn table(table_kind: PageTableKind) -> PhysAddr {
        match table_kind {
            PageTableKind::Kernel | PageTableKind::User => {
                let paddr: usize;
                compiler_fence(Ordering::SeqCst);
                asm!("mov {}, cr3", out(reg) paddr, options(nomem, nostack, preserves_flags));
                compiler_fence(Ordering::SeqCst);
                return PhysAddr::new(paddr);
            }
            PageTableKind::EPT => {
                let eptp =
                    vmx_vmread(VmcsFields::CTRL_EPTP_PTR as u32).expect("Failed to read eptp");
                return PhysAddr::new(eptp as usize);
            }
        }
    }

    /// @brief Install the physical address of the top-level page table into the processor
    unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("mov cr3, {}", in(reg) table.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// @brief Check whether a virtual address is valid (canonical)
    fn virt_is_valid(virt: VirtAddr) -> bool {
        return virt.is_canonical();
    }

    /// Get the address of the first kernel page table created during memory management initialization
    fn initial_page_table() -> PhysAddr {
        unsafe {
            return INITIAL_CR3_VALUE;
        }
    }

    /// @brief Create a new top-level page table
    ///
    /// This function creates the page table and copies the kernel mappings into it
    ///
    /// @return the new page table
    fn setup_new_usermapper() -> Result<crate::mm::ucontext::UserMapper, SystemError> {
        let new_umapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::User, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };

        let current_ktable: KernelMapper = KernelMapper::lock();
        let copy_mapping = |pml4_entry_no| unsafe {
            let entry: PageEntry<X86_64MMArch> = current_ktable
                .table()
                .entry(pml4_entry_no)
                .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
            new_umapper.table().set_entry(pml4_entry_no, entry)
        };

        // Copy the kernel mappings
        for pml4_entry_no in KERNEL_PML4E_NO..512 {
            copy_mapping(pml4_entry_no);
        }

        return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
    }
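
    // With PAGE_SHIFT = 12, PAGE_ENTRY_SHIFT = 9 and PAGE_LEVELS = 4, the derived
    // constants below work out to: PAGE_SIZE = 4 KiB, PAGE_ADDRESS_SHIFT = 48
    // (256 TiB of virtual address space), PAGE_ENTRY_NUM = 512 entries per table,
    // and PAGE_ENTRY_SIZE = 8 bytes per entry.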
    const PAGE_SIZE: usize = 1 << Self::PAGE_SHIFT;

    const PAGE_OFFSET_MASK: usize = Self::PAGE_SIZE - 1;

    const PAGE_MASK: usize = !(Self::PAGE_OFFSET_MASK);

    const PAGE_ADDRESS_SHIFT: usize =
        Self::PAGE_LEVELS * Self::PAGE_ENTRY_SHIFT + Self::PAGE_SHIFT;

    const PAGE_ADDRESS_SIZE: usize = 1 << Self::PAGE_ADDRESS_SHIFT;

    const PAGE_ADDRESS_MASK: usize = Self::PAGE_ADDRESS_SIZE - Self::PAGE_SIZE;

    const PAGE_ENTRY_SIZE: usize = 1 << (Self::PAGE_SHIFT - Self::PAGE_ENTRY_SHIFT);

    const PAGE_ENTRY_NUM: usize = 1 << Self::PAGE_ENTRY_SHIFT;

    const PAGE_ENTRY_MASK: usize = Self::PAGE_ENTRY_NUM - 1;

    const PAGE_NEGATIVE_MASK: usize = !((Self::PAGE_ADDRESS_SIZE) - 1);

    const ENTRY_ADDRESS_SIZE: usize = 1 << Self::ENTRY_ADDRESS_SHIFT;

    const ENTRY_ADDRESS_MASK: usize = Self::ENTRY_ADDRESS_SIZE - Self::PAGE_SIZE;

    const ENTRY_FLAGS_MASK: usize = !Self::ENTRY_ADDRESS_MASK;

    unsafe fn read<T>(address: VirtAddr) -> T {
        return core::ptr::read(address.data() as *const T);
    }

    unsafe fn write<T>(address: VirtAddr, value: T) {
        core::ptr::write(address.data() as *mut T, value);
    }

    unsafe fn write_bytes(address: VirtAddr, value: u8, count: usize) {
        core::ptr::write_bytes(address.data() as *mut u8, value, count);
    }

    unsafe fn phys_2_virt(phys: PhysAddr) -> Option<VirtAddr> {
        if let Some(vaddr) = phys.data().checked_add(Self::PHYS_OFFSET) {
            return Some(VirtAddr::new(vaddr));
        } else {
            return None;
        }
    }

    unsafe fn virt_2_phys(virt: VirtAddr) -> Option<PhysAddr> {
        if let Some(paddr) = virt.data().checked_sub(Self::PHYS_OFFSET) {
            return Some(PhysAddr::new(paddr));
        } else {
            return None;
        }
    }

    #[inline(always)]
    fn make_entry(paddr: PhysAddr, page_flags: usize) -> usize {
        return paddr.data() | page_flags;
    }
}

impl X86_64MMArch {
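    /// Query multiboot2 for the physical base address at which the kernel image
    /// was loaded. Falls back to 1 MiB (0x100000) if no load-base tag is reported.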
    unsafe fn get_load_base_paddr() -> PhysAddr {
        let mut mb2_lb_info: [multiboot_tag_load_base_addr_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("get_load_base_paddr begin\n\0".as_bytes());

        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_load_base),
            &mut mb2_lb_info as *mut [multiboot_tag_load_base_addr_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );

        if mb2_count == 0 {
            send_to_default_serial8250_port(
                "get_load_base_paddr mb2_count == 0, default to 1MB\n\0".as_bytes(),
            );
            return PhysAddr::new(0x100000);
        }

        let phys = mb2_lb_info[0].load_base_addr as usize;

        return PhysAddr::new(phys);
    }
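
    /// Read the memory map from multiboot2 and register every available RAM
    /// region with the memblock manager.
    ///
    /// Returns the number of memory areas that were added.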
    unsafe fn init_memory_area_from_multiboot2() -> Result<usize, SystemError> {
        // This array holds the memory area information (obtained from the C side)
        let mut mb2_mem_info: [multiboot_mmap_entry_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 begin\n\0".as_bytes());

        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_memory),
            &mut mb2_mem_info as *mut [multiboot_mmap_entry_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 2\n\0".as_bytes());

        let mb2_count = mb2_count as usize;
        let mut areas_count = 0usize;
        let mut total_mem_size = 0usize;
        for i in 0..mb2_count {
            // Only use the memory area if its type is 1 (RAM)
            if mb2_mem_info[i].type_ == 1 {
                // Skip the memory area if its len is 0
                if mb2_mem_info[i].len == 0 {
                    continue;
                }

                total_mem_size += mb2_mem_info[i].len as usize;

                mem_block_manager()
                    .add_block(
                        PhysAddr::new(mb2_mem_info[i].addr as usize),
                        mb2_mem_info[i].len as usize,
                    )
                    .unwrap_or_else(|e| {
                        kwarn!(
                            "Failed to add memory block: base={:#x}, size={:#x}, error={:?}",
                            mb2_mem_info[i].addr,
                            mb2_mem_info[i].len,
                            e
                        );
                    });
                areas_count += 1;
            }
        }
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 end\n\0".as_bytes());
        kinfo!("Total memory size: {} MB, total areas from multiboot2: {mb2_count}, valid areas: {areas_count}", total_mem_size / 1024 / 1024);
        return Ok(areas_count);
    }
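
    /// Check whether the CPU has the NX/XD feature enabled (EFER.NXE). If it is
    /// not enabled, record that the XD bit must be treated as reserved.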
    fn init_xd_rsvd() {
        // Read the IA32_EFER register
        let efer: EferFlags = x86_64::registers::model_specific::Efer::read();
        if !efer.contains(EferFlags::NO_EXECUTE_ENABLE) {
            // NO_EXECUTE_ENABLE is false, so mark XD_RESERVED as true
            kdebug!("NO_EXECUTE_ENABLE is false, set XD_RESERVED to true");
            XD_RESERVED.store(true, Ordering::Relaxed);
        }
        compiler_fence(Ordering::SeqCst);
    }

    /// Check whether the XD flag bit is reserved
    pub fn is_xd_reserved() -> bool {
        // return XD_RESERVED.load(Ordering::Relaxed);

        // Execute-disable is not supported yet, so always return true.
        // The reason is that the page-level XD bit cannot currently be set correctly,
        // which would trigger page faults.
        return true;
    }
}

impl VirtAddr {
    /// @brief Check whether the virtual address is canonical
    #[inline(always)]
    pub fn is_canonical(self) -> bool {
        let x = self.data() & X86_64MMArch::PHYS_OFFSET;
        // If x is 0, the high bits of the virtual address are all 0: a valid user address.
        // If x equals PHYS_OFFSET, the high bits are all 1: a valid kernel address.
        return x == 0 || x == X86_64MMArch::PHYS_OFFSET;
    }
}
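
/// Bootstrap the physical memory allocators: build a fresh kernel page table with a
/// bump allocator, map all physical memory (plus a temporary low-address mapping),
/// hand the remaining memory to the buddy allocator, and switch CR3 to the new
/// page table.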
unsafe fn allocator_init() {
    let virt_offset = BOOTSTRAP_MM_INFO.unwrap().start_brk;
    let phy_offset =
        unsafe { MMArch::virt_2_phys(VirtAddr::new(page_align_up(virt_offset))) }.unwrap();

    let mut bump_allocator = BumpAllocator::<X86_64MMArch>::new(phy_offset.data());
    kdebug!(
        "BumpAllocator created, offset={:?}",
        bump_allocator.offset()
    );

    // Save the address of the initial page table set up in head.S; whether to return it to
    // the buddy allocator can be decided later. It is not returned now out of caution:
    // these initial page tables live in the kernel data segment, and handing them back to
    // the buddy allocator could introduce security risks (some code may perform safety
    // checks based on virtual addresses).
    let _old_page_table = MMArch::table(PageTableKind::Kernel);

    let new_page_table: PhysAddr;
    // Use the bump allocator to map every physical memory page into the page table
    {
        // Create the new page table with the bump allocator
        let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =
            crate::mm::page::PageMapper::<MMArch, _>::create(
                PageTableKind::Kernel,
                &mut bump_allocator,
            )
            .expect("Failed to create page mapper");
        new_page_table = mapper.table().phys();
        kdebug!("PageMapper created");

        // Remove the mappings originally set up in head.S (without flushing the TLB for now)
        {
            let table = mapper.table();
            let empty_entry = PageEntry::<MMArch>::from_usize(0);
            for i in 0..MMArch::PAGE_ENTRY_NUM {
                table
                    .set_entry(i, empty_entry)
                    .expect("Failed to empty page table entry");
            }
        }
        kdebug!("Successfully emptied page table");

        let total_num = mem_block_manager().total_initial_memory_regions();
        for i in 0..total_num {
            let area = mem_block_manager().get_initial_memory_region(i).unwrap();
            // kdebug!("area: base={:?}, size={:#x}, end={:?}", area.base, area.size, area.base + area.size);
            for i in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {
                let paddr = area.base.add(i * MMArch::PAGE_SIZE);
                let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
                let flags = kernel_page_flags::<MMArch>(vaddr);

                let flusher = mapper
                    .map_phys(vaddr, paddr, flags)
                    .expect("Failed to map frame");
                // Do not flush the TLB yet
                flusher.ignore();
            }
        }

        // Add the low-address mappings (needed until SMP initialization finishes;
        // they must be removed once it is done)
        LowAddressRemapping::remap_at_low_address(&mut mapper);
    }

    unsafe {
        INITIAL_CR3_VALUE = new_page_table;
    }
    kdebug!(
        "After mapping all physical memory, DragonOS used: {} KB",
        bump_allocator.offset() / 1024
    );

    // Initialize the buddy allocator
    let buddy_allocator = unsafe { BuddyAllocator::<X86_64MMArch>::new(bump_allocator).unwrap() };
    // Install it as the global page frame allocator
    unsafe { set_inner_allocator(buddy_allocator) };
    kinfo!("Successfully initialized buddy allocator");
    // Disable output to the graphical window
    scm_disable_put_to_window();

    // Make the new page table current
    {
        let mut binding = INNER_ALLOCATOR.lock();
        let mut allocator_guard = binding.as_mut().unwrap();
        kdebug!("To enable new page table.");
        compiler_fence(Ordering::SeqCst);
        let mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
            PageTableKind::Kernel,
            new_page_table,
            &mut allocator_guard,
        );
        compiler_fence(Ordering::SeqCst);
        mapper.make_current();
        compiler_fence(Ordering::SeqCst);
        kdebug!("New page table enabled");
    }
    kdebug!("Successfully enabled new page table");
}

#[no_mangle]
pub extern "C" fn rs_test_buddy() {
    test_buddy();
}
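
/// Stress-test the buddy allocator: repeatedly allocate random-sized page-frame
/// blocks (up to 200 MiB per round), write to them, randomly free some along the
/// way, and finally release everything while checking for duplicate or misaligned
/// addresses.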
pub fn test_buddy() {
    // Allocate memory, write data into it, then free it.
    // A total of 200 MiB is allocated per round.
    const TOTAL_SIZE: usize = 200 * 1024 * 1024;

    for i in 0..10 {
        kdebug!("Test buddy, round: {i}");
        // Holds the allocated memory blocks
        let mut v: Vec<(PhysAddr, PageFrameCount)> = Vec::with_capacity(60 * 1024);
        // Holds the addresses already allocated (used to detect duplicates)
        let mut addr_set: HashSet<PhysAddr> = HashSet::new();

        let mut allocated = 0usize;

        let mut free_count = 0usize;

        while allocated < TOTAL_SIZE {
            let mut random_size = 0u64;
            unsafe { x86::random::rdrand64(&mut random_size) };
            // Allocate at most 4 MiB at a time
            random_size = random_size % (1024 * 4096);
            if random_size == 0 {
                continue;
            }
            let random_size =
                core::cmp::min(page_align_up(random_size as usize), TOTAL_SIZE - allocated);
            let random_size = PageFrameCount::from_bytes(random_size.next_power_of_two()).unwrap();
            // Allocate the frames
            let (paddr, allocated_frame_count) =
                unsafe { LockedFrameAllocator.allocate(random_size).unwrap() };
            assert!(allocated_frame_count.data().is_power_of_two());
            assert!(paddr.data() % MMArch::PAGE_SIZE == 0);
            unsafe {
                assert!(MMArch::phys_2_virt(paddr)
                    .as_ref()
                    .unwrap()
                    .check_aligned(allocated_frame_count.data() * MMArch::PAGE_SIZE));
            }
            allocated += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            v.push((paddr, allocated_frame_count));
            assert!(addr_set.insert(paddr), "duplicate address: {:?}", paddr);

            // Write data into the block
            let vaddr = unsafe { MMArch::phys_2_virt(paddr).unwrap() };
            let slice = unsafe {
                core::slice::from_raw_parts_mut(
                    vaddr.data() as *mut u8,
                    allocated_frame_count.data() * MMArch::PAGE_SIZE,
                )
            };
            for i in 0..slice.len() {
                slice[i] = ((i + unsafe { rdtsc() } as usize) % 256) as u8;
            }

            // Randomly free one memory block
            if !v.is_empty() {
                let mut random_index = 0u64;
                unsafe { x86::random::rdrand64(&mut random_index) };
                // Free with ~80% probability (skip when random_index % 10 is 8 or 9)
                if random_index % 10 > 7 {
                    continue;
                }
                random_index = random_index % v.len() as u64;
                let random_index = random_index as usize;
                let (paddr, allocated_frame_count) = v.remove(random_index);
                assert!(addr_set.remove(&paddr));
                unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
                free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            }
        }

        kdebug!(
            "Allocated {} MB memory, release: {} MB, no release: {} bytes",
            allocated / 1024 / 1024,
            free_count / 1024 / 1024,
            (allocated - free_count)
        );

        kdebug!("Now, to release buddy memory");
        // Free all remaining memory
        for (paddr, allocated_frame_count) in v {
            unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
            assert!(addr_set.remove(&paddr));
            free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
        }

        kdebug!("release done!, allocated: {allocated}, free_count: {free_count}");
    }
}

/// The global page frame allocator
#[derive(Debug, Clone, Copy, Hash)]
pub struct LockedFrameAllocator;
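
// The allocator itself is stateless; every operation delegates to the global buddy
// allocator stored in INNER_ALLOCATOR, taken with the IRQ-saving lock.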
impl FrameAllocator for LockedFrameAllocator {
    unsafe fn allocate(&mut self, count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.allocate(count);
        } else {
            return None;
        }
    }

    unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
        assert!(count.data().is_power_of_two());
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.free(address, count);
        }
    }

    unsafe fn usage(&self) -> PageFrameUsage {
        if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
            return allocator.usage();
        } else {
            panic!("usage error");
        }
    }
}

/// Get the default page flags for a kernel virtual address
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> PageFlags<A> {
    let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.clone().unwrap();

    if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
        // Kernel code section: executable and writable
        return PageFlags::new().set_execute(true).set_write(true);
    } else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
        // Kernel rodata section: executable but not writable
        return PageFlags::new().set_execute(true);
    } else {
        return PageFlags::new().set_write(true).set_execute(true);
    }
}
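
/// Install the buddy allocator as the global page frame allocator.
/// Panics if called more than once.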
unsafe fn set_inner_allocator(allocator: BuddyAllocator<MMArch>) {
    static FLAG: AtomicBool = AtomicBool::new(false);
    if FLAG
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_err()
    {
        panic!("Cannot set inner allocator twice!");
    }
    *INNER_ALLOCATOR.lock() = Some(allocator);
}

/// Manager for the low-address remapping
///
/// Before SMP initialization completes, the kernel needs mappings at low addresses;
/// once SMP initialization is done, these mappings must be removed again.
pub struct LowAddressRemapping;

impl LowAddressRemapping {
    // Map 64 MiB at low addresses
    const REMAP_SIZE: usize = 64 * 1024 * 1024;
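
    /// Identity-map the first REMAP_SIZE bytes of physical memory (vaddr == paddr)
    /// into the given page mapper, without flushing the TLB.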
    pub unsafe fn remap_at_low_address(
        mapper: &mut crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>>,
    ) {
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let paddr = PhysAddr::new(i * MMArch::PAGE_SIZE);
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let flags = kernel_page_flags::<MMArch>(vaddr);

            let flusher = mapper
                .map_phys(vaddr, paddr, flags)
                .expect("Failed to map frame");
            // Do not flush the TLB yet
            flusher.ignore();
        }
    }

    /// Remove the low-address mappings
    pub unsafe fn unmap_at_low_address(flush: bool) {
        let mut mapper = KernelMapper::lock();
        assert!(mapper.as_mut().is_some());
        for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
            let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
            let (_, _, flusher) = mapper
                .as_mut()
                .unwrap()
                .unmap_phys(vaddr, true)
                .expect("Failed to unmap frame");
            if !flush {
                flusher.ignore();
            }
        }
    }
}