xref: /DragonOS/kernel/src/mm/no_init.rs (revision 74ffde667e5e7f4ac8ce6d5a5ec2c1403f36cbb0)
1 //! 该文件用于系统启动早期,内存管理器初始化之前,提供一些简单的内存映射功能
2 //!
3 //! 映射关系为:
4 //!
5 //! 虚拟地址 0-100M与虚拟地址 0x8000_0000_0000 - 0x8000_0640_0000 之间具有重映射关系。
6 //! 也就是说,他们的第二级页表在最顶级页表中,占用了第0和第256个页表项。
7 //!
8 //! 对于x86:
9 //! 这里假设在内核引导文件中,已经填写了前100M的页表,其中,前50M是真实映射到内存的,后面的仅仅创建了页表,表项全部为0。
10 
11 use bitmap::{traits::BitMapOps, StaticBitmap};
12 
13 use crate::{
14     libs::spinlock::SpinLock,
15     mm::{MMArch, MemoryManagementArch, PhysAddr},
16 };
17 
18 use core::marker::PhantomData;
19 
20 use super::{
21     allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage},
22     page::PageFlags,
23     PageTableKind, VirtAddr,
24 };
25 
/// Bitmap and backing pages for the early-boot remap page tables.
///
/// Mutated through [`PseudoAllocator`] before the real memory manager
/// exists; guarded by a spinlock (taken with `lock_irqsave` at the use
/// sites in this file).
static EARLY_IOREMAP_PAGES: SpinLock<EarlyIoRemapPages> = SpinLock::new(EarlyIoRemapPages::new());
28 
29 /// 早期重映射使用的页表
30 #[repr(C)]
31 #[repr(align(4096))]
32 #[derive(Clone, Copy)]
33 struct EarlyRemapPage {
34     data: [u64; MMArch::PAGE_SIZE],
35 }
36 
37 impl EarlyRemapPage {
38     /// 清空数据
39     fn zero(&mut self) {
40         self.data.fill(0);
41     }
42 }
43 
44 #[repr(C)]
45 struct EarlyIoRemapPages {
46     pages: [EarlyRemapPage; Self::EARLY_REMAP_PAGES_NUM],
47     bmp: StaticBitmap<{ Self::EARLY_REMAP_PAGES_NUM }>,
48 }
49 
50 impl EarlyIoRemapPages {
51     /// 预留的用于在内存管理初始化之前,映射内存所使用的页表数量
52     pub const EARLY_REMAP_PAGES_NUM: usize = 256;
53     pub const fn new() -> Self {
54         Self {
55             pages: [EarlyRemapPage {
56                 data: [0; MMArch::PAGE_SIZE],
57             }; Self::EARLY_REMAP_PAGES_NUM],
58             bmp: StaticBitmap::new(),
59         }
60     }
61 
62     /// 分配一个页面
63     ///
64     /// 如果成功,返回虚拟地址
65     ///
66     /// 如果失败,返回None
67     pub fn allocate_page(&mut self) -> Option<VirtAddr> {
68         if let Some(index) = self.bmp.first_false_index() {
69             self.bmp.set(index, true);
70             // 清空数据
71             self.pages[index].zero();
72 
73             let p = &self.pages[index] as *const EarlyRemapPage as usize;
74             let vaddr = VirtAddr::new(p);
75             assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));
76             return Some(vaddr);
77         } else {
78             return None;
79         }
80     }
81 
82     pub fn free_page(&mut self, addr: VirtAddr) {
83         // 判断地址是否合法
84         let start_vaddr = &self.pages[0] as *const EarlyRemapPage as usize;
85         let offset = addr.data() - start_vaddr;
86         let index = offset / MMArch::PAGE_SIZE;
87         if index < Self::EARLY_REMAP_PAGES_NUM {
88             assert_eq!(self.bmp.get(index).unwrap(), true);
89             self.bmp.set(index, false);
90         }
91     }
92 }
93 
94 /// 伪分配器
95 struct PseudoAllocator<MMA> {
96     phantom: PhantomData<MMA>,
97 }
98 
99 impl<MMA: MemoryManagementArch> PseudoAllocator<MMA> {
100     pub const fn new() -> Self {
101         Self {
102             phantom: PhantomData,
103         }
104     }
105 }
106 
/// `FrameAllocator` implementation for [`PseudoAllocator`].
/// (The panic message below still says "NoInitAllocator" — the type's old
/// name; kept as-is since it is a runtime string.)
///
/// "Frames" handed out here are not taken from free physical memory: they
/// are the statically reserved [`EARLY_IOREMAP_PAGES`], translated to
/// physical addresses, so page tables can be built before the real frame
/// allocator exists.
impl<MMA: MemoryManagementArch> FrameAllocator for PseudoAllocator<MMA> {
    unsafe fn allocate(&mut self, count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
        // The early pool only supports single-frame allocations.
        assert!(count.data() == 1);
        let vaddr = EARLY_IOREMAP_PAGES.lock_irqsave().allocate_page()?;
        // NOTE(review): if virt_2_phys fails here, the page allocated above
        // is never returned to the pool — presumably unreachable for
        // addresses inside the static pool, but worth confirming.
        let paddr = MMA::virt_2_phys(vaddr)?;
        return Some((paddr, count));
    }

    unsafe fn free(&mut self, address: PhysAddr, count: PageFrameCount) {
        assert_eq!(count.data(), 1);
        assert!(address.check_aligned(MMA::PAGE_SIZE));
        // Translate back to the virtual address inside the static pool;
        // addresses that do not translate are silently ignored.
        let vaddr = MMA::phys_2_virt(address);
        if let Some(vaddr) = vaddr {
            EARLY_IOREMAP_PAGES.lock_irqsave().free_page(vaddr);
        }
    }

    /// Report page-frame usage statistics.
    ///
    /// Not supported by the pseudo allocator; always panics.
    unsafe fn usage(&self) -> PageFrameUsage {
        // Not supported yet.
        panic!("NoInitAllocator can't get page frame usage");
    }
}
132 
133 /// Use pseudo mapper to map physical memory to virtual memory.
134 ///
135 /// ## Safety
136 ///
137 /// 调用该函数时,必须保证内存管理器尚未初始化。否则将导致未定义的行为
138 ///
139 /// 并且,内核引导文件必须以4K页为粒度,填写了前100M的内存映射关系。(具体以本文件开头的注释为准)
140 #[inline(never)]
141 pub unsafe fn pseudo_map_phys(vaddr: VirtAddr, paddr: PhysAddr, count: PageFrameCount) {
142     assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));
143     assert!(paddr.check_aligned(MMArch::PAGE_SIZE));
144 
145     let mut pseudo_allocator = PseudoAllocator::<MMArch>::new();
146 
147     let mut mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
148         PageTableKind::Kernel,
149         MMArch::table(PageTableKind::Kernel),
150         &mut pseudo_allocator,
151     );
152 
153     let flags: PageFlags<MMArch> = PageFlags::new().set_write(true).set_execute(true);
154 
155     for i in 0..count.data() {
156         let vaddr = vaddr + i * MMArch::PAGE_SIZE;
157         let paddr = paddr + i * MMArch::PAGE_SIZE;
158         let flusher = mapper.map_phys(vaddr, paddr, flags).unwrap();
159         flusher.ignore();
160     }
161 
162     mapper.make_current();
163 }
164 
165 /// Unmap physical memory from virtual memory.
166 ///
167 /// ## 说明
168 ///
169 /// 该函数在系统启动早期,内存管理尚未初始化的时候使用
170 #[inline(never)]
171 pub unsafe fn pseudo_unmap_phys(vaddr: VirtAddr, count: PageFrameCount) {
172     assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));
173     assert!(count.data() == 1);
174 
175     let mut pseudo_allocator = PseudoAllocator::<MMArch>::new();
176 
177     let mut mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
178         PageTableKind::Kernel,
179         MMArch::table(PageTableKind::Kernel),
180         &mut pseudo_allocator,
181     );
182 
183     for i in 0..count.data() {
184         let vaddr = vaddr + i * MMArch::PAGE_SIZE;
185         mapper.unmap_phys(vaddr, true).map(|(_, _, flusher)| {
186             flusher.ignore();
187         });
188     }
189 
190     mapper.make_current();
191 }
192