//! This file provides some simple memory-mapping facilities for use during early boot,
//! before the memory manager has been initialized.
//!
//! The mapping relationship is as follows:
//!
//! Virtual addresses 0-100M and virtual addresses 0x8000_0000_0000 - 0x8000_0640_0000 are
//! remapped onto each other. That is, their second-level page tables occupy entries 0 and 256
//! of the top-level page table.
//!
//! For x86:
//! It is assumed that the kernel boot code has already filled in the page tables for the first
//! 100M, of which the first 50M are actually mapped to memory; for the rest, only the page
//! tables themselves were created, with all entries set to 0.
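//!
//! A rough sketch of that relationship (illustrative only; `HIGH_BASE` below is just the
//! offset between the two ranges, not a constant defined in this module):
//!
//! ```ignore
//! // The two ranges differ by a fixed offset, so a low address and the corresponding
//! // high address refer to the same physical memory, because they share the same
//! // second-level page tables.
//! const HIGH_BASE: usize = 0x8000_0000_0000;
//! let low: usize = 0x10_0000; // 1 MiB, inside the 0-100M range
//! let high: usize = HIGH_BASE + low; // 0x8000_0010_0000, the remapped alias
//! ```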

use bitmap::{traits::BitMapOps, StaticBitmap};

use crate::{
    libs::spinlock::SpinLock,
    mm::{MMArch, MemoryManagementArch, PhysAddr},
};

use core::marker::PhantomData;

use super::{
    allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage},
    page::EntryFlags,
    PageTableKind, VirtAddr,
};

/// Bitmap and pages used to store the early remap page tables
pub static EARLY_IOREMAP_PAGES: SpinLock<EarlyIoRemapPages> =
    SpinLock::new(EarlyIoRemapPages::new());

/// A page table used for early remapping
#[repr(C)]
#[repr(align(4096))]
#[derive(Clone, Copy)]
struct EarlyRemapPage {
    /// Page-table entries for one page (PAGE_SIZE bytes in total)
    data: [u64; MMArch::PAGE_SIZE / core::mem::size_of::<u64>()],
}

impl EarlyRemapPage {
    /// Zero out the page
    fn zero(&mut self) {
        self.data.fill(0);
    }
}

#[repr(C)]
pub struct EarlyIoRemapPages {
    pages: [EarlyRemapPage; Self::EARLY_REMAP_PAGES_NUM],
    bmp: StaticBitmap<{ Self::EARLY_REMAP_PAGES_NUM }>,
}

impl EarlyIoRemapPages {
    /// Number of page tables reserved for mapping memory before the memory manager is initialized
    pub const EARLY_REMAP_PAGES_NUM: usize = 256;
    pub const fn new() -> Self {
        Self {
            pages: [EarlyRemapPage {
                data: [0; MMArch::PAGE_SIZE / core::mem::size_of::<u64>()],
            }; Self::EARLY_REMAP_PAGES_NUM],
            bmp: StaticBitmap::new(),
        }
    }

    /// Allocate a page.
    ///
    /// On success, returns the virtual address of the page.
    ///
    /// On failure, returns `None`.
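    ///
    /// A minimal usage sketch (illustrative only, mirroring how the pseudo allocator
    /// below drives this type through the global `EARLY_IOREMAP_PAGES`):
    ///
    /// ```ignore
    /// let mut pages = EARLY_IOREMAP_PAGES.lock_irqsave();
    /// if let Some(vaddr) = pages.allocate_page() {
    ///     // ... use the page as an early page table ...
    ///     pages.free_page(vaddr);
    /// }
    /// ```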
    pub fn allocate_page(&mut self) -> Option<VirtAddr> {
        if let Some(index) = self.bmp.first_false_index() {
            self.bmp.set(index, true);
            // Zero out the page
            self.pages[index].zero();

            let p = &self.pages[index] as *const EarlyRemapPage as usize;
            let vaddr = VirtAddr::new(p);
            assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));
            return Some(vaddr);
        } else {
            return None;
        }
    }

    pub fn free_page(&mut self, addr: VirtAddr) {
        // Check that the address actually belongs to the reserved pages
        let start_vaddr = &self.pages[0] as *const EarlyRemapPage as usize;
        let offset = addr.data() - start_vaddr;
        let index = offset / MMArch::PAGE_SIZE;
        if index < Self::EARLY_REMAP_PAGES_NUM {
            assert!(self.bmp.get(index).unwrap());
            self.bmp.set(index, false);
        }
    }
}

/// Pseudo allocator that hands out pages from the statically reserved `EARLY_IOREMAP_PAGES` pool
struct PseudoAllocator<MMA> {
    phantom: PhantomData<MMA>,
}

impl<MMA: MemoryManagementArch> PseudoAllocator<MMA> {
    pub const fn new() -> Self {
        Self {
            phantom: PhantomData,
        }
    }
}

/// Implementation of `FrameAllocator` for `PseudoAllocator`
impl<MMA: MemoryManagementArch> FrameAllocator for PseudoAllocator<MMA> {
    unsafe fn allocate(&mut self, count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
        assert!(count.data() == 1);
        let vaddr = EARLY_IOREMAP_PAGES.lock_irqsave().allocate_page()?;
        let paddr = MMA::virt_2_phys(vaddr)?;
        return Some((paddr, count));
    }

    unsafe fn free(&mut self, address: PhysAddr, count: PageFrameCount) {
        assert_eq!(count.data(), 1);
        assert!(address.check_aligned(MMA::PAGE_SIZE));

        let vaddr = MMA::phys_2_virt(address);
        if let Some(vaddr) = vaddr {
            EARLY_IOREMAP_PAGES.lock_irqsave().free_page(vaddr);
        }
    }

    /// Get the page frame usage of the memory region
    unsafe fn usage(&self) -> PageFrameUsage {
        // Not supported yet
        panic!("PseudoAllocator can't get page frame usage");
    }
}

/// Use the pseudo mapper to map physical memory to virtual memory.
///
/// ## Safety
///
/// The caller must ensure that the memory manager has not been initialized yet,
/// otherwise the behavior is undefined.
///
/// In addition, the kernel boot code must have filled in the memory mappings for the
/// first 100M at 4K-page granularity (see the comment at the top of this file for details).
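///
/// ## Example
///
/// A minimal sketch (illustrative only; the addresses below are made up):
///
/// ```ignore
/// // Map 4 page frames of an MMIO region during early boot.
/// let vaddr = VirtAddr::new(0xffff_a100_0000_0000);
/// let paddr = PhysAddr::new(0xfec0_0000);
/// unsafe { pseudo_map_phys(vaddr, paddr, PageFrameCount::new(4)) };
/// ```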
#[inline(never)]
pub unsafe fn pseudo_map_phys(vaddr: VirtAddr, paddr: PhysAddr, count: PageFrameCount) {
    let flags: EntryFlags<MMArch> = EntryFlags::new().set_write(true);

    pseudo_map_phys_with_flags(vaddr, paddr, count, flags);
}

/// Use the pseudo mapper to map physical memory to virtual memory
/// with READ_ONLY and EXECUTE flags.
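///
/// A minimal sketch (illustrative only, reusing the made-up addresses from the example above):
///
/// ```ignore
/// unsafe { pseudo_map_phys_ro(vaddr, paddr, PageFrameCount::new(4)) };
/// ```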
#[inline(never)]
pub unsafe fn pseudo_map_phys_ro(vaddr: VirtAddr, paddr: PhysAddr, count: PageFrameCount) {
    let flags: EntryFlags<MMArch> = EntryFlags::new().set_write(false).set_execute(true);

    pseudo_map_phys_with_flags(vaddr, paddr, count, flags);
}

/// Use the pseudo mapper to map physical memory to virtual memory with the given flags.
///
/// ## Safety
///
/// Same requirements as [`pseudo_map_phys`].
#[inline(never)]
pub unsafe fn pseudo_map_phys_with_flags(
    vaddr: VirtAddr,
    paddr: PhysAddr,
    count: PageFrameCount,
    flags: EntryFlags<MMArch>,
) {
    assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));
    assert!(paddr.check_aligned(MMArch::PAGE_SIZE));

    let mut pseudo_allocator = PseudoAllocator::<MMArch>::new();

    let mut mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
        PageTableKind::Kernel,
        MMArch::table(PageTableKind::Kernel),
        &mut pseudo_allocator,
    );

    for i in 0..count.data() {
        let vaddr = vaddr + i * MMArch::PAGE_SIZE;
        let paddr = paddr + i * MMArch::PAGE_SIZE;
        let flusher: crate::mm::page::PageFlush<MMArch> =
            mapper.map_phys(vaddr, paddr, flags).unwrap();
        // Skip the per-page flush; the page table is (re)loaded as current below.
        flusher.ignore();
    }

    mapper.make_current();
}

/// Unmap physical memory from virtual memory.
///
/// ## Note
///
/// This function is used during early boot, before the memory manager has been initialized.
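///
/// A minimal sketch (illustrative only, undoing the 4-page mapping from the
/// `pseudo_map_phys` example above):
///
/// ```ignore
/// unsafe { pseudo_unmap_phys(vaddr, PageFrameCount::new(4)) };
/// ```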
#[inline(never)]
pub unsafe fn pseudo_unmap_phys(vaddr: VirtAddr, count: PageFrameCount) {
    assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));

    let mut pseudo_allocator = PseudoAllocator::<MMArch>::new();

    let mut mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
        PageTableKind::Kernel,
        MMArch::table(PageTableKind::Kernel),
        &mut pseudo_allocator,
    );

    for i in 0..count.data() {
        let vaddr = vaddr + i * MMArch::PAGE_SIZE;
        if let Some((_, _, flusher)) = mapper.unmap_phys(vaddr, true) {
            flusher.ignore();
        };
    }

    mapper.make_current();
}
213