xref: /DragonOS/kernel/src/arch/riscv64/mm/mod.rs (revision cf7f801e1d50ee5b04cb728e4251a57f4183bfbc)
1 use riscv::register::satp;
2 use sbi_rt::{HartMask, SbiRet};
3 use system_error::SystemError;
4 
5 use crate::{
6     arch::MMArch,
7     driver::open_firmware::fdt::open_firmware_fdt_driver,
8     libs::spinlock::SpinLock,
9     mm::{
10         allocator::{
11             buddy::BuddyAllocator,
12             page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage, PhysPageFrame},
13         },
14         kernel_mapper::KernelMapper,
15         page::{EntryFlags, PageEntry, PAGE_1G_SHIFT},
16         ucontext::UserMapper,
17         MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr, VmFlags,
18     },
19     smp::cpu::ProcessorId,
20 };
21 
22 use self::init::{riscv_mm_init, INITIAL_PGTABLE_VALUE};
23 
24 pub mod bump;
25 pub(super) mod init;
26 
27 pub type PageMapper = crate::mm::page::PageMapper<RiscV64MMArch, LockedFrameAllocator>;
28 
29 /// Physical address at which the kernel begins
30 pub(self) static mut KERNEL_BEGIN_PA: PhysAddr = PhysAddr::new(0);
31 /// Physical address at which the kernel ends
32 pub(self) static mut KERNEL_END_PA: PhysAddr = PhysAddr::new(0);
33 /// Virtual address at which the kernel begins
34 pub(self) static mut KERNEL_BEGIN_VA: VirtAddr = VirtAddr::new(0);
35 /// Virtual address at which the kernel ends
36 pub(self) static mut KERNEL_END_VA: VirtAddr = VirtAddr::new(0);
37 
38 pub(self) static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);
39 
40 /// RiscV64 memory management architecture struct (Sv39)
41 #[derive(Debug, Clone, Copy, Hash)]
42 pub struct RiscV64MMArch;
43 
44 impl RiscV64MMArch {
45     /// Invalidate the pages of the given virtual address range in a remote CPU's TLB
46     #[allow(dead_code)]
47     pub fn remote_invalidate_page(
48         cpu: ProcessorId,
49         address: VirtAddr,
50         size: usize,
51     ) -> Result<(), SbiRet> {
52         let r = sbi_rt::remote_sfence_vma(Into::into(cpu), address.data(), size);
53         if r.is_ok() {
54             return Ok(());
55         } else {
56             return Err(r);
57         }
58     }
59 
60     /// Invalidate all TLB entries on the specified remote CPU
61     #[allow(dead_code)]
62     pub fn remote_invalidate_all(cpu: ProcessorId) -> Result<(), SbiRet> {
63         let r = Self::remote_invalidate_page(
64             cpu,
65             VirtAddr::new(0),
66             1 << RiscV64MMArch::ENTRY_ADDRESS_SHIFT,
67         );
68 
69         return r;
70     }
71 
72     pub fn remote_invalidate_all_with_mask(mask: HartMask) -> Result<(), SbiRet> {
73         let r = sbi_rt::remote_sfence_vma(mask, 0, 1 << RiscV64MMArch::ENTRY_ADDRESS_SHIFT);
74         if r.is_ok() {
75             return Ok(());
76         } else {
77             return Err(r);
78         }
79     }
80 }
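
// A minimal usage sketch added for illustration (the helper name is hypothetical
// and not used elsewhere): ask the SBI firmware to flush all TLB entries on
// hart 1 after a shared kernel mapping changes. `HartMask::from_mask_base`
// comes from the sbi-rt crate imported at the top of this file.
#[allow(dead_code)]
fn example_remote_tlb_shootdown() -> Result<(), SbiRet> {
    // Bit i of the mask selects hart (base + i); here: base 0, hart 1 only.
    let mask = HartMask::from_mask_base(0b10, 0);
    RiscV64MMArch::remote_invalidate_all_with_mask(mask)
}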
81 
82 /// Index of the start of kernel space in the top-level page table
83 const KERNEL_TOP_PAGE_ENTRY_NO: usize = (RiscV64MMArch::PHYS_OFFSET
84     & ((1 << RiscV64MMArch::ENTRY_ADDRESS_SHIFT) - 1))
85     >> (RiscV64MMArch::ENTRY_ADDRESS_SHIFT - RiscV64MMArch::PAGE_ENTRY_SHIFT);
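// Worked example: with PHYS_OFFSET = 0xffff_ffc0_0000_0000, ENTRY_ADDRESS_SHIFT = 39
// and PAGE_ENTRY_SHIFT = 9, this evaluates to
//   (0xffff_ffc0_0000_0000 & 0x0000_007f_ffff_ffff) >> 30
// = 0x0000_0040_0000_0000 >> 30
// = 256,
// i.e. kernel space occupies the upper half (entries 256..512) of the
// 512-entry Sv39 root page table.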
86 
87 impl MemoryManagementArch for RiscV64MMArch {
88     /// riscv64 does not support page-fault handling yet
89     const PAGE_FAULT_ENABLED: bool = false;
90 
91     const PAGE_SHIFT: usize = 12;
92 
93     const PAGE_ENTRY_SHIFT: usize = 9;
94 
95     /// Sv39 paging has only three levels
96     const PAGE_LEVELS: usize = 3;
97 
98     const ENTRY_ADDRESS_SHIFT: usize = 39;
99 
100     const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT
101         | Self::ENTRY_FLAG_READWRITE
102         | Self::ENTRY_FLAG_DIRTY
103         | Self::ENTRY_FLAG_ACCESSED
104         | Self::ENTRY_FLAG_GLOBAL;
105 
106     const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;
107 
108     const ENTRY_FLAG_PRESENT: usize = 1 << 0;
109 
110     const ENTRY_FLAG_READONLY: usize = (1 << 1);
111 
112     const ENTRY_FLAG_WRITEABLE: usize = (1 << 2);
113 
114     const ENTRY_FLAG_READWRITE: usize = (1 << 2) | (1 << 1);
115 
116     const ENTRY_FLAG_USER: usize = (1 << 4);
117     const ENTRY_ADDRESS_MASK: usize = Self::ENTRY_ADDRESS_SIZE - (1 << 10);
118     const ENTRY_FLAG_WRITE_THROUGH: usize = (2 << 61);
119 
120     const ENTRY_FLAG_CACHE_DISABLE: usize = (2 << 61);
121 
122     const ENTRY_FLAG_NO_EXEC: usize = 0;
123 
124     const ENTRY_FLAG_EXEC: usize = (1 << 3);
125     const ENTRY_FLAG_ACCESSED: usize = (1 << 6);
126     const ENTRY_FLAG_DIRTY: usize = (1 << 7);
127     const ENTRY_FLAG_GLOBAL: usize = (1 << 5);
128 
129     const PHYS_OFFSET: usize = 0xffff_ffc0_0000_0000;
130     const KERNEL_LINK_OFFSET: usize = 0x1000000;
131 
132     const USER_END_VADDR: crate::mm::VirtAddr = VirtAddr::new(0x0000_003f_ffff_ffff);
133 
134     const USER_BRK_START: crate::mm::VirtAddr = VirtAddr::new(0x0000_001f_ffff_ffff);
135 
136     const USER_STACK_START: crate::mm::VirtAddr = VirtAddr::new(0x0000_001f_ffa0_0000);
137 
138     /// Place the FIXMAP base 64 MiB below the top of the Sv39 address space
139     const FIXMAP_START_VADDR: VirtAddr = VirtAddr::new(0xffff_ffff_fc00_0000);
140     /// Reserve 1 MiB of fixmap space
141     const FIXMAP_SIZE: usize = 256 * 4096;
142 
143     /// Place the MMIO base 2 GiB below the top of the Sv39 address space
144     const MMIO_BASE: VirtAddr = VirtAddr::new(0xffff_ffff_8000_0000);
145     /// Reserve 1 GiB of MMIO space
146     const MMIO_SIZE: usize = 1 << PAGE_1G_SHIFT;
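    // Resulting layout at the top of the Sv39 kernel half (derived from the
    // constants above): MMIO covers [0xffff_ffff_8000_0000, 0xffff_ffff_c000_0000)
    // (1 GiB), and FIXMAP covers [0xffff_ffff_fc00_0000, 0xffff_ffff_fc10_0000)
    // (1 MiB, starting 64 MiB below the top of the address space).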
147 
148     const ENTRY_FLAG_HUGE_PAGE: usize = Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_READWRITE;
149 
150     #[inline(never)]
151     unsafe fn init() {
152         riscv_mm_init().expect("init kernel memory management architecture failed");
153     }
154 
155     unsafe fn arch_post_init() {
156         // Map the FDT
157         open_firmware_fdt_driver()
158             .map_fdt()
159             .expect("openfirmware map fdt failed");
160     }
161 
162     unsafe fn invalidate_page(address: VirtAddr) {
163         riscv::asm::sfence_vma(0, address.data());
164     }
165 
166     unsafe fn invalidate_all() {
167         riscv::asm::sfence_vma_all();
168     }
169 
170     unsafe fn table(_table_kind: PageTableKind) -> PhysAddr {
171         // phys page number
172         let ppn = riscv::register::satp::read().ppn();
173 
174         let paddr = PhysPageFrame::from_ppn(ppn).phys_address();
175 
176         return paddr;
177     }
178 
179     unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
180         let ppn = PhysPageFrame::new(table).ppn();
181         riscv::asm::sfence_vma_all();
182         satp::set(satp::Mode::Sv39, 0, ppn);
183     }
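    // Note: on RV64 the satp CSR encodes MODE in bits 63:60 (8 = Sv39), ASID in
    // bits 59:44 and the root-table PPN in bits 43:0, so `satp::set(satp::Mode::Sv39,
    // 0, ppn)` above installs `table` as the root page table with ASID 0.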
184 
185     fn virt_is_valid(virt: VirtAddr) -> bool {
186         virt.is_canonical()
187     }
188 
189     fn initial_page_table() -> PhysAddr {
190         unsafe { INITIAL_PGTABLE_VALUE }
191     }
192 
193     fn setup_new_usermapper() -> Result<UserMapper, SystemError> {
194         let new_umapper: crate::mm::page::PageMapper<MMArch, LockedFrameAllocator> = unsafe {
195             PageMapper::create(PageTableKind::User, LockedFrameAllocator)
196                 .ok_or(SystemError::ENOMEM)?
197         };
198 
199         let current_ktable: KernelMapper = KernelMapper::lock();
200         let copy_mapping = |pml4_entry_no| unsafe {
201             let entry: PageEntry<RiscV64MMArch> = current_ktable
202                 .table()
203                 .entry(pml4_entry_no)
204                 .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
205             new_umapper.table().set_entry(pml4_entry_no, entry)
206         };
207 
208         // Copy the kernel mappings
209         for pml4_entry_no in KERNEL_TOP_PAGE_ENTRY_NO..512 {
210             copy_mapping(pml4_entry_no);
211         }
212 
213         return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
214     }
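    // Note: with KERNEL_TOP_PAGE_ENTRY_NO = 256, the loop above copies root
    // entries 256..512, i.e. the entire kernel half of the Sv39 address space,
    // so every user page table shares the kernel mappings.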
215 
216     unsafe fn phys_2_virt(phys: PhysAddr) -> Option<VirtAddr> {
217         // Because of relocation, the region occupied by the riscv kernel image does not
218         // follow the linear-offset relationship, so it needs special handling here.
219         if phys >= KERNEL_BEGIN_PA && phys < KERNEL_END_PA {
220             let r = KERNEL_BEGIN_VA + (phys - KERNEL_BEGIN_PA);
221             return Some(r);
222         }
223 
224         if let Some(vaddr) = phys.data().checked_add(Self::PHYS_OFFSET) {
225             return Some(VirtAddr::new(vaddr));
226         } else {
227             return None;
228         }
229     }
230 
231     unsafe fn virt_2_phys(virt: VirtAddr) -> Option<PhysAddr> {
232         if virt >= KERNEL_BEGIN_VA && virt < KERNEL_END_VA {
233             let r = KERNEL_BEGIN_PA + (virt - KERNEL_BEGIN_VA);
234             return Some(r);
235         }
236 
237         if let Some(paddr) = virt.data().checked_sub(Self::PHYS_OFFSET) {
238             let r = PhysAddr::new(paddr);
239             return Some(r);
240         } else {
241             return None;
242         }
243     }
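    // Worked example: outside the relocated kernel image the mapping is a plain
    // linear offset, e.g. physical 0x8020_0000 maps to virtual
    // 0x8020_0000 + PHYS_OFFSET = 0xffff_ffc0_8020_0000, and virt_2_phys()
    // simply subtracts the same offset.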
244 
245     fn make_entry(paddr: PhysAddr, page_flags: usize) -> usize {
246         let ppn = PhysPageFrame::new(paddr).ppn();
247         let r = ((ppn & ((1 << 54) - 1)) << 10) | page_flags;
248         return r;
249     }
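    // Worked example: an Sv39 leaf entry keeps the PPN in bits 53:10 and the
    // flag bits in bits 9:0. For paddr = 0x8020_0000 the PPN is
    // 0x8020_0000 >> 12 = 0x80200, so make_entry() returns
    // (0x80200 << 10) | page_flags = 0x2008_0000 | page_flags.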
250 
251     fn vma_access_permitted(
252         _vma: alloc::sync::Arc<crate::mm::ucontext::LockedVMA>,
253         _write: bool,
254         _execute: bool,
255         _foreign: bool,
256     ) -> bool {
257         true
258     }
259 
260     const PAGE_NONE: usize = Self::ENTRY_FLAG_GLOBAL | Self::ENTRY_FLAG_READONLY;
261 
262     const PAGE_READ: usize = PAGE_ENTRY_BASE | Self::ENTRY_FLAG_READONLY;
263 
264     const PAGE_WRITE: usize =
265         PAGE_ENTRY_BASE | Self::ENTRY_FLAG_READONLY | Self::ENTRY_FLAG_WRITEABLE;
266 
267     const PAGE_EXEC: usize = PAGE_ENTRY_BASE | Self::ENTRY_FLAG_EXEC;
268 
269     const PAGE_READ_EXEC: usize =
270         PAGE_ENTRY_BASE | Self::ENTRY_FLAG_READONLY | Self::ENTRY_FLAG_EXEC;
271 
272     const PAGE_WRITE_EXEC: usize = PAGE_ENTRY_BASE
273         | Self::ENTRY_FLAG_READONLY
274         | Self::ENTRY_FLAG_EXEC
275         | Self::ENTRY_FLAG_WRITEABLE;
276 
277     const PAGE_COPY: usize = Self::PAGE_READ;
278     const PAGE_COPY_EXEC: usize = Self::PAGE_READ_EXEC;
279     const PAGE_SHARED: usize = Self::PAGE_WRITE;
280     const PAGE_SHARED_EXEC: usize = Self::PAGE_WRITE_EXEC;
281 
282     const PAGE_COPY_NOEXEC: usize = 0;
283     const PAGE_READONLY: usize = 0;
284     const PAGE_READONLY_EXEC: usize = 0;
285 
286     const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();
287 }
288 
289 const fn protection_map() -> [EntryFlags<MMArch>; 16] {
290     let mut map = [0; 16];
291     map[VmFlags::VM_NONE.bits()] = MMArch::PAGE_NONE;
292     map[VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY;
293     map[VmFlags::VM_WRITE.bits()] = MMArch::PAGE_COPY;
294     map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_COPY;
295     map[VmFlags::VM_EXEC.bits()] = MMArch::PAGE_READONLY_EXEC;
296     map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY_EXEC;
297     map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] = MMArch::PAGE_COPY_EXEC;
298     map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
299         MMArch::PAGE_COPY_EXEC;
300     map[VmFlags::VM_SHARED.bits()] = MMArch::PAGE_NONE;
301     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY;
302     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] = MMArch::PAGE_SHARED;
303     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
304         MMArch::PAGE_SHARED;
305     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] = MMArch::PAGE_READONLY_EXEC;
306     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
307         MMArch::PAGE_READONLY_EXEC;
308     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
309         MMArch::PAGE_SHARED_EXEC;
310     map[VmFlags::VM_SHARED.bits()
311         | VmFlags::VM_EXEC.bits()
312         | VmFlags::VM_WRITE.bits()
313         | VmFlags::VM_READ.bits()] = MMArch::PAGE_SHARED_EXEC;
314     let mut ret = [unsafe { EntryFlags::from_data(0) }; 16];
315     let mut index = 0;
316     while index < 16 {
317         ret[index] = unsafe { EntryFlags::from_data(map[index]) };
318         index += 1;
319     }
320     ret
321 }
322 
323 const PAGE_ENTRY_BASE: usize = RiscV64MMArch::ENTRY_FLAG_PRESENT
324     | RiscV64MMArch::ENTRY_FLAG_ACCESSED
325     | RiscV64MMArch::ENTRY_FLAG_USER;
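
// A minimal lookup sketch added for illustration (the helper name is hypothetical
// and unused elsewhere): PROTECTION_MAP translates the low VM_READ/VM_WRITE/
// VM_EXEC/VM_SHARED bits of a VMA into hardware entry flags. A private writable
// mapping resolves to PAGE_COPY, which this architecture defines as PAGE_READ,
// i.e. a read-only entry.
#[allow(dead_code)]
fn example_protection_lookup() -> EntryFlags<MMArch> {
    MMArch::PROTECTION_MAP[VmFlags::VM_READ.bits() | VmFlags::VM_WRITE.bits()]
}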
326 
327 impl VirtAddr {
328     /// Check whether the virtual address is canonical (valid)
329     #[inline(always)]
330     pub fn is_canonical(self) -> bool {
331         let x = self.data() & RiscV64MMArch::PHYS_OFFSET;
332         // If x is 0, the high bits of the virtual address are all 0: a valid user address.
333         // If x equals PHYS_OFFSET, the high bits are all 1: a valid kernel address.
334         return x == 0 || x == RiscV64MMArch::PHYS_OFFSET;
335     }
336 }
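
// A quick self-check sketch added for illustration (the helper name is
// hypothetical): both canonical halves of the Sv39 address space pass
// is_canonical(), while an address in the non-canonical hole between them fails.
#[allow(dead_code)]
fn example_canonical_addresses() {
    debug_assert!(VirtAddr::new(0x0000_003f_ffff_ffff).is_canonical()); // top of user space
    debug_assert!(VirtAddr::new(0xffff_ffc0_0000_0000).is_canonical()); // PHYS_OFFSET, kernel half
    debug_assert!(!VirtAddr::new(0x0000_8000_0000_0000).is_canonical()); // inside the hole
}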
337 
338 /// Get the default page flags for kernel addresses
339 pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(_virt: VirtAddr) -> EntryFlags<A> {
340     EntryFlags::from_data(RiscV64MMArch::ENTRY_FLAG_DEFAULT_PAGE)
341         .set_user(false)
342         .set_execute(true)
343 }
344 
345 /// Global page frame allocator
346 #[derive(Debug, Clone, Copy, Hash)]
347 pub struct LockedFrameAllocator;
348 
349 impl FrameAllocator for LockedFrameAllocator {
350     unsafe fn allocate(&mut self, count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
351         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
352             return allocator.allocate(count);
353         } else {
354             return None;
355         }
356     }
357 
358     unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
359         assert!(count.data().is_power_of_two());
360         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
361             return allocator.free(address, count);
362         }
363     }
364 
365     unsafe fn usage(&self) -> PageFrameUsage {
366         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
367             return allocator.usage();
368         } else {
369             panic!("usage error");
370         }
371     }
372 }
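
// A minimal allocation sketch added for illustration (the helper name is
// hypothetical and `PageFrameCount::new` is assumed to be the frame-count
// constructor): take a single 4 KiB frame from the global buddy allocator and
// immediately return it.
#[allow(dead_code)]
unsafe fn example_allocate_one_frame() {
    let mut allocator = LockedFrameAllocator;
    if let Some((paddr, count)) = allocator.allocate(PageFrameCount::new(1)) {
        // free() asserts the count is a power of two; a single frame satisfies that.
        allocator.free(paddr, count);
    }
}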
373