use system_error::SystemError;

use super::{page::EntryFlags, PageTableKind, PhysAddr, VirtAddr};
use crate::{
    arch::{
        mm::{LockedFrameAllocator, PageMapper},
        CurrentIrqArch,
    },
    exception::InterruptArch,
    libs::align::page_align_up,
    mm::{allocator::page_frame::PageFrameCount, MMArch, MemoryManagementArch},
    smp::{
        core::smp_get_processor_id,
        cpu::{AtomicProcessorId, ProcessorId},
    },
};
use core::{
    ops::Deref,
    sync::atomic::{compiler_fence, AtomicUsize, Ordering},
};

/// Marks that no processor currently holds the kernel mapper lock.
/// This sentinel is needed because AtomicProcessorId::new(0) would treat 0 as a real processor id.
const KERNEL_MAPPER_NO_PROCESSOR: ProcessorId = ProcessorId::INVALID;
/// The processor that currently holds the kernel mapper lock.
static KERNEL_MAPPER_LOCK_OWNER: AtomicProcessorId =
    AtomicProcessorId::new(KERNEL_MAPPER_NO_PROCESSOR);
/// Lock counter for the kernel mapper.
static KERNEL_MAPPER_LOCK_COUNT: AtomicUsize = AtomicUsize::new(0);

pub struct KernelMapper {
    /// The kernel-space page mapper.
    mapper: PageMapper,
    /// Whether this mapper handle is read-only.
    readonly: bool,
}

impl KernelMapper {
    fn lock_cpu(cpuid: ProcessorId, mapper: PageMapper) -> Self {
        loop {
            match KERNEL_MAPPER_LOCK_OWNER.compare_exchange_weak(
                KERNEL_MAPPER_NO_PROCESSOR,
                cpuid,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                // The current processor already holds the lock.
                Err(id) if id == cpuid => break,
                // Either the CAS failed spuriously, or some other hardware thread holds the lock.
                Err(_) => core::hint::spin_loop(),
            }
        }

        let prev_count = KERNEL_MAPPER_LOCK_COUNT.fetch_add(1, Ordering::Relaxed);
        compiler_fence(Ordering::Acquire);

        // The local core already held the lock, so mark this newly acquired mapper as read-only.
        let readonly = prev_count > 0;

        return Self { mapper, readonly };
    }

    /// @brief Lock the kernel mapper and return a kernel mapper object.
    #[inline(always)]
    pub fn lock() -> Self {
        let cpuid = smp_get_processor_id();
        let mapper = unsafe { PageMapper::current(PageTableKind::Kernel, LockedFrameAllocator) };
        return Self::lock_cpu(cpuid, mapper);
    }

    /// @brief Get a mutable reference to the kernel mapper's page mapper.
    /// Returns None if the current mapper is read-only.
    #[inline(always)]
    pub fn as_mut(&mut self) -> Option<&mut PageMapper> {
        if self.readonly {
            return None;
        } else {
            return Some(&mut self.mapper);
        }
    }

    /// @brief Get an immutable reference to the kernel mapper's page mapper.
    #[inline(always)]
    pub fn as_ref(&self) -> &PageMapper {
        return &self.mapper;
    }

    /// Map a range of physical addresses to the given virtual address.
    ///
    /// ## Parameters
    ///
    /// - `vaddr`: the virtual address to map to
    /// - `paddr`: the physical address to map
    /// - `size`: the size to map, in bytes (rounded up to a multiple of the page size)
    /// - `flags`: the page entry flags
    /// - `flush`: whether to flush the TLB
    ///
    /// ## Returns
    ///
    /// - On success: returns `Ok(())`
    /// - On failure: returns `EAGAIN_OR_EWOULDBLOCK` if the current mapper is read-only
    pub unsafe fn map_phys_with_size(
        &mut self,
        mut vaddr: VirtAddr,
        mut paddr: PhysAddr,
        size: usize,
        flags: EntryFlags<MMArch>,
        flush: bool,
    ) -> Result<(), SystemError> {
        if self.readonly {
            return Err(SystemError::EAGAIN_OR_EWOULDBLOCK);
        }

        let count = PageFrameCount::new(page_align_up(size) / MMArch::PAGE_SIZE);
        // debug!("kernel mapper: map_phys: vaddr: {vaddr:?}, paddr: {paddr:?}, count: {count:?}, flags: {flags:?}");
        for _ in 0..count.data() {
            let flusher = self.mapper.map_phys(vaddr, paddr, flags).unwrap();

            if flush {
                flusher.flush();
            }

            vaddr += MMArch::PAGE_SIZE;
            paddr += MMArch::PAGE_SIZE;
        }
        compiler_fence(Ordering::SeqCst);
        return Ok(());
    }
}

impl Drop for KernelMapper {
    fn drop(&mut self) {
        // Disable interrupts so that an interrupt between the fetch_sub and the store cannot
        // cause the store to wrongly clear the lock owner.
        let guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let prev_count = KERNEL_MAPPER_LOCK_COUNT.fetch_sub(1, Ordering::Relaxed);
        if prev_count == 1 {
            KERNEL_MAPPER_LOCK_OWNER.store(KERNEL_MAPPER_NO_PROCESSOR, Ordering::Release);
        }
        drop(guard);
        compiler_fence(Ordering::Release);
    }
}

impl Deref for KernelMapper {
    type Target = PageMapper;

    fn deref(&self) -> &Self::Target {
        return self.as_ref();
    }
}
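
// ---------------------------------------------------------------------------
// Usage sketch (not part of the original module): a minimal illustration of the
// intended call pattern, assuming only the API defined above. The function name
// and the example addresses are hypothetical, and the caller is assumed to
// supply ready-made `EntryFlags` so that no arch-specific flag constructor is
// invented here.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn kernel_mapper_usage_sketch(flags: EntryFlags<MMArch>) -> Result<(), SystemError> {
    // Hypothetical addresses, for illustration only.
    let paddr = PhysAddr::new(0xfee0_0000);
    let vaddr = VirtAddr::new(0xffff_8000_fee0_0000);

    // Acquire the kernel mapper. A nested acquisition on the same core returns a
    // read-only handle, whose `map_phys_with_size` fails with EAGAIN_OR_EWOULDBLOCK.
    let mut mapper = KernelMapper::lock();

    // Map one page and flush the TLB for the new entry.
    unsafe {
        mapper.map_phys_with_size(vaddr, paddr, MMArch::PAGE_SIZE, flags, true)?;
    }

    Ok(())
    // The lock is released when `mapper` is dropped at the end of this scope.
}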