use crate::arch::mm::kernel_page_flags;

use crate::arch::MMArch;

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{page_manager_lock_irqsave, EntryFlags};
use crate::mm::{
    allocator::page_frame::{
        allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
    },
    MemoryManagementArch, PhysAddr, VirtAddr,
};
use core::ptr::NonNull;
use virtio_drivers::{BufferDirection, Hal, PAGE_SIZE};

pub struct HalImpl;
unsafe impl Hal for HalImpl {
    /// @brief Allocate memory pages for DMA
    /// @param pages Number of pages (4 KiB per page)
    /// @return The starting physical address of the allocated pages and a pointer to their virtual mapping
    fn dma_alloc(
        pages: usize,
        _direction: BufferDirection,
    ) -> (virtio_drivers::PhysAddr, NonNull<u8>) {
        let page_num = PageFrameCount::new(
            (pages * PAGE_SIZE)
                .div_ceil(MMArch::PAGE_SIZE)
                .next_power_of_two(),
        );
        unsafe {
            let (paddr, count) =
                allocate_page_frames(page_num).expect("VirtIO Impl: alloc page failed");
            let virt = MMArch::phys_2_virt(paddr).unwrap();
            // Zero the region to prevent stale data from leaking through
            core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);

            let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();

            let mut kernel_mapper = KernelMapper::lock();
            let kernel_mapper = kernel_mapper.as_mut().unwrap();
            let flusher = kernel_mapper
                .remap(virt, dma_flags)
                .expect("VirtIO Impl: remap failed");
            flusher.flush();
            return (
                paddr.data(),
                NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),
            );
        }
    }
    /// @brief Free memory pages used for DMA
    /// @param paddr Starting physical address; pages Number of pages (4 KiB per page)
    /// @return i32 0 on success
    unsafe fn dma_dealloc(
        paddr: virtio_drivers::PhysAddr,
        vaddr: NonNull<u8>,
        pages: usize,
    ) -> i32 {
        let page_count = PageFrameCount::new(
            (pages * PAGE_SIZE)
                .div_ceil(MMArch::PAGE_SIZE)
                .next_power_of_two(),
        );

        // Restore the original page attributes
        let vaddr = VirtAddr::new(vaddr.as_ptr() as usize);
        let mut kernel_mapper = KernelMapper::lock();
        let kernel_mapper = kernel_mapper.as_mut().unwrap();
        let flusher = kernel_mapper
            .remap(vaddr, kernel_page_flags(vaddr))
            .expect("VirtIO Impl: remap failed");
        flusher.flush();

        unsafe {
            deallocate_page_frames(
                PhysPageFrame::new(PhysAddr::new(paddr)),
                page_count,
                &mut page_manager_lock_irqsave(),
            );
        }
        return 0;
    }
    /// @brief Convert an MMIO physical address to a virtual address; not needed in practice
    /// @param paddr Starting physical address
    /// @return NonNull<u8> Pointer to the virtual address
    unsafe fn mmio_phys_to_virt(paddr: virtio_drivers::PhysAddr, _size: usize) -> NonNull<u8> {
        NonNull::new((MMArch::phys_2_virt(PhysAddr::new(paddr))).unwrap().data() as _).unwrap()
    }
    /// @brief Share a buffer with the real physical device
    /// @param buffer The buffer to share; _direction: device-to-driver or driver-to-device
    /// @return The physical address of the buffer in memory
    unsafe fn share(
        buffer: NonNull<[u8]>,
        _direction: BufferDirection,
    ) -> virtio_drivers::PhysAddr {
        let vaddr = VirtAddr::new(buffer.as_ptr() as *mut u8 as usize);
        //debug!("virt:{:x}", vaddr);
        // Nothing to do, as the host already has access to all memory.
        return MMArch::virt_2_phys(vaddr).unwrap().data();
    }
    /// @brief Stop sharing (nothing to do, since the host can already access all memory)
    unsafe fn unshare(
        _paddr: virtio_drivers::PhysAddr,
        _buffer: NonNull<[u8]>,
        _direction: BufferDirection,
    ) {
        // Nothing to do, as the host already has access to all memory and we didn't copy the buffer
        // anywhere else.
    }
}
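
// A minimal illustrative sketch (not part of the original driver) of the frame-count
// rounding performed by `dma_alloc` and `dma_dealloc` above, assuming both
// `virtio_drivers::PAGE_SIZE` and `MMArch::PAGE_SIZE` are 4096. The helper and test
// names below are hypothetical and exist only to document the rounding behaviour:
// the requested VirtIO pages are converted into architecture page frames, then
// rounded up to a power of two so the frame allocator can satisfy the request.
#[cfg(test)]
mod hal_page_rounding_sketch {
    /// Mirrors the expression used in `dma_alloc`/`dma_dealloc`.
    fn rounded_frame_count(pages: usize, virtio_page_size: usize, arch_page_size: usize) -> usize {
        (pages * virtio_page_size)
            .div_ceil(arch_page_size)
            .next_power_of_two()
    }

    #[test]
    fn rounds_up_to_power_of_two() {
        // With 4 KiB pages on both sides, 3 VirtIO pages need 3 frames,
        // which rounds up to 4 contiguous frames.
        assert_eq!(rounded_frame_count(3, 4096, 4096), 4);
        // A single VirtIO page stays a single frame.
        assert_eq!(rounded_frame_count(1, 4096, 4096), 1);
        // With hypothetical 16 KiB architecture pages, 3 VirtIO pages fit in 1 frame.
        assert_eq!(rounded_frame_count(3, 4096, 16384), 1);
    }
}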