use crate::arch::mm::kernel_page_flags;

use crate::arch::MMArch;

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{page_manager_lock_irqsave, EntryFlags};
use crate::mm::{
    allocator::page_frame::{
        allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
    },
    MemoryManagementArch, PhysAddr, VirtAddr,
};
use core::ptr::NonNull;
use virtio_drivers::{BufferDirection, Hal, PAGE_SIZE};

pub struct HalImpl;
unsafe impl Hal for HalImpl {
    /// @brief Allocate memory pages for DMA
    /// @param pages Number of pages (4 KiB each)
    /// @return The starting physical address of the allocated pages and a pointer to their kernel virtual mapping
    fn dma_alloc(
        pages: usize,
        _direction: BufferDirection,
    ) -> (virtio_drivers::PhysAddr, NonNull<u8>) {
        // Convert the requested virtio page count into architecture page frames,
        // rounded up to a power of two as required by the frame allocator.
        let page_num = PageFrameCount::new(
            ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
        );
        unsafe {
            let (paddr, count) =
                allocate_page_frames(page_num).expect("VirtIO Impl: alloc page failed");
            let virt = MMArch::phys_2_virt(paddr).unwrap();
            // Zero the region to avoid exposing stale data to the device.
            core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);

            // Remap the region with MMIO flags so DMA accesses are not affected by caching.
            let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();

            let mut kernel_mapper = KernelMapper::lock();
            let kernel_mapper = kernel_mapper.as_mut().unwrap();
            let flusher = kernel_mapper
                .remap(virt, dma_flags)
                .expect("VirtIO Impl: remap failed");
            flusher.flush();
            return (
                paddr.data(),
                NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),
            );
        }
    }

    /// @brief Free memory pages used for DMA
    /// @param paddr Starting physical address
    /// @param vaddr Kernel virtual address of the region
    /// @param pages Number of pages (4 KiB each)
    /// @return i32 0 on success
    unsafe fn dma_dealloc(
        paddr: virtio_drivers::PhysAddr,
        vaddr: NonNull<u8>,
        pages: usize,
    ) -> i32 {
        let page_count = PageFrameCount::new(
            ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
        );

        // Restore the original kernel page attributes before returning the frames.
        let vaddr = VirtAddr::new(vaddr.as_ptr() as usize);
        let mut kernel_mapper = KernelMapper::lock();
        let kernel_mapper = kernel_mapper.as_mut().unwrap();
        let flusher = kernel_mapper
            .remap(vaddr, kernel_page_flags(vaddr))
            .expect("VirtIO Impl: remap failed");
        flusher.flush();

        unsafe {
            deallocate_page_frames(
                PhysPageFrame::new(PhysAddr::new(paddr)),
                page_count,
                &mut page_manager_lock_irqsave(),
            );
        }
        return 0;
    }

    /// @brief Translate an MMIO physical address to a virtual address (not required in this implementation)
    /// @param paddr Starting physical address
    /// @return NonNull<u8> Pointer to the corresponding virtual address
    unsafe fn mmio_phys_to_virt(paddr: virtio_drivers::PhysAddr, _size: usize) -> NonNull<u8> {
        NonNull::new((MMArch::phys_2_virt(PhysAddr::new(paddr))).unwrap().data() as _).unwrap()
    }

    /// @brief Share a buffer with the physical device
    /// @param buffer The buffer to share
    /// @param _direction Device-to-driver or driver-to-device
    /// @return The physical address of the buffer in memory
    unsafe fn share(
        buffer: NonNull<[u8]>,
        _direction: BufferDirection,
    ) -> virtio_drivers::PhysAddr {
        let vaddr = VirtAddr::new(buffer.as_ptr() as *mut u8 as usize);
        // debug!("virt:{:x}", vaddr);
        // Nothing to do, as the host already has access to all memory.
        return MMArch::virt_2_phys(vaddr).unwrap().data();
    }

    /// @brief Stop sharing the buffer (nothing to do, since the host can already access all memory)
    unsafe fn unshare(
        _paddr: virtio_drivers::PhysAddr,
        _buffer: NonNull<[u8]>,
        _direction: BufferDirection,
    ) {
        // Nothing to do, as the host already has access to all memory and we didn't copy the buffer
        // anywhere else.
    }
}
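
// A hedged usage sketch (kept as a comment; not part of the original file): `HalImpl` is
// supplied as the `Hal` type parameter when constructing a virtio transport and device, so
// all device DMA goes through `dma_alloc`/`dma_dealloc` above. The transport kind (MMIO vs
// PCI), the device type, and the constructor signatures vary with the virtio_drivers
// version; the names below follow the crate's MMIO block-device API, and the header address
// is purely hypothetical.
//
//     use core::ptr::NonNull;
//     use virtio_drivers::device::blk::VirtIOBlk;
//     use virtio_drivers::transport::mmio::{MmioTransport, VirtIOHeader};
//
//     // `0xfe00_0000` stands in for a virtio-mmio region discovered from the device tree.
//     let header = NonNull::new(0xfe00_0000 as *mut VirtIOHeader).unwrap();
//     let transport =
//         unsafe { MmioTransport::new(header) }.expect("not a valid virtio-mmio device");
//     let mut blk = VirtIOBlk::<HalImpl, MmioTransport>::new(transport)
//         .expect("failed to initialize virtio-blk");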