use crate::arch::mm::kernel_page_flags;

use crate::arch::MMArch;

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::PageFlags;
use crate::mm::{
    allocator::page_frame::{
        allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
    },
    MemoryManagementArch, PhysAddr, VirtAddr,
};
use core::ptr::NonNull;

const PAGE_SIZE: usize = 4096;

/// @brief Allocate memory pages for DMA
/// @param pages Number of pages (4 KiB per page)
/// @return (usize, NonNull<u8>) Starting physical address of the allocated pages
///         and a pointer to their kernel virtual mapping
pub fn dma_alloc(pages: usize) -> (usize, NonNull<u8>) {
    let page_num = PageFrameCount::new(
        ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
    );
    unsafe {
        let (paddr, count) = allocate_page_frames(page_num).expect("e1000e: alloc page failed");
        let virt = MMArch::phys_2_virt(paddr).unwrap();
        // Zero the region so no stale data is exposed to the device
        core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);

        let dma_flags: PageFlags<MMArch> = PageFlags::mmio_flags();

        let mut kernel_mapper = KernelMapper::lock();
        let kernel_mapper = kernel_mapper.as_mut().unwrap();
        let flusher = kernel_mapper
            .remap(virt, dma_flags)
            .expect("e1000e: remap failed");
        flusher.flush();
        return (
            paddr.data(),
            NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),
        );
    }
}

/// @brief Free memory pages used for DMA
/// @param paddr Starting physical address
/// @param vaddr Pointer to the kernel virtual mapping of the pages
/// @param pages Number of pages (4 KiB per page)
/// @return i32 0 on success
pub unsafe fn dma_dealloc(paddr: usize, vaddr: NonNull<u8>, pages: usize) -> i32 {
    let page_count = PageFrameCount::new(
        ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
    );

    // Restore the original kernel page attributes before freeing
    let vaddr = VirtAddr::new(vaddr.as_ptr() as usize);
    let mut kernel_mapper = KernelMapper::lock();
    let kernel_mapper = kernel_mapper.as_mut().unwrap();
    let flusher = kernel_mapper
        .remap(vaddr, kernel_page_flags(vaddr))
        .expect("e1000e: remap failed");
    flusher.flush();

    unsafe {
        deallocate_page_frames(PhysPageFrame::new(PhysAddr::new(paddr)), page_count);
    }
    return 0;
}
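
// A minimal usage sketch (hypothetical caller, not part of the driver above):
// it only illustrates that `dma_alloc` and `dma_dealloc` are used as a pair,
// with the device programmed with `paddr` while the CPU accesses the buffer
// through `vaddr`. The function name and the marker byte are illustrative.
#[allow(dead_code)]
unsafe fn dma_usage_example() {
    // Allocate one 4 KiB page of DMA-capable memory.
    let (paddr, vaddr) = dma_alloc(1);

    // CPU-side access goes through the returned virtual pointer.
    core::ptr::write_bytes(vaddr.as_ptr(), 0xff, PAGE_SIZE);

    // ... a real caller would hand `paddr` to the device here ...

    // Release with the same physical address, virtual pointer and page count.
    let ret = dma_dealloc(paddr, vaddr, 1);
    debug_assert_eq!(ret, 0);
}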