// kernel/src/driver/net/dma.rs
use crate::arch::mm::kernel_page_flags;

use crate::arch::MMArch;

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{page_manager_lock_irqsave, EntryFlags};
use crate::mm::{
    allocator::page_frame::{
        allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
    },
    MemoryManagementArch, PhysAddr, VirtAddr,
};
use core::ptr::NonNull;

const PAGE_SIZE: usize = 4096;
/// @brief Allocate memory pages for DMA
/// @param pages Number of pages (4 KiB per page)
/// @return (usize, NonNull<u8>) Starting physical address of the allocated pages and a pointer to their kernel virtual mapping
pub fn dma_alloc(pages: usize) -> (usize, NonNull<u8>) {
    let page_num = PageFrameCount::new(
        ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
    );
    unsafe {
        let (paddr, count) = allocate_page_frames(page_num).expect("e1000e: alloc page failed");
        let virt = MMArch::phys_2_virt(paddr).unwrap();
        // Zero the region to avoid exposing stale data
        core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);

        let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();

        let mut kernel_mapper = KernelMapper::lock();
        let kernel_mapper = kernel_mapper.as_mut().unwrap();
        let flusher = kernel_mapper
            .remap(virt, dma_flags)
            .expect("e1000e: remap failed");
        flusher.flush();
        return (
            paddr.data(),
            NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),
        );
    }
}
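
// Illustrative note (not in the original file): because the frame count above is rounded up
// to the next power of two, a call such as `dma_alloc(3)` reserves 4 physical page frames on
// an architecture with 4 KiB pages, even though the caller asked for only 3 pages.
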
/// @brief Free memory pages used for DMA
/// @param paddr Starting physical address
/// @param vaddr Kernel virtual pointer returned by dma_alloc
/// @param pages Number of pages (4 KiB per page)
/// @return i32 0 on success
pub unsafe fn dma_dealloc(paddr: usize, vaddr: NonNull<u8>, pages: usize) -> i32 {
    let page_count = PageFrameCount::new(
        ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
    );

    // Restore the default kernel page attributes before returning the frames
    let vaddr = VirtAddr::new(vaddr.as_ptr() as usize);
    let mut kernel_mapper = KernelMapper::lock();
    let kernel_mapper = kernel_mapper.as_mut().unwrap();
    let flusher = kernel_mapper
        .remap(vaddr, kernel_page_flags(vaddr))
        .expect("e1000e: remap failed");
    flusher.flush();

    unsafe {
        deallocate_page_frames(
            PhysPageFrame::new(PhysAddr::new(paddr)),
            page_count,
            &mut page_manager_lock_irqsave(),
        );
    }
    return 0;
}
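
// Illustrative usage sketch (not part of the original file): a hypothetical caller, e.g. a NIC
// driver, would pair the two helpers and pass the same `pages` value to both, so that the
// rounded-up frame count recomputed in dma_dealloc() matches the one used by dma_alloc():
//
//     let (paddr, vaddr) = dma_alloc(2);           // 2 pages, physically contiguous, zeroed
//     // ... program the device with `paddr`, access the buffer through `vaddr` ...
//     unsafe { dma_dealloc(paddr, vaddr, 2) };     // returns 0 on success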