/DragonOS/kernel/src/mm/

no_init.rs
      15  mm::{MMArch, MemoryManagementArch, PhysAddr},
      35  data: [u64; MMArch::PAGE_SIZE],
      57  data: [0; MMArch::PAGE_SIZE],    in new()
      76  assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));    in allocate_page()
      87  let index = offset / MMArch::PAGE_SIZE;    in free_page()
     144  let flags: EntryFlags<MMArch> = EntryFlags::new().set_write(true);    in pseudo_map_phys()
     153  let flags: EntryFlags<MMArch> = EntryFlags::new().set_write(false).set_execute(true);    in pseudo_map_phys_ro()
     163  flags: EntryFlags<MMArch>,    in pseudo_map_phys_with_flags() argument
     165  assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));    in pseudo_map_phys_with_flags()
     166  assert!(paddr.check_aligned(MMArch::PAGE_SIZE));    in pseudo_map_phys_with_flags()
     [all …]

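The assertions above all rely on the same page-alignment test. Below is a minimal standalone sketch of that check; the bitmask implementation and the 4 KiB page size are assumptions here, since the real `check_aligned` lives on DragonOS's address types.

```rust
/// Power-of-two alignment test (assumed implementation): an address is
/// aligned iff all bits below `align` are zero.
fn check_aligned(addr: usize, align: usize) -> bool {
    debug_assert!(align.is_power_of_two());
    addr & (align - 1) == 0
}

fn main() {
    const PAGE_SIZE: usize = 4096; // stand-in for MMArch::PAGE_SIZE
    assert!(check_aligned(0x2000, PAGE_SIZE)); // page-aligned
    assert!(!check_aligned(0x2001, PAGE_SIZE)); // off by one byte
}
```
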
early_ioremap.rs
       4  arch::MMArch,
      28  const SLOT_CNT: usize = MMArch::FIXMAP_SIZE / MMArch::PAGE_SIZE;
      81  if !phys.check_aligned(MMArch::PAGE_SIZE) {    in map()
     126  let map_size = slot_count * MMArch::PAGE_SIZE;    in map()
     153  if virt < MMArch::FIXMAP_START_VADDR || virt >= MMArch::FIXMAP_END_VADDR {    in unmap()
     195  MMArch::FIXMAP_START_VADDR + idx * MMArch::PAGE_SIZE    in idx_to_virt()

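The fixmap arithmetic above is compact enough to sketch on its own: `SLOT_CNT` divides the fixmap window into page-sized slots (line 28), and `idx_to_virt` maps a slot index back to a virtual address (line 195). The window base and size below are invented stand-ins for the `MMArch` constants, assuming a 64-bit target with 4 KiB pages.

```rust
const PAGE_SIZE: usize = 4096;
const FIXMAP_START_VADDR: usize = 0xffff_8000_1000_0000; // hypothetical
const FIXMAP_SIZE: usize = 256 * PAGE_SIZE;              // hypothetical
const FIXMAP_END_VADDR: usize = FIXMAP_START_VADDR + FIXMAP_SIZE;

// One early-ioremap slot per page, as on line 28.
const SLOT_CNT: usize = FIXMAP_SIZE / PAGE_SIZE;

// Slot index -> virtual address, as on line 195.
fn idx_to_virt(idx: usize) -> usize {
    debug_assert!(idx < SLOT_CNT);
    FIXMAP_START_VADDR + idx * PAGE_SIZE
}

fn main() {
    assert_eq!(idx_to_virt(0), FIXMAP_START_VADDR);
    assert!(idx_to_virt(SLOT_CNT - 1) < FIXMAP_END_VADDR);
}
```
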
fault.rs
      11  arch::{mm::PageMapper, MMArch},
      75  ((address - guard.region().start()) >> MMArch::PAGE_SHIFT) + file_page_offset    in new()
     132  if !MMArch::vma_access_permitted(    in handle_mm_fault()
     173  let level = MMArch::PAGE_LEVELS - level;    in handle_normal_fault()
     217  entry.set_flags(EntryFlags::from_data(MMArch::ENTRY_FLAG_DIRTY));    in handle_pte_fault()
     249  Layout::from_size_align(MMArch::PAGE_SIZE, MMArch::PAGE_SIZE).unwrap(),    in do_anonymous_page()
     321  let new_frame = MMArch::phys_2_virt(cow_page_phys).unwrap();    in do_cow_fault()
     323  MMArch::phys_2_virt(cache_page.read_irqsave().phys_address())    in do_cow_fault()
     326  MMArch::PAGE_SIZE,    in do_cow_fault()
     478  (MMArch::phys_2_virt(paddr).unwrap().data() as *mut u8).copy_from_nonoverlapping(    in do_wp_page()
     [all …]

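Line 75 turns a faulting address into a page index within the mapped file: the offset into the VMA in pages, plus the page offset at which the file was mapped. A sketch with hypothetical argument names, assuming 4 KiB pages:

```rust
const PAGE_SHIFT: usize = 12; // stand-in for MMArch::PAGE_SHIFT

/// Page index within the backing file for a faulting address.
fn file_page_index(address: usize, region_start: usize, file_page_offset: usize) -> usize {
    ((address - region_start) >> PAGE_SHIFT) + file_page_offset
}

fn main() {
    // A fault 3 pages into a VMA that maps the file starting at page 10.
    assert_eq!(file_page_index(0x7000_3000, 0x7000_0000, 10), 13);
}
```
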
ucontext.rs
      21  arch::{mm::PageMapper, CurrentIrqArch, MMArch},
     134  user_mapper: MMArch::setup_new_usermapper()?,    in new()
     139  brk_start: MMArch::USER_BRK_START,    in new()
     140  brk: MMArch::USER_BRK_START,    in new()
     168  .clone_from(&mut self.user_mapper, MMArch::PAGE_FAULT_ENABLED)    in try_clone()
     255  let allocate_at_once = if MMArch::PAGE_FAULT_ENABLED {    in map_anonymous()
     263  let addr = hint.data() & (!MMArch::PAGE_OFFSET_MASK);    in map_anonymous()
     291  VirtRegion::new(page.virt_address(), count.data() * MMArch::PAGE_SIZE),    in map_anonymous()
     333  let allocate_at_once = if MMArch::PAGE_FAULT_ENABLED {    in file_mapping()
     341  let addr = hint.data() & (!MMArch::PAGE_OFFSET_MASK);    in file_mapping()
     [all …]

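Lines 263 and 341 round the caller's mmap hint down to a page boundary by masking off the in-page bits. A sketch, assuming 4 KiB pages:

```rust
const PAGE_SIZE: usize = 4096; // assumed
const PAGE_OFFSET_MASK: usize = PAGE_SIZE - 1;

/// Truncate an mmap hint to its containing page boundary.
fn round_hint_down(hint: usize) -> usize {
    hint & !PAGE_OFFSET_MASK
}

fn main() {
    assert_eq!(round_hint_down(0x1234_5678), 0x1234_5000);
    assert_eq!(round_hint_down(0x1234_5000), 0x1234_5000); // already aligned
}
```
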
mmio_buddy.rs
       5  use crate::mm::{MMArch, MemoryManagementArch};
      67  pool_start_addr: MMArch::MMIO_BASE,    in new()
      68  pool_size: MMArch::MMIO_SIZE,    in new()
      75  let mut vaddr_base = MMArch::MMIO_BASE;    in new()
      76  let mut remain_size = MMArch::MMIO_SIZE;    in new()
      79  MMArch::MMIO_BASE,    in new()
      80  MMArch::MMIO_TOP,    in new()
      81  MMArch::MMIO_SIZE    in new()
     532  assert!(vaddr.check_aligned(MMArch::PAGE_SIZE));    in release_mmio()
     533  assert!(length & (MMArch::PAGE_SIZE - 1) == 0);    in release_mmio()
     [all …]

kernel_mapper.rs
      11  mm::{allocator::page_frame::PageFrameCount, MMArch, MemoryManagementArch},
     107  flags: EntryFlags<MMArch>,    in map_phys_with_size() argument
     114  let count = PageFrameCount::new(page_align_up(size) / MMArch::PAGE_SIZE);    in map_phys_with_size()
     123  vaddr += MMArch::PAGE_SIZE;    in map_phys_with_size()
     124  paddr += MMArch::PAGE_SIZE;    in map_phys_with_size()

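These matches outline the whole of `map_phys_with_size`: round the byte size up to whole pages (line 114), then advance `vaddr` and `paddr` one page per iteration (lines 123 and 124). A runnable sketch under a 4 KiB-page assumption, with a `println!` standing in for the real page-table mapping call:

```rust
const PAGE_SIZE: usize = 4096; // assumed

/// Round a byte size up to a whole number of pages.
fn page_align_up(size: usize) -> usize {
    (size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}

/// Walk the range one page at a time, as the real function does.
fn map_phys_with_size(mut vaddr: usize, mut paddr: usize, size: usize) {
    let count = page_align_up(size) / PAGE_SIZE;
    for _ in 0..count {
        // The kernel installs a page-table entry here.
        println!("map {vaddr:#x} -> {paddr:#x}");
        vaddr += PAGE_SIZE;
        paddr += PAGE_SIZE;
    }
}

fn main() {
    map_phys_with_size(0xffff_a000_0000_0000, 0x8000_0000, 6000); // two pages
}
```
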
init.rs
       6  arch::MMArch,
      51  MMArch::init();    in mm_init()
      75  MMArch::arch_post_init();    in mm_init()

syscall.rs
       8  arch::MMArch,
     258  if new_addr < address_space.brk_start || new_addr >= MMArch::USER_END_VADDR {    in brk()
     390  if !old_vaddr.check_aligned(MMArch::PAGE_SIZE) {    in mremap()
     451  assert!(start_vaddr.check_aligned(MMArch::PAGE_SIZE));    in munmap()
     452  assert!(check_aligned(len, MMArch::PAGE_SIZE));    in munmap()
     463  let page_count = PageFrameCount::new(len / MMArch::PAGE_SIZE);    in munmap()
     485  assert!(start_vaddr.check_aligned(MMArch::PAGE_SIZE));    in mprotect()
     486  assert!(check_aligned(len, MMArch::PAGE_SIZE));    in mprotect()
     499  let page_count = PageFrameCount::new(len / MMArch::PAGE_SIZE);    in mprotect()
     520  if !start_vaddr.check_aligned(MMArch::PAGE_SIZE) || !check_aligned(len, MMArch::PAGE_SIZE) {    in madvise()
     [all …]

page.rs
      18  arch::{interrupt::ipi::send_ipi, mm::LockedFrameAllocator, MMArch},
     261  MMArch::PAGE_SIZE,    in page_writeback()
     264  MMArch::phys_2_virt(page.read_irqsave().phys_addr)    in page_writeback()
     267  MMArch::PAGE_SIZE,    in page_writeback()
     585  let mask = (MMArch::PAGE_ENTRY_NUM << shift) - 1;    in index_of()
     589  return Some((addr.data() >> shift) & MMArch::PAGE_ENTRY_MASK);    in index_of()
     619  let frame = MMArch::phys_2_virt(phys).unwrap();    in clone()
     620  MMArch::write_bytes(frame, 0, MMArch::PAGE_SIZE);    in clone()
     649  let frame = MMArch::phys_2_virt(phys).unwrap().data() as *mut u8;    in clone()
     651  MMArch::phys_2_virt(old_phys).unwrap().data() as *mut u8,    in clone()
     [all …]

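Lines 585 and 589 extract a page-table index from a virtual address. The sketch below shows the conventional form of that computation for a 512-entry, 4 KiB-page table; the constants are stand-ins for the `MMArch` values, and the real `index_of` also bounds-checks against a mask before returning `Some(...)`.

```rust
const PAGE_SHIFT: usize = 12;
const PAGE_ENTRY_SHIFT: usize = 9; // 512 entries per table
const PAGE_ENTRY_NUM: usize = 1 << PAGE_ENTRY_SHIFT;
const PAGE_ENTRY_MASK: usize = PAGE_ENTRY_NUM - 1;

/// Page-table index of `addr` at `level` (1 = leaf level).
fn index_of(addr: usize, level: usize) -> usize {
    let shift = (level - 1) * PAGE_ENTRY_SHIFT + PAGE_SHIFT;
    (addr >> shift) & PAGE_ENTRY_MASK
}

fn main() {
    let addr = 0x0000_7f12_3456_7000_usize;
    assert_eq!(index_of(addr, 1), (addr >> 12) & 0x1ff); // PT index
    assert_eq!(index_of(addr, 4), (addr >> 39) & 0x1ff); // PML4 index
}
```
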
/DragonOS/kernel/src/arch/riscv64/mm/

init.rs
      12  MMArch,
      76  let _old_page_table = MMArch::table(PageTableKind::Kernel);    in riscv_mm_init()
      83  let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =    in riscv_mm_init()
      84  crate::mm::page::PageMapper::<MMArch, _>::create(    in riscv_mm_init()
      95  let empty_entry = PageEntry::<MMArch>::from_usize(0);    in riscv_mm_init()
      96  for i in 0..MMArch::PAGE_ENTRY_NUM {    in riscv_mm_init()
     108  for i in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {    in riscv_mm_init()
     109  let paddr = area.base.add(i * MMArch::PAGE_SIZE);    in riscv_mm_init()
     110  let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();    in riscv_mm_init()
     111  let flags = kernel_page_flags::<MMArch>(vaddr).set_execute(true);    in riscv_mm_init()
     [all …]

mod.rs
       6  arch::MMArch,
      38  pub(self) static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);
     194  let new_umapper: crate::mm::page::PageMapper<MMArch, LockedFrameAllocator> = unsafe {    in setup_new_usermapper()
     286  const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();
     289  const fn protection_map() -> [EntryFlags<MMArch>; 16] {    in protection_map()
     291  map[VmFlags::VM_NONE.bits()] = MMArch::PAGE_NONE;
     292  map[VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY;
     293  map[VmFlags::VM_WRITE.bits()] = MMArch::PAGE_COPY;
     294  map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_COPY;
     295  map[VmFlags::VM_EXEC.bits()] = MMArch::PAGE_READONLY_EXEC;
     [all …]

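Both the riscv64 and x86_64 back ends build `PROTECTION_MAP` the same way: a 16-entry table computed at compile time, indexed by the low `VmFlags` bits, mapping each read/write/exec combination to page-entry flags; private writable mappings get `PAGE_COPY`, i.e. copy-on-write. A sketch with invented flag values standing in for `VmFlags` and `EntryFlags<MMArch>`:

```rust
// Invented bit assignments; the kernel's VmFlags define the real ones.
const VM_READ: usize = 1 << 0;
const VM_WRITE: usize = 1 << 1;
const VM_EXEC: usize = 1 << 2;

// Invented page-flag values; the kernel uses EntryFlags<MMArch>.
const PAGE_NONE: u64 = 0;
const PAGE_READONLY: u64 = 1;
const PAGE_COPY: u64 = 2; // private writable => copy-on-write
const PAGE_READONLY_EXEC: u64 = 3;

// Computed once at compile time, as on line 286.
const PROTECTION_MAP: [u64; 16] = protection_map();

const fn protection_map() -> [u64; 16] {
    let mut map = [PAGE_NONE; 16];
    map[VM_READ] = PAGE_READONLY;
    map[VM_WRITE] = PAGE_COPY;
    map[VM_WRITE | VM_READ] = PAGE_COPY;
    map[VM_EXEC] = PAGE_READONLY_EXEC;
    map
}

fn main() {
    assert_eq!(PROTECTION_MAP[VM_READ | VM_WRITE], PAGE_COPY);
}
```
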
/DragonOS/kernel/src/arch/x86_64/mm/

mod.rs
      24  arch::MMArch,
      48  static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);
     169  MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()    in init()
     250  for pml4_entry_no in MMArch::PAGE_KERNEL_INDEX..MMArch::PAGE_ENTRY_NUM {    in setup_new_usermapper()
     334  const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();
     383  const fn protection_map() -> [EntryFlags<MMArch>; 16] {    in protection_map()
     386  map[VmFlags::VM_NONE.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
     387  map[VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY);
     388  map[VmFlags::VM_WRITE.bits()] = EntryFlags::from_data(MMArch::PAGE_COPY);
     390  EntryFlags::from_data(MMArch::PAGE_COPY);
     [all …]

/DragonOS/kernel/src/driver/virtio/

virtio_impl.rs
       3  use crate::arch::MMArch;
      27  .div_ceil(MMArch::PAGE_SIZE)    in dma_alloc()
      33  let virt = MMArch::phys_2_virt(paddr).unwrap();    in dma_alloc()
      35  core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);    in dma_alloc()
      37  let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();    in dma_alloc()
      47  NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),    in dma_alloc()
      61  .div_ceil(MMArch::PAGE_SIZE)    in dma_dealloc()
      87  NonNull::new((MMArch::phys_2_virt(PhysAddr::new(paddr))).unwrap().data() as _).unwrap()    in mmio_phys_to_virt()
      99  return MMArch::virt_2_phys(vaddr).unwrap().data();    in share()

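`dma_alloc` sizes its request by dividing the byte length by the page size and rounding up (the `div_ceil` calls on lines 27 and 61), so even a one-byte buffer costs a full page. A sketch, assuming 4 KiB pages:

```rust
const PAGE_SIZE: usize = 4096; // assumed

/// Pages needed to back a DMA buffer of `len` bytes.
fn dma_page_count(len: usize) -> usize {
    len.div_ceil(PAGE_SIZE)
}

fn main() {
    assert_eq!(dma_page_count(1), 1);
    assert_eq!(dma_page_count(4096), 1);
    assert_eq!(dma_page_count(4097), 2);
}
```
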
/DragonOS/kernel/src/driver/net/

dma.rs
       3  use crate::arch::MMArch;
      21  .div_ceil(MMArch::PAGE_SIZE)    in dma_alloc()
      26  let virt = MMArch::phys_2_virt(paddr).unwrap();    in dma_alloc()
      28  core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);    in dma_alloc()
      30  let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();    in dma_alloc()
      40  NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),    in dma_alloc()
      50  .div_ceil(MMArch::PAGE_SIZE)    in dma_dealloc()

/DragonOS/kernel/src/arch/x86_64/

elf.rs
       1  use crate::{arch::MMArch, libs::elf::ElfArch, mm::MemoryManagementArch};
       7  const ELF_ET_DYN_BASE: usize = MMArch::USER_END_VADDR.data() / 3 * 2;
       9  const ELF_PAGE_SIZE: usize = MMArch::PAGE_SIZE;

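Line 7 bases PIE executables at two thirds of the user address-space ceiling, dividing before multiplying so the constant expression cannot overflow; this is the same convention Linux uses for its `ELF_ET_DYN_BASE`. A sketch with a hypothetical `USER_END_VADDR`:

```rust
const USER_END_VADDR: usize = 0x0000_8000_0000_0000; // hypothetical ceiling

// Divide first, then multiply: /3*2 stays in range for any usize ceiling.
const ELF_ET_DYN_BASE: usize = USER_END_VADDR / 3 * 2;

fn main() {
    assert!(ELF_ET_DYN_BASE < USER_END_VADDR);
    println!("ET_DYN base: {ELF_ET_DYN_BASE:#x}");
}
```
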
/DragonOS/kernel/src/arch/riscv64/

elf.rs
       1  use crate::{arch::MMArch, libs::elf::ElfArch, mm::MemoryManagementArch};
       7  const ELF_ET_DYN_BASE: usize = MMArch::USER_END_VADDR.data() / 3 * 2;
       9  const ELF_PAGE_SIZE: usize = MMArch::PAGE_SIZE;

/DragonOS/kernel/src/arch/x86_64/init/

mod.rs
      24  MMArch,
      56  MMArch::phys_2_virt(PhysAddr::new(&GDT_Table as *const usize as usize)).unwrap();    in kernel_main()
      58  MMArch::phys_2_virt(PhysAddr::new(&IDT_Table as *const usize as usize)).unwrap();    in kernel_main()
      89  MMArch::phys_2_virt(PhysAddr::new(&GDT_Table as *const usize as usize)).unwrap();    in early_setup_arch()
      91  MMArch::phys_2_virt(PhysAddr::new(&IDT_Table as *const usize as usize)).unwrap();    in early_setup_arch()

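The `phys_2_virt` calls above translate the GDT/IDT symbols' physical addresses into the kernel's mapped view. The usual scheme behind such a conversion is a direct map of physical memory at a fixed virtual offset, sketched below; the offset value is invented, and DragonOS's actual mapping layout may differ.

```rust
// Assumed linear-map base on a 64-bit target; not DragonOS's real value.
const PHYS_OFFSET: usize = 0xffff_8000_0000_0000;

/// With all physical memory mapped at a fixed offset, phys -> virt is one
/// addition; None models an unmappable address.
fn phys_2_virt(paddr: usize) -> Option<usize> {
    paddr.checked_add(PHYS_OFFSET)
}

fn main() {
    assert_eq!(phys_2_virt(0x1000), Some(0xffff_8000_0000_1000));
}
```
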
/DragonOS/kernel/src/mm/allocator/

page_frame.rs
       7  arch::{mm::LockedFrameAllocator, MMArch},
      24  number: paddr.data() >> MMArch::PAGE_SHIFT,    in new()
      40  return PhysAddr::new(self.number * MMArch::PAGE_SIZE);    in phys_address()
      99  number: vaddr.data() / MMArch::PAGE_SIZE,    in new()
     111  return VirtAddr::new(self.number * MMArch::PAGE_SIZE);    in virt_address()
     187  return self.0 * MMArch::PAGE_SIZE;    in bytes()
     194  if bytes & MMArch::PAGE_OFFSET_MASK != 0 {    in from_bytes()
     197  return Some(Self(bytes / MMArch::PAGE_SIZE));    in from_bytes()

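These matches are the heart of `page_frame.rs`: a frame number is the address shifted down by `PAGE_SHIFT` (line 24), and the address is recovered by multiplying back (line 40), which drops any in-page offset. A sketch under the usual 4 KiB-page assumption:

```rust
const PAGE_SIZE: usize = 4096; // assumed
const PAGE_SHIFT: usize = 12;

/// Physical page-frame number, modeled on PhysPageFrame.
struct PhysPageFrame {
    number: usize,
}

impl PhysPageFrame {
    fn new(paddr: usize) -> Self {
        Self { number: paddr >> PAGE_SHIFT } // as on line 24
    }

    fn phys_address(&self) -> usize {
        self.number * PAGE_SIZE // as on line 40
    }
}

fn main() {
    let frame = PhysPageFrame::new(0x8765_4321);
    // The round trip truncates the in-page offset.
    assert_eq!(frame.phys_address(), 0x8765_4000);
}
```
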
kernel_allocator.rs
       7  mm::{MMArch, MemoryManagementArch, VirtAddr},
      34  let count = (page_align_up(layout.size()) / MMArch::PAGE_SIZE).next_power_of_two();    in alloc_in_buddy()
      40  let virt_addr = unsafe { MMArch::phys_2_virt(phy_addr).ok_or(AllocError)? };    in alloc_in_buddy()
      48  allocated_frame_count.data() * MMArch::PAGE_SIZE,    in alloc_in_buddy()
      56  let count = (page_align_up(layout.size()) / MMArch::PAGE_SIZE).next_power_of_two();    in free_in_buddy()
      58  let phy_addr = MMArch::virt_2_phys(VirtAddr::new(ptr as usize)).unwrap();    in free_in_buddy()

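`alloc_in_buddy` rounds its page count up to a power of two (lines 34 and 56) because a buddy allocator only hands out power-of-two blocks. A sketch with `page_align_up` inlined, assuming 4 KiB pages:

```rust
const PAGE_SIZE: usize = 4096; // assumed

/// Pages the buddy allocator will actually be asked for.
fn buddy_page_count(layout_size: usize) -> usize {
    let aligned = (layout_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1); // page_align_up
    (aligned / PAGE_SIZE).next_power_of_two()
}

fn main() {
    assert_eq!(buddy_page_count(3 * PAGE_SIZE), 4); // 3 pages -> 4-page block
    assert_eq!(buddy_page_count(PAGE_SIZE + 1), 2);
}
```
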
slab.rs
       7  use crate::{arch::MMArch, mm::MemoryManagementArch, KERNEL_ALLOCATOR};
      92  assert_eq!(base_addr as usize & (MMArch::PAGE_SIZE - 1), 0); // confirm the address is 4 KiB-aligned    in free_slab_page()
      93  assert_eq!(size, MMArch::PAGE_SIZE); // confirm the size of the slab page being freed    in free_slab_page()

/DragonOS/kernel/src/arch/riscv64/driver/

of.rs
      17  let offset = fdt_paddr.data() & crate::arch::MMArch::PAGE_OFFSET_MASK;    in map_fdt()
      19  let map_paddr = PhysAddr::new(fdt_paddr.data() & crate::arch::MMArch::PAGE_MASK);    in map_fdt()

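`map_fdt` splits the flattened device tree's physical address into a page-aligned base (line 19, what actually gets mapped) and an in-page offset (line 17, added back to the resulting virtual address). A sketch of that split, assuming 4 KiB pages:

```rust
const PAGE_SIZE: usize = 4096; // assumed
const PAGE_OFFSET_MASK: usize = PAGE_SIZE - 1;
const PAGE_MASK: usize = !PAGE_OFFSET_MASK;

/// (page-aligned base, in-page offset) for an FDT physical address.
fn split_fdt_paddr(fdt_paddr: usize) -> (usize, usize) {
    (fdt_paddr & PAGE_MASK, fdt_paddr & PAGE_OFFSET_MASK)
}

fn main() {
    let (base, offset) = split_fdt_paddr(0x8200_0c48);
    assert_eq!(base, 0x8200_0000);
    assert_eq!(offset, 0xc48);
}
```
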
/DragonOS/kernel/src/driver/disk/ahci/

mod.rs
       4  use crate::arch::MMArch;
      94  MMArch::virt_2_phys(VirtAddr::new(    in ahci_init()
     101  MMArch::virt_2_phys(VirtAddr::new(ahci_port_base_vaddr + (j << 10)))    in ahci_init()
     107  MMArch::virt_2_phys(VirtAddr::new(    in ahci_init()

/DragonOS/kernel/src/exception/

ipi.rs
       8  arch::MMArch,
      74  unsafe { MMArch::invalidate_all() };    in handle()

/DragonOS/kernel/src/arch/x86_64/init/pvh/

mod.rs
      10  arch::MMArch,
      45  MMArch::phys_2_virt(PhysAddr::new(START_INFO.get().cmdline_paddr as usize))    in init_kernel_cmdline()
      73  MMArch::phys_2_virt(PhysAddr::new(start_info.memmap_paddr as usize)).unwrap()    in early_init_memory_blocks()

/DragonOS/kernel/src/driver/firmware/efi/

init.rs
       8  arch::MMArch,
     110  let size = (md.page_count << (MMArch::PAGE_SHIFT as u64)) as usize;    in efi_find_mirror()
     240  && ((efi_vaddr - md.virt_start) < (md.page_count << (MMArch::PAGE_SHIFT as u64)))    in efi_vaddr_2_paddr()
     260  (md.phys_start + (md.page_count << (MMArch::PAGE_SHIFT as u64))) as usize,    in reserve_memory_regions()
     266  let size = (page_count << (MMArch::PAGE_SHIFT as u64)) as usize;    in reserve_memory_regions()

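EFI memory descriptors record their extent as a page count, so the code above converts to bytes with a left shift by `PAGE_SHIFT` (lines 110, 240, 260, 266). A sketch, assuming 4 KiB pages:

```rust
const PAGE_SHIFT: u64 = 12; // assumed

/// Byte size of an EFI memory descriptor spanning `page_count` pages.
fn md_size_bytes(page_count: u64) -> u64 {
    page_count << PAGE_SHIFT
}

fn main() {
    assert_eq!(md_size_bytes(16), 64 * 1024);
}
```
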