/linux-6.6.21/arch/riscv/mm/ |
D | kasan_init.c |
      126  if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {  in kasan_populate_p4d()
      127  phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);  in kasan_populate_p4d()
      130  memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);  in kasan_populate_p4d()
      203  if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&  in kasan_early_clear_p4d()
      204  (next - vaddr) >= P4D_SIZE) {  in kasan_early_clear_p4d()
      287  if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&  in kasan_early_populate_p4d()
      288  (next - vaddr) >= P4D_SIZE) {  in kasan_early_populate_p4d()
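The riscv KASAN hits above all share one gating pattern: a whole p4d-sized shadow block is only populated or cleared when the entry is still empty, the virtual address is P4D_SIZE-aligned, and at least P4D_SIZE of the range remains before next. A minimal userspace sketch of that predicate, assuming a 39-bit p4d shift; the helper name and the bool stand-in for p4d_none() are hypothetical, not the kernel API:

/*
 * Illustrative userspace sketch, not kernel code.  The helper name and
 * the bool stand-in for p4d_none() are hypothetical; the 39-bit shift
 * (a 512 GiB p4d span) is assumed for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define P4D_SIZE_EXAMPLE  ((uint64_t)1 << 39)
#define IS_ALIGNED(x, a)  (((x) & ((uint64_t)(a) - 1)) == 0)

/* Same three conditions as the kasan_populate_p4d() hit above: empty
 * entry, p4d-aligned start, and at least one full p4d-sized span. */
static bool p4d_block_mappable(bool entry_is_none, uint64_t vaddr,
                               uint64_t next, uint64_t p4d_size)
{
        return entry_is_none &&
               IS_ALIGNED(vaddr, p4d_size) &&
               (next - vaddr) >= p4d_size;
}

int main(void)
{
        uint64_t start = 2 * P4D_SIZE_EXAMPLE;       /* aligned start */
        uint64_t end   = start + P4D_SIZE_EXAMPLE;   /* exactly one block */

        printf("full block  -> %d\n", p4d_block_mappable(true, start, end, P4D_SIZE_EXAMPLE));
        printf("short range -> %d\n", p4d_block_mappable(true, start, end - 1, P4D_SIZE_EXAMPLE));
        return 0;
}

When the check fails, the kernel code falls back to populating the shadow at the lower page-table levels instead of using one large block.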
|
D | init.c |
      590  if (sz == P4D_SIZE) {  in create_p4d_mapping()
      673  if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)  in best_map_size()
      674  return P4D_SIZE;  in best_map_size()
      790  P4D_SIZE, PAGE_TABLE);  in set_satp_mode()
     1142  (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);  in setup_vm()
     1154  (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);  in setup_vm()
|
D | hugetlbpage.c |
      135  return P4D_SIZE - PUD_SIZE;  in hugetlb_mask_last_page()
      234  else if (sz >= P4D_SIZE)  in set_huge_pte_at()
|
D | pageattr.c | 196 if (next - vaddr >= P4D_SIZE && in __split_linear_mapping_p4d()
|
/linux-6.6.21/include/asm-generic/ |
D | pgtable-nop4d.h |
       13  #define P4D_SIZE (1UL << P4D_SHIFT)  macro
       14  #define P4D_MASK (~(P4D_SIZE-1))
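The generic fallback above derives everything from P4D_SHIFT: P4D_SIZE is the span covered by one p4d entry, and P4D_MASK rounds an address down to the start of that span. A small userspace sketch of the arithmetic; the shift value of 39 (a 512 GiB span, as on x86-64) is assumed purely for illustration:

/*
 * Illustrative userspace sketch of the P4D_SIZE/P4D_MASK arithmetic.
 * P4D_SHIFT = 39 (512 GiB per p4d entry, as on x86-64) is assumed here.
 */
#include <inttypes.h>
#include <stdio.h>

#define P4D_SHIFT 39
#define P4D_SIZE  ((uint64_t)1 << P4D_SHIFT)   /* 1UL << P4D_SHIFT in the kernel */
#define P4D_MASK  (~(P4D_SIZE - 1))

int main(void)
{
        uint64_t addr = 0x0000123456789abcULL;

        printf("P4D_SIZE     = 0x%016" PRIx64 " (%" PRIu64 " GiB)\n",
               P4D_SIZE, P4D_SIZE >> 30);
        printf("addr         = 0x%016" PRIx64 "\n", addr);
        printf("p4d start    = 0x%016" PRIx64 "\n", addr & P4D_MASK);   /* round down */
        printf("p4d aligned? = %d\n", (addr & (P4D_SIZE - 1)) == 0);
        return 0;
}

When the p4d level is folded out (the nop4d case above), P4D_SHIFT is simply defined as PGDIR_SHIFT, so the same arithmetic describes a whole top-level entry.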
|
D | tlb.h | 614 if (_sz >= P4D_SIZE) \
|
/linux-6.6.21/arch/x86/include/asm/ |
D | pgtable_64_types.h |
       65  #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)  macro
       66  #define P4D_MASK (~(P4D_SIZE - 1))
|
D | pgtable_areas.h | 19 #define CPU_ENTRY_AREA_MAP_SIZE P4D_SIZE
|
/linux-6.6.21/mm/kasan/ |
D | init.c |
      196  if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {  in zero_p4d_populate()
      441  if (IS_ALIGNED(addr, P4D_SIZE) &&  in kasan_remove_p4d_table()
      442  IS_ALIGNED(next, P4D_SIZE)) {  in kasan_remove_p4d_table()
|
/linux-6.6.21/arch/riscv/include/asm/ |
D | pgtable-64.h |
       33  #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)  macro
       34  #define P4D_MASK (~(P4D_SIZE - 1))
|
/linux-6.6.21/arch/s390/boot/ |
D | vmem.c | 138 IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) { in kasan_p4d_populate_zero_shadow()
|
/linux-6.6.21/arch/x86/mm/ |
D | ident_map.c | 84 next = (addr & P4D_MASK) + P4D_SIZE; in ident_p4d_init()
|
D | mem_encrypt_identity.c |
      275  entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;  in sme_pgtable_calc()
      286  tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;  in sme_pgtable_calc()
|
D | kasan_init_64.c | 188 for (; start < end; start += P4D_SIZE) in clear_pgds()
|
D | init_64.c | 692 vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE; in phys_p4d_init()
|
/linux-6.6.21/mm/ |
D | page_vma_mapped.c | 221 step_forward(pvmw, P4D_SIZE); in page_vma_mapped_walk()
|
D | vmalloc.c |
      237  if ((end - addr) != P4D_SIZE)  in vmap_try_huge_p4d()
      240  if (!IS_ALIGNED(addr, P4D_SIZE))  in vmap_try_huge_p4d()
      243  if (!IS_ALIGNED(phys_addr, P4D_SIZE))  in vmap_try_huge_p4d()
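The vmalloc.c hits above show a stricter gate used before attempting a p4d-level huge mapping: unlike the KASAN case earlier, the range must be exactly one P4D_SIZE, and both the virtual and the physical address must be aligned to it. A sketch of just those three checks, again assuming a 39-bit shift; the function name is hypothetical, and the real vmap_try_huge_p4d() performs further checks that do not appear in this listing:

/*
 * Sketch of the size/alignment gate shown above.  The function name is
 * hypothetical, the 39-bit shift is assumed, and the real
 * vmap_try_huge_p4d() performs further checks not visible in this listing.
 */
#include <stdbool.h>
#include <stdint.h>

#define P4D_SIZE_EXAMPLE  ((uint64_t)1 << 39)
#define IS_ALIGNED(x, a)  (((x) & ((uint64_t)(a) - 1)) == 0)

bool p4d_huge_mapping_ok(uint64_t addr, uint64_t end, uint64_t phys_addr)
{
        if ((end - addr) != P4D_SIZE_EXAMPLE)           /* exactly one p4d span */
                return false;
        if (!IS_ALIGNED(addr, P4D_SIZE_EXAMPLE))        /* virtual alignment */
                return false;
        if (!IS_ALIGNED(phys_addr, P4D_SIZE_EXAMPLE))   /* physical alignment */
                return false;
        return true;
}

The exact-size requirement is what separates this path from the ">= P4D_SIZE" checks in the KASAN code: a huge vmap entry covers exactly one p4d span, never more.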
|
/linux-6.6.21/arch/arm/mm/ |
D | dump.c | 389 addr = start + i * P4D_SIZE; in walk_p4d()
|
/linux-6.6.21/include/linux/ |
D | pgtable.h |
      951  ({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
     1641  #define p4d_leaf_size(x) P4D_SIZE
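The pgtable.h hit at line 951 is the boundary computation inside the generic p4d_addr_end() helper: page-table walkers use it to find the next p4d boundary past addr, clamped to end, so that each loop iteration touches at most one p4d entry. A sketch of that clamping written as a plain function rather than the kernel macro, with an assumed 39-bit shift:

/*
 * Sketch of the p4d_addr_end() clamping as a plain function; the kernel
 * version at line 951 above is a macro.  P4D_SHIFT = 39 is assumed.
 */
#include <inttypes.h>
#include <stdio.h>

#define P4D_SHIFT 39
#define P4D_SIZE  ((uint64_t)1 << P4D_SHIFT)
#define P4D_MASK  (~(P4D_SIZE - 1))

/* Next p4d boundary after addr, clamped to end.  The "- 1" comparison
 * mirrors the kernel macro and stays correct even if the boundary wraps
 * to 0 at the top of the address space. */
static uint64_t p4d_addr_end(uint64_t addr, uint64_t end)
{
        uint64_t boundary = (addr + P4D_SIZE) & P4D_MASK;

        return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
        uint64_t addr = 0x0000000ffff00000ULL;   /* starts mid-entry */
        uint64_t end  = 0x0000011000000000ULL;   /* spans several entries */

        /* Walk the range one p4d entry at a time, the loop shape used by
         * the p4d-level walkers in this listing. */
        do {
                uint64_t next = p4d_addr_end(addr, end);

                printf("[0x%016" PRIx64 " - 0x%016" PRIx64 ")\n", addr, next);
                addr = next;
        } while (addr != end);

        return 0;
}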
|
/linux-6.6.21/arch/powerpc/mm/ptdump/ |
D | hashpagetable.c | 443 addr = start + i * P4D_SIZE; in walk_p4d()
|
/linux-6.6.21/arch/um/kernel/ |
D | tlb.c | 384 last = ADD_ROUND(addr, P4D_SIZE); in flush_tlb_kernel_range_common()
|
/linux-6.6.21/arch/powerpc/mm/book3s64/ |
D | radix_pgtable.c |
      849  if (!IS_ALIGNED(addr, P4D_SIZE) ||  in remove_pagetable()
      850  !IS_ALIGNED(next, P4D_SIZE)) {  in remove_pagetable()
|
/linux-6.6.21/arch/x86/xen/ |
D | mmu_pv.c |
     1109  xen_free_ro_pages(pa, P4D_SIZE);  in xen_cleanmfnmap_p4d()
     1905  n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;  in xen_relocate_p2m()
|
/linux-6.6.21/arch/powerpc/kvm/ |
D | book3s_64_mmu_radix.c | 1383 gpa = (gpa & P4D_MASK) + P4D_SIZE; in debugfs_radix_read()
|
/linux-6.6.21/arch/s390/include/asm/ |
D | pgtable.h | 339 #define P4D_SIZE _REGION2_SIZE macro
|