Lines Matching refs:pgd

The listing below is a cross-reference index of every use of pgd in what appears to be arch/x86/xen/mmu_pv.c, the Xen PV MMU code. Each entry gives the source line number, the matching source line, and the enclosing function; a trailing "argument" or "local" marks entries where pgd is declared as a parameter or a local variable of that function.
366 __visible pgdval_t xen_pgd_val(pgd_t pgd) in xen_pgd_val() argument
368 return pte_mfn_to_pfn(pgd.pgd); in xen_pgd_val()
380 __visible pgd_t xen_make_pgd(pgdval_t pgd) in xen_make_pgd() argument
382 pgd = pte_pfn_to_mfn(pgd); in xen_make_pgd()
383 return native_make_pgd(pgd); in xen_make_pgd()
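
The two hooks above are a matched pair: xen_pgd_val() converts a root-table entry from machine frame numbers (mfn) back to the guest's pseudo-physical frame numbers (pfn) on read, and xen_make_pgd() does the reverse before an entry is stored. Below is a minimal standalone model of that round trip, with toy translation tables; the real pte_mfn_to_pfn()/pte_pfn_to_mfn() consult Xen's p2m/m2p machinery and special-case invalid frames.

#include <stdio.h>

#define FRAME_SHIFT 12
#define FLAGS_MASK  ((1UL << FRAME_SHIFT) - 1)

/* Toy pfn <-> mfn tables; the kernel's live p2m/m2p are huge. */
static const unsigned long p2m[4]  = { 7, 3, 9, 1 };
static const unsigned long m2p[16] = { [7] = 0, [3] = 1, [9] = 2, [1] = 3 };

static unsigned long pte_pfn_to_mfn(unsigned long val)  /* guest -> machine */
{
        return (p2m[val >> FRAME_SHIFT] << FRAME_SHIFT) | (val & FLAGS_MASK);
}

static unsigned long pte_mfn_to_pfn(unsigned long val)  /* machine -> guest */
{
        return (m2p[val >> FRAME_SHIFT] << FRAME_SHIFT) | (val & FLAGS_MASK);
}

int main(void)
{
        unsigned long guest   = (2UL << FRAME_SHIFT) | 0x067;  /* pfn 2 + flags */
        unsigned long machine = pte_pfn_to_mfn(guest);         /* as xen_make_pgd stores it */

        printf("guest %#lx -> machine %#lx -> guest %#lx\n",
               guest, machine, pte_mfn_to_pfn(machine));
        return 0;
}
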
446 static pgd_t *xen_get_user_pgd(pgd_t *pgd) in xen_get_user_pgd() argument
448 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); in xen_get_user_pgd()
449 unsigned offset = pgd - pgd_page; in xen_get_user_pgd()
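
On 64-bit Xen PV every process carries a second root table for user mode, and xen_get_user_pgd() translates a pointer to an entry in the kernel PGD into the matching slot of that companion. Lines 448-449 are pure address arithmetic: round the entry pointer down to its page, then recover the entry index as a pointer difference. A standalone model of just those two steps follows; where the companion page actually lives (recorded on the pgd's struct page in the versions I've checked) is outside the lines shown.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

typedef struct { unsigned long pgd; } pgd_t;

int main(void)
{
        /* A page-aligned 512-entry root table, as on x86-64. */
        static pgd_t table[512] __attribute__((aligned(4096)));
        pgd_t *pgd = &table[273];                        /* some entry pointer */

        pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
        unsigned offset = pgd - pgd_page;                /* pointer diff = index */

        printf("table page %p, entry index %u\n", (void *)pgd_page, offset);
        return 0;
}
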
504 pgd_val.pgd = p4d_val_ma(val); in xen_set_p4d()
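
Line 504 shows level folding at work: with 4-level paging the p4d is folded into the pgd, so xen_set_p4d() builds the value to write as a pgd_t holding the p4d's machine word (p4d_val_ma() keeps the entry in machine-address form). The sketch below shows only the wrapper-type idiom; the types are re-declared as toys here, and the real write then goes down a hypercall path.

#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long p4d; } p4d_t;

int main(void)
{
        p4d_t val = { 0x1234067UL };   /* some entry in machine form */
        pgd_t pgd_val;

        pgd_val.pgd = val.p4d;         /* cf. pgd_val.pgd = p4d_val_ma(val) */
        printf("stored %#lx through the folded level\n", pgd_val.pgd);
        return 0;
}
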
601 static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, in __xen_pgd_walk() argument
627 if (pgd_none(pgd[i])) in __xen_pgd_walk()
630 p4d = p4d_offset(&pgd[i], 0); in __xen_pgd_walk()
636 (*func)(mm, virt_to_page(pgd), PT_PGD); in __xen_pgd_walk()
644 __xen_pgd_walk(mm, mm->pgd, func, limit); in xen_pgd_walk()
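
__xen_pgd_walk() applies a callback to every page-table page reachable from a root: empty slots are skipped (line 627), each populated subtree is descended (line 630 starts the descent), and the root page itself is handed to the callback only after the loop (line 636), which is what lets pinning lock down the children before the root. Below is a two-level toy of that shape, with one struct standing in for the whole pgd/p4d/pud/pmd hierarchy.

#include <stdio.h>

#define ENTRIES 4

struct table { struct table *next[ENTRIES]; };

typedef void (*walk_fn)(struct table *t, int level);

static void walk(struct table *root, walk_fn func)
{
        for (int i = 0; i < ENTRIES; i++) {
                if (!root->next[i])          /* cf. pgd_none() on line 627 */
                        continue;
                func(root->next[i], 1);      /* children first */
        }
        func(root, 0);                       /* root last, cf. line 636 */
}

static void pin_page(struct table *t, int level)
{
        printf("pin %p at level %d\n", (void *)t, level);
}

int main(void)
{
        struct table leaf = { { 0 } };
        struct table root = { { 0 } };

        root.next[2] = &leaf;
        walk(&root, pin_page);
        return 0;
}
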
729 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_pin() argument
731 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_pin()
733 trace_xen_mmu_pgd_pin(mm, pgd); in __xen_pgd_pin()
737 __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT); in __xen_pgd_pin()
739 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_pin()
752 __xen_pgd_pin(mm, mm->pgd); in xen_pgd_pin()
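
__xen_pgd_pin() write-protects everything below the root via the walk (line 737) and only then registers the root with the hypervisor (line 739). The hypercall identifies the page by frame number, which is exactly what PFN_DOWN(__pa(pgd)) produces: the pgd's physical address shifted down by PAGE_SHIFT. A stubbed sketch follows; xen_do_pin() here is a toy stand-in for the real MMUEXT hypercall wrapper.

#include <stdio.h>

#define PAGE_SHIFT  12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

static void xen_do_pin(const char *op, unsigned long pfn)  /* stub hypercall */
{
        printf("%s on frame %#lx\n", op, pfn);
}

int main(void)
{
        unsigned long pgd_pa = 0x1234000UL;   /* pretend __pa(pgd) */

        /* 1. walk: write-protect every page-table page (modelled above) */
        /* 2. hand the root to the hypervisor, by frame number           */
        xen_do_pin("MMUEXT_PIN_L4_TABLE", PFN_DOWN(pgd_pa));
        return 0;
}
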
841 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_unpin() argument
843 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_unpin()
845 trace_xen_mmu_pgd_unpin(mm, pgd); in __xen_pgd_unpin()
849 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_unpin()
857 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); in __xen_pgd_unpin()
864 __xen_pgd_unpin(mm, mm->pgd); in xen_pgd_unpin()
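
Note that unpinning runs in the mirror order: the root is released first (line 849), and only then does the walk restore write access to the individual pages (line 857), since Xen will not let a page that is still part of a pinned tree become writable. A comment-level sketch of the two orderings side by side:

#include <stdio.h>

static void walk(const char *what)  { printf("walk: %s\n", what); }
static void mmuext(const char *op)  { printf("hypercall: %s\n", op); }

int main(void)
{
        /* Pin: children first, root last (lines 737 then 739). */
        walk("write-protect each page-table page");
        mmuext("MMUEXT_PIN_L4_TABLE");

        /* Unpin: root first, children afterwards (lines 849 then 857). */
        mmuext("MMUEXT_UNPIN_TABLE");
        walk("make each page-table page writable again");
        return 0;
}
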
913 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) in drop_mm_ref_this_cpu()
932 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) in xen_drop_mm_ref()
948 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) in xen_drop_mm_ref()
985 if (xen_page_pinned(mm->pgd)) in xen_exit_mmap()
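
Lines 913-948 all apply the same filter: an mm's page tables can only be in active use on a CPU whose saved cr3 (the per-cpu xen_current_cr3) equals __pa(mm->pgd), so only those CPUs need to be dealt with before the pgd is unpinned or torn down in xen_exit_mmap(). A toy model of the scan; the per-cpu variable becomes a plain array here.

#include <stdio.h>

#define NR_CPUS 4

static unsigned long xen_current_cr3[NR_CPUS] = { 0x1000, 0x5000, 0x1000, 0x9000 };

int main(void)
{
        unsigned long pgd_pa = 0x1000UL;   /* pretend __pa(mm->pgd) */

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (xen_current_cr3[cpu] == pgd_pa)
                        printf("cpu %d still references this pgd\n", cpu);
        return 0;
}
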
1120 pgd_t *pgd; in xen_cleanmfnmap() local
1126 pgd = pgd_offset_k(vaddr); in xen_cleanmfnmap()
1127 p4d = p4d_offset(pgd, 0); in xen_cleanmfnmap()
1395 pgd_t *pgd = mm->pgd; in xen_pgd_alloc() local
1396 struct page *page = virt_to_page(pgd); in xen_pgd_alloc()
1400 BUG_ON(PagePinned(virt_to_page(pgd))); in xen_pgd_alloc()
1414 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); in xen_pgd_alloc()
1419 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) in xen_pgd_free() argument
1421 pgd_t *user_pgd = xen_get_user_pgd(pgd); in xen_pgd_free()
1531 bool pinned = xen_page_pinned(mm->pgd); in xen_alloc_ptpage()
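
xen_pgd_alloc()/xen_pgd_free() (lines 1395-1421) set up and tear down the companion user PGD, asserting along the way that the pgd is not pinned while its layout changes. xen_alloc_ptpage() at line 1531 then keys every new page-table page off xen_page_pinned(mm->pgd): a page joining an already-pinned tree must be write-protected and pinned immediately, while pages for a not-yet-pinned mm may stay writable until the whole tree is pinned at activation. A minimal model of that decision:

#include <stdbool.h>
#include <stdio.h>

struct mm { bool pgd_pinned; };

static void alloc_ptpage(const struct mm *mm)
{
        bool pinned = mm->pgd_pinned;    /* cf. xen_page_pinned(mm->pgd) */

        printf("new pt page: %s\n",
               pinned ? "write-protect and pin now" : "leave writable");
}

int main(void)
{
        struct mm fresh = { .pgd_pinned = false };  /* mm still being built */
        struct mm live  = { .pgd_pinned = true  };  /* mm already activated */

        alloc_ptpage(&fresh);
        alloc_ptpage(&live);
        return 0;
}
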
1694 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) in xen_setup_kernel_pagetable() argument
1732 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); in xen_setup_kernel_pagetable()
1735 addr[0] = (unsigned long)pgd; in xen_setup_kernel_pagetable()
1783 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in xen_setup_kernel_pagetable()
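
xen_setup_kernel_pagetable() reads the machine address of the boot L3 out of the Xen-provided pgd (line 1732, where m2v() converts machine to virtual) and eventually unpins that initial pgd so it can be modified (line 1783). pgd_index() selects the root-table slot for a virtual address; with the standard 4-level x86-64 constants the kernel text mapping lands in the very last slot. A runnable check of that index:

#include <stdio.h>

#define PGDIR_SHIFT   39                 /* 4-level x86-64 */
#define PTRS_PER_PGD  512
#define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
        unsigned long start_kernel_map = 0xffffffff80000000UL;  /* __START_KERNEL_map */

        printf("pgd_index(__START_KERNEL_map) = %lu\n",
               pgd_index(start_kernel_map));                    /* prints 511 */
        return 0;
}
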
1840 pgd_t pgd; in xen_early_virt_to_phys() local
1846 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) * in xen_early_virt_to_phys()
1847 sizeof(pgd))); in xen_early_virt_to_phys()
1848 if (!pgd_present(pgd)) in xen_early_virt_to_phys()
1851 pa = pgd_val(pgd) & PTE_PFN_MASK; in xen_early_virt_to_phys()
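
xen_early_virt_to_phys() resolves a virtual address before the normal helpers are usable by walking the tables in software: read the entry's machine word at pa + index * sizeof(entry) (lines 1846-1847), give up if the entry is not present (line 1848), and mask out the next level's physical address (line 1851). Below is a standalone model of one such step; the flat memory array and the planted entry are toys, and PTE_PFN_MASK is the x86-64 value for 52-bit physical addressing.

#include <stdio.h>

#define PGDIR_SHIFT  39
#define PTRS_PER     512
#define PTE_PRESENT  0x1UL
#define PTE_PFN_MASK 0x000ffffffffff000UL   /* frame bits 12..51 */

static unsigned long memory[1024];          /* toy physical memory, in words */

static unsigned long read_phys_ulong(unsigned long pa)
{
        return memory[pa / sizeof(unsigned long)];
}

int main(void)
{
        unsigned long vaddr = 0xffffffff80000000UL;
        unsigned long pa    = 0;             /* toy: root table at pa 0 */
        unsigned long idx   = (vaddr >> PGDIR_SHIFT) & (PTRS_PER - 1);
        unsigned long pgd;

        memory[idx] = 0x2000UL | PTE_PRESENT;     /* plant one present entry */

        pgd = read_phys_ulong(pa + idx * sizeof(unsigned long));
        if (!(pgd & PTE_PRESENT))                 /* cf. !pgd_present(pgd) */
                return 1;
        pa = pgd & PTE_PFN_MASK;                  /* cf. line 1851 */
        printf("next level's table at pa %#lx\n", pa);
        return 0;
}
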
1889 pgd_t *pgd; in xen_relocate_p2m() local
1918 pgd = __va(read_cr3_pa()); in xen_relocate_p2m()
1958 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); in xen_relocate_p2m()
1975 set_pgd(pgd + 1, __pgd(0)); in xen_relocate_p2m()
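
xen_relocate_p2m() writes root entries directly while rebuilding mappings: an entry is just the next table's physical address OR'd with _PAGE_TABLE (line 1958), and __pgd(0) clears a slot (line 1975). A sketch with a plain-store set_pgd() follows; _PAGE_TABLE's value matches x86 (P|RW|US|A|D), but the real Xen set_pgd goes through validated, machine-address hypercall writes rather than a direct store.

#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;

#define _PAGE_TABLE 0x067UL              /* PRESENT|RW|USER|ACCESSED|DIRTY */
#define __pgd(x)    ((pgd_t){ (x) })

static void set_pgd(pgd_t *slot, pgd_t val)   /* toy: direct store */
{
        *slot = val;
}

int main(void)
{
        static pgd_t root[512];
        unsigned long pud_phys = 0x7f3000UL;    /* pretend next-level table */

        set_pgd(&root[2], __pgd(_PAGE_TABLE | pud_phys));  /* cf. line 1958 */
        set_pgd(&root[1], __pgd(0));                       /* cf. line 1975 */

        printf("root[2] = %#lx\n", root[2].pgd);
        return 0;
}
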