/linux-6.1.9/mm/ |
D | userfaultfd.c |
      31  struct vm_area_struct *dst_vma;                  in find_dst_vma() local
      33  dst_vma = find_vma(dst_mm, dst_start);           in find_dst_vma()
      34  if (!dst_vma)                                    in find_dst_vma()
      37  if (dst_start < dst_vma->vm_start ||             in find_dst_vma()
      38      dst_start + len > dst_vma->vm_end)           in find_dst_vma()
      46  if (!dst_vma->vm_userfaultfd_ctx.ctx)            in find_dst_vma()
      49  return dst_vma;                                  in find_dst_vma()
      59  struct vm_area_struct *dst_vma,                  in mfill_atomic_install_pte() argument
      65  bool writable = dst_vma->vm_flags & VM_WRITE;    in mfill_atomic_install_pte()
      66  bool vm_shared = dst_vma->vm_flags & VM_SHARED;  in mfill_atomic_install_pte()
      [all …]
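These hits are the UFFDIO_COPY path: find_dst_vma() checks that the destination range lies inside a single VMA that has a userfaultfd context attached, and mfill_atomic_install_pte() installs the freshly filled page. A minimal userspace sketch that drives this path (a sketch, not the kernel code: assumes userfaultfd is available to the caller, e.g. vm.unprivileged_userfaultfd=1 or CAP_SYS_PTRACE on newer kernels; error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);

    int uffd = syscall(SYS_userfaultfd, O_CLOEXEC);
    struct uffdio_api api = { .api = UFFD_API };
    ioctl(uffd, UFFDIO_API, &api);

    /* destination: anonymous VMA, registered for missing-page faults */
    char *dst = mmap(NULL, page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    struct uffdio_register reg = {
        .range = { .start = (unsigned long)dst, .len = page },
        .mode  = UFFDIO_REGISTER_MODE_MISSING,  /* sets vma->vm_userfaultfd_ctx */
    };
    ioctl(uffd, UFFDIO_REGISTER, &reg);

    /* source page whose contents the kernel copies in atomically */
    char *src = mmap(NULL, page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    memset(src, 'A', page);

    /* UFFDIO_COPY -> find_dst_vma() -> mfill_atomic_install_pte() */
    struct uffdio_copy copy = {
        .dst = (unsigned long)dst,
        .src = (unsigned long)src,
        .len = page,
    };
    if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
        perror("UFFDIO_COPY");
    printf("dst[0] = %c\n", dst[0]);   /* 'A', installed by the kernel */
    return 0;
}

UFFDIO_COPY fills not-yet-present pages atomically, so it can be issued before any fault is pending; the range check at lines 37-38 above is why .dst/.len must stay inside one registered VMA.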
D | memory.c |
     786  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,       in copy_nonpresent_pte() argument
     789  unsigned long vm_flags = dst_vma->vm_flags;                           in copy_nonpresent_pte()
     882  WARN_ON_ONCE(!userfaultfd_wp(dst_vma));                               in copy_nonpresent_pte()
     886  if (!userfaultfd_wp(dst_vma))                                         in copy_nonpresent_pte()
     905  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  in copy_present_page() argument
     923  page_add_new_anon_rmap(new_page, dst_vma, addr);                      in copy_present_page()
     924  lru_cache_add_inactive_or_unevictable(new_page, dst_vma);             in copy_present_page()
     928  pte = mk_pte(new_page, dst_vma->vm_page_prot);                        in copy_present_page()
     929  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);                       in copy_present_page()
     930  if (userfaultfd_pte_wp(dst_vma, *src_pte))                            in copy_present_page()
     [all …]
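These hits sit in the fork-time page-table copy (copy_page_range() and its helpers): dst_vma supplies the child's VM flags, page protections, and userfaultfd write-protect state; copy_nonpresent_pte() handles swap and migration entries, while copy_present_page() is the eager-copy fallback used when a present page cannot simply be write-protected for copy-on-write (e.g. it is pinned). A small userspace probe of the common write-protect path, observing only the visible COW effect (a sketch; error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    strcpy(p, "parent");

    pid_t pid = fork();   /* copy_page_range(dst_vma, src_vma) runs here */
    if (pid == 0) {
        strcpy(p, "child");   /* write fault: the page is only copied now */
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("parent still sees: %s\n", p);   /* "parent" */
    return 0;
}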
D | hugetlb.c |
    4952  struct vm_area_struct *dst_vma,                     in copy_hugetlb_page_range() argument
    4991  dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);   in copy_hugetlb_page_range()
    5023  if (!userfaultfd_wp(dst_vma) && uffd_wp)            in copy_hugetlb_page_range()
    5042  if (!userfaultfd_wp(dst_vma) && uffd_wp)            in copy_hugetlb_page_range()
    5050  if (userfaultfd_wp(dst_vma))                        in copy_hugetlb_page_range()
    5077  new = alloc_huge_page(dst_vma, addr, 1);            in copy_hugetlb_page_range()
    5083  copy_user_huge_page(new, ptepage, addr, dst_vma,    in copy_hugetlb_page_range()
    5093  restore_reserve_on_error(h, dst_vma, addr,          in copy_hugetlb_page_range()
    5099  hugetlb_install_page(dst_vma, dst_pte, addr, new);  in copy_hugetlb_page_range()
    6168  struct vm_area_struct *dst_vma,                     in hugetlb_mcopy_atomic_pte() argument
    [all …]
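copy_hugetlb_page_range() is the hugetlb counterpart of the same fork path; like memory.c it prefers sharing the page with write-protection, and the alloc_huge_page() + copy_user_huge_page() + hugetlb_install_page() lines above are the eager-copy fallback for pages that cannot be shared. hugetlb_mcopy_atomic_pte() at line 6168 is the hugetlb branch of UFFDIO_COPY. A sketch of the userspace trigger for the fork path (assumes 2 MiB default huge pages with a few reserved, e.g. echo 4 > /proc/sys/vm/nr_hugepages; the child's COW fault needs spare pool pages):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define HPAGE (2UL * 1024 * 1024)   /* assumed default huge page size */

int main(void)
{
    char *p = mmap(NULL, HPAGE, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap(MAP_HUGETLB)");   /* likely: no huge pages reserved */
        return 1;
    }
    strcpy(p, "parent");

    pid_t pid = fork();   /* runs copy_hugetlb_page_range(..., dst_vma, ...) */
    if (pid == 0) {
        strcpy(p, "child");   /* COW fault resolved from the hugetlb pool */
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("parent sees: %s\n", p);   /* still "parent" */
    return 0;
}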
D | huge_memory.c |
    1077  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)  in copy_huge_pmd() argument
    1086  if (!vma_is_anonymous(dst_vma))                                   in copy_huge_pmd()
    1118  if (!userfaultfd_wp(dst_vma))                                     in copy_huge_pmd()
    1163  if (!userfaultfd_wp(dst_vma))                                     in copy_huge_pmd()
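copy_huge_pmd() copies a PMD-mapped transparent huge page at fork; the vma_is_anonymous(dst_vma) check at line 1086 means only anonymous THP is copied here, and the userfaultfd_wp(dst_vma) checks drop uffd write-protect bits when the child VMA is not uffd-wp registered. A sketch of the trigger (assumes THP enabled in "madvise" or "always" mode; whether the region is actually PMD-backed depends on alignment and huge page availability, so this is best-effort):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define SZ (2UL * 1024 * 1024)

int main(void)
{
    char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    madvise(p, SZ, MADV_HUGEPAGE);   /* hint: back this range with a THP */
    memset(p, 'x', SZ);              /* fault it in, ideally as one PMD */

    pid_t pid = fork();   /* a PMD-mapped range goes through copy_huge_pmd() */
    if (pid == 0) {
        p[0] = 'y';       /* child's write is resolved by split/copy on fault */
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("parent sees: %c\n", p[0]);   /* 'x' */
    return 0;
}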
D | shmem.c |
    2399  struct vm_area_struct *dst_vma,                                      in shmem_mfill_atomic_pte() argument
    2405  struct inode *inode = file_inode(dst_vma->vm_file);                  in shmem_mfill_atomic_pte()
    2409  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);                in shmem_mfill_atomic_pte()
    2492  ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,   in shmem_mfill_atomic_pte()
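shmem_mfill_atomic_pte() is the shmem/tmpfs branch of UFFDIO_COPY: it resolves the page cache slot from dst_vma->vm_file and the linear page index, allocates and fills the page there, then (the line 2492 hit) reuses mfill_atomic_install_pte() from userfaultfd.c to map it. A sketch using a memfd as the shmem object (assumes userfaultfd shmem support, Linux 4.11+; error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);

    /* shmem object: dst_vma->vm_file in the snippet above is this memfd */
    int fd = memfd_create("uffd-shmem", 0);
    ftruncate(fd, page);
    char *dst = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    int uffd = syscall(SYS_userfaultfd, O_CLOEXEC);
    struct uffdio_api api = { .api = UFFD_API };
    ioctl(uffd, UFFDIO_API, &api);
    struct uffdio_register reg = {
        .range = { .start = (unsigned long)dst, .len = page },
        .mode  = UFFDIO_REGISTER_MODE_MISSING,
    };
    ioctl(uffd, UFFDIO_REGISTER, &reg);

    char *src = mmap(NULL, page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    memset(src, 'S', page);

    /* shmem-backed dst: this UFFDIO_COPY lands in shmem_mfill_atomic_pte() */
    struct uffdio_copy copy = {
        .dst = (unsigned long)dst,
        .src = (unsigned long)src,
        .len = page,
    };
    if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
        perror("UFFDIO_COPY");
    printf("dst[0] = %c\n", dst[0]);   /* 'S' */
    return 0;
}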
/linux-6.1.9/include/linux/ |
D | shmem_fs.h |
     153  struct vm_area_struct *dst_vma,
     159  #define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \   argument
D | userfaultfd_k.h |
      60  struct vm_area_struct *dst_vma,
D | hugetlb.h |
     171  struct vm_area_struct *dst_vma,
     293  struct vm_area_struct *dst_vma,   in copy_hugetlb_page_range() argument
     405  struct vm_area_struct *dst_vma,   in hugetlb_mcopy_atomic_pte() argument
D | huge_mm.h |
      13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
D | mm.h |
    1902  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);