Lines matching refs:vma (references to `vma` in mm/memory.c)

397 struct vm_area_struct *vma, unsigned long floor, in free_pgtables() argument
400 MA_STATE(mas, mt, vma->vm_end, vma->vm_end); in free_pgtables()
403 unsigned long addr = vma->vm_start; in free_pgtables()
416 unlink_anon_vmas(vma); in free_pgtables()
417 unlink_file_vma(vma); in free_pgtables()
419 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
420 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
426 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
428 vma = next; in free_pgtables()
430 unlink_anon_vmas(vma); in free_pgtables()
431 unlink_file_vma(vma); in free_pgtables()
433 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
436 vma = next; in free_pgtables()
437 } while (vma); in free_pgtables()
519 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
522 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
551 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
552 index = linear_page_index(vma, addr); in print_bad_pte()
560 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
562 vma->vm_file, in print_bad_pte()
563 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
564 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
612 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
620 if (vma->vm_ops && vma->vm_ops->find_special_page) in vm_normal_page()
621 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
622 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
637 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
643 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
644 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
650 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
651 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
653 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
663 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
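The vm_normal_page() references above resolve a pte to its backing struct page, returning NULL for special PFN/MIXEDMAP entries. As an illustration only (not part of the listing; all names are hypothetical), a caller inside the kernel might use it from a page-table walk like this:

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Illustrative sketch: count ptes in [start, end) that map a "normal" page,
 * i.e. one with a struct page, skipping special PFN/MIXEDMAP entries. */
static int count_normal_pte(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;
	struct page *page;

	if (!pte_present(*pte))
		return 0;
	page = vm_normal_page(walk->vma, addr, *pte);
	if (page)
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_normal_ops = {
	.pte_entry = count_normal_pte,	/* called with the pte lock held */
};

static unsigned long count_normal_pages(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	mmap_read_lock(mm);	/* walk_page_range() expects the mmap lock */
	walk_page_range(mm, start, end, &count_normal_ops, &count);
	mmap_read_unlock(mm);
	return count;
}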
676 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
686 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
687 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
693 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
694 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
696 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
717 static void restore_exclusive_pte(struct vm_area_struct *vma, in restore_exclusive_pte() argument
724 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in restore_exclusive_pte()
732 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in restore_exclusive_pte()
741 page_add_anon_rmap(page, vma, address, RMAP_NONE); in restore_exclusive_pte()
749 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
755 update_mmu_cache(vma, address, ptep); in restore_exclusive_pte()
763 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, in try_restore_exclusive_pte() argument
770 restore_exclusive_pte(vma, page, addr, src_pte); in try_restore_exclusive_pte()
999 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma, in page_copy_prealloc() argument
1004 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr); in page_copy_prealloc()
1383 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, in zap_install_uffd_wp_if_needed() argument
1391 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); in zap_install_uffd_wp_if_needed()
1396 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1426 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1432 zap_install_uffd_wp_if_needed(vma, addr, pte, details, in zap_pte_range()
1443 likely(!(vma->vm_flags & VM_SEQ_READ))) in zap_pte_range()
1447 page_remove_rmap(page, vma, false); in zap_pte_range()
1449 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1470 WARN_ON_ONCE(!vma_is_anonymous(vma)); in zap_pte_range()
1473 page_remove_rmap(page, vma, false); in zap_pte_range()
1481 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1500 zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); in zap_pte_range()
1531 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1543 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1544 else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1568 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1577 struct vm_area_struct *vma, p4d_t *p4d, in zap_pud_range() argument
1590 split_huge_pud(vma, pud, addr); in zap_pud_range()
1591 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1597 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1606 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
1618 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1625 struct vm_area_struct *vma, in unmap_page_range() argument
1633 tlb_start_vma(tlb, vma); in unmap_page_range()
1634 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1639 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1641 tlb_end_vma(tlb, vma); in unmap_page_range()
1646 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1650 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1653 if (start >= vma->vm_end) in unmap_single_vma()
1655 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1656 if (end <= vma->vm_start) in unmap_single_vma()
1659 if (vma->vm_file) in unmap_single_vma()
1660 uprobe_munmap(vma, start, end); in unmap_single_vma()
1662 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1663 untrack_pfn(vma, 0, 0); in unmap_single_vma()
1666 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1678 if (vma->vm_file) { in unmap_single_vma()
1681 __unmap_hugepage_range_final(tlb, vma, start, end, in unmap_single_vma()
1685 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1709 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1718 MA_STATE(mas, mt, vma->vm_end, vma->vm_end); in unmap_vmas()
1720 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in unmap_vmas()
1724 unmap_single_vma(tlb, vma, start_addr, end_addr, &details); in unmap_vmas()
1725 } while ((vma = mas_find(&mas, end_addr - 1)) != NULL); in unmap_vmas()
1737 void zap_page_range(struct vm_area_struct *vma, unsigned long start, in zap_page_range() argument
1740 struct maple_tree *mt = &vma->vm_mm->mm_mt; in zap_page_range()
1744 MA_STATE(mas, mt, vma->vm_end, vma->vm_end); in zap_page_range()
1747 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range()
1749 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range()
1750 update_hiwater_rss(vma->vm_mm); in zap_page_range()
1753 unmap_single_vma(&tlb, vma, start, range.end, NULL); in zap_page_range()
1754 } while ((vma = mas_find(&mas, end - 1)) != NULL); in zap_page_range()
1768 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1776 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range_single()
1778 if (is_vm_hugetlb_page(vma)) in zap_page_range_single()
1779 adjust_range_if_pmd_sharing_possible(vma, &range.start, in zap_page_range_single()
1781 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
1782 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1788 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1804 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1807 if (!range_in_vma(vma, address, address + size) || in zap_vma_ptes()
1808 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1811 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
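zap_vma_ptes() above is the exported entry point a driver uses to tear down ptes in a VM_PFNMAP mapping it created. A hedged sketch of such a revocation path (driver name hypothetical; typically called with the mmap lock of vma->vm_mm held):

/* Illustrative only: a driver that set up a VM_PFNMAP mapping at mmap time
 * removes the user's ptes when the underlying resource is revoked. */
static void mydrv_revoke_user_mapping(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}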
1855 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_into_pte_locked() argument
1862 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in insert_page_into_pte_locked()
1863 page_add_file_rmap(page, vma, false); in insert_page_into_pte_locked()
1864 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); in insert_page_into_pte_locked()
1875 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1886 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
1889 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page()
1896 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_in_batch_locked() argument
1906 return insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page_in_batch_locked()
1912 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, in insert_pages() argument
1918 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
1943 int err = insert_page_in_batch_locked(vma, pte, in insert_pages()
1982 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
1988 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
1990 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_pages()
1991 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
1992 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_pages()
1993 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_pages()
1996 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
2002 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); in vm_insert_pages()
2041 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
2044 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
2048 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
2049 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2050 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
2051 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_page()
2053 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
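As the checks above show, vm_insert_page() validates the address and sets VM_MIXEDMAP on first use. Purely as an illustration (the mydrv structure is hypothetical), a minimal mmap handler built on it:

/* Illustrative only: export a single order-0 kernel page to user space.
 * The page is assumed to have been allocated by the driver with alloc_page(). */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *drv = file->private_data;	/* hypothetical driver state */

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	return vm_insert_page(vma, vma->vm_start, drv->page);
}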
2068 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, in __vm_map_pages() argument
2071 unsigned long count = vma_pages(vma); in __vm_map_pages()
2072 unsigned long uaddr = vma->vm_start; in __vm_map_pages()
2084 ret = vm_insert_page(vma, uaddr, pages[offset + i]); in __vm_map_pages()
2111 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
2114 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); in vm_map_pages()
2131 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
2134 return __vm_map_pages(vma, pages, num, 0); in vm_map_pages_zero()
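vm_map_pages() and vm_map_pages_zero() wrap the same insertion path for an array of pages. A hypothetical handler for a preallocated buffer might look like this (names assumed):

/* Illustrative only: map a driver-owned page array; vm_map_pages() takes
 * vma->vm_pgoff into account and fails if the vma is larger than the array. */
static int mydrv_mmap_ring(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *drv = file->private_data;	/* hypothetical driver state */

	return vm_map_pages(vma, drv->pages, drv->nr_pages);
}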
2138 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
2141 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2165 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2166 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
2167 update_mmu_cache(vma, addr, pte); in insert_pfn()
2180 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2184 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
2212 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
2221 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_prot()
2222 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_prot()
2224 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_prot()
2225 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_prot()
2227 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2233 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); in vmf_insert_pfn_prot()
2235 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vmf_insert_pfn_prot()
2260 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
2263 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
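vmf_insert_pfn() and vmf_insert_pfn_prot() are meant to be called from a vm_operations_struct .fault handler on a VM_PFNMAP vma. A hedged sketch (layout and names assumed):

/* Illustrative only: fault in one pfn of a device region on demand.
 * The vma is expected to have VM_PFNMAP | VM_IO set at mmap time. */
static vm_fault_t mydrv_fault(struct vm_fault *vmf)
{
	struct mydrv *drv = vmf->vma->vm_private_data;	/* hypothetical */
	unsigned long pfn = drv->base_pfn + vmf->pgoff;	/* assumed layout */

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct mydrv_vm_ops = {
	.fault = mydrv_fault,
};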
2267 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) in vm_mixed_ok() argument
2270 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
2281 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, in __vm_insert_mixed() argument
2287 BUG_ON(!vm_mixed_ok(vma, pfn)); in __vm_insert_mixed()
2289 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2292 track_pfn_insert(vma, &pgprot, pfn); in __vm_insert_mixed()
2314 err = insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
2316 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
2353 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed_prot() argument
2356 return __vm_insert_mixed(vma, addr, pfn, pgprot, false); in vmf_insert_mixed_prot()
2360 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
2363 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false); in vmf_insert_mixed()
2372 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, in vmf_insert_mixed_mkwrite() argument
2375 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true); in vmf_insert_mixed_mkwrite()
2481 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_notrack() argument
2487 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_notrack()
2511 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range_notrack()
2512 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range_notrack()
2514 vma->vm_pgoff = pfn; in remap_pfn_range_notrack()
2517 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range_notrack()
2522 flush_cache_range(vma, addr, end); in remap_pfn_range_notrack()
2546 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
2551 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
2555 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); in remap_pfn_range()
2557 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); in remap_pfn_range()
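remap_pfn_range() above is the whole-range counterpart, usually called once from a driver's mmap method. The canonical pattern looks roughly like this (MYDRV_PHYS_BASE is a placeholder for the device's physical base address):

/* Illustrative only: map a physically contiguous MMIO region at mmap time. */
static int mydrv_mmap_mmio(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       MYDRV_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}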
2577 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
2596 if (vma->vm_pgoff > pages) in vm_iomap_memory()
2598 pfn += vma->vm_pgoff; in vm_iomap_memory()
2599 pages -= vma->vm_pgoff; in vm_iomap_memory()
2602 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2607 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
2839 spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_unmap_same()
2857 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user() local
2858 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
2862 copy_user_highpage(dst, src, addr, vma); in __wp_page_copy_user()
2889 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2895 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
2896 update_mmu_cache(vma, addr, vmf->pte); in __wp_page_copy_user()
2914 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2945 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) in __get_fault_gfp_mask() argument
2947 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
2973 if (vmf->vma->vm_file && in do_page_mkwrite()
2974 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2977 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3001 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
3005 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
3019 file_update_time(vma->vm_file); in fault_dirty_shared_page()
3055 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3070 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3072 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
3073 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3074 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
3099 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
3100 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3109 if (unlikely(anon_vma_prepare(vma))) in wp_page_copy()
3113 new_page = alloc_zeroed_user_highpage_movable(vma, in wp_page_copy()
3118 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, in wp_page_copy()
3146 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in wp_page_copy()
3165 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3166 entry = mk_pte(new_page, vma->vm_page_prot); in wp_page_copy()
3174 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
3184 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
3185 page_add_new_anon_rmap(new_page, vma, vmf->address); in wp_page_copy()
3186 lru_cache_add_inactive_or_unevictable(new_page, vma); in wp_page_copy()
3194 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
3218 page_remove_rmap(old_page, vma, false); in wp_page_copy()
3225 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3273 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3274 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3281 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3295 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
3297 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3302 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3314 struct vm_area_struct *vma = vmf->vma; in wp_page_shared() local
3319 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
3371 struct vm_area_struct *vma = vmf->vma; in do_wp_page() local
3378 if (userfaultfd_pte_wp(vma, *vmf->pte)) { in do_wp_page()
3387 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3388 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3389 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3392 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3407 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
3458 page_move_anon_rmap(vmf->page, vma); in do_wp_page()
3471 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
3489 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
3493 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
3501 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
3504 vma_interval_tree_foreach(vma, root, first_index, last_index) { in unmap_mapping_range_tree()
3505 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
3506 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
3510 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
3511 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3512 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3623 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry() local
3626 if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) in remove_device_exclusive_entry()
3628 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, in remove_device_exclusive_entry()
3629 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3633 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3636 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); in remove_device_exclusive_entry()
3646 struct vm_area_struct *vma, in should_try_to_free_swap() argument
3651 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || in should_try_to_free_swap()
3666 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3673 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
3690 if (unlikely(!userfaultfd_wp(vmf->vma) || vma_is_anonymous(vmf->vma))) in pte_marker_handle_uffd_wp()
3707 if (WARN_ON_ONCE(vma_is_anonymous(vmf->vma) || !marker)) in handle_pte_marker()
3727 struct vm_area_struct *vma = vmf->vma; in do_swap_page() local
3745 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3752 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3774 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3785 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
3795 vma, vmf->address, false); in do_swap_page()
3802 vma->vm_mm, GFP_KERNEL, in do_swap_page()
3833 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3843 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
3853 locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags); in do_swap_page()
3877 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3900 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3967 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
3970 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3971 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
3972 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
3983 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in do_swap_page()
3989 flush_icache_page(vma, page); in do_swap_page()
4000 page_add_new_anon_rmap(page, vma, vmf->address); in do_swap_page()
4001 folio_add_lru_vma(folio, vma); in do_swap_page()
4003 page_add_anon_rmap(page, vma, vmf->address, rmap_flags); in do_swap_page()
4008 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
4009 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
4033 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
4062 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page() local
4068 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
4081 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4090 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4092 vma->vm_page_prot)); in do_anonymous_page()
4093 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4096 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4099 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4103 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4111 if (unlikely(anon_vma_prepare(vma))) in do_anonymous_page()
4113 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
4117 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
4128 entry = mk_pte(page, vma->vm_page_prot); in do_anonymous_page()
4130 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
4133 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
4136 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4140 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4145 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4151 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
4152 page_add_new_anon_rmap(page, vma, vmf->address); in do_anonymous_page()
4153 lru_cache_add_inactive_or_unevictable(page, vma); in do_anonymous_page()
4155 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4158 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
4178 struct vm_area_struct *vma = vmf->vma; in __do_fault() local
4197 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4202 ret = vma->vm_ops->fault(vmf); in __do_fault()
4235 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte() local
4237 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4242 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4248 struct vm_area_struct *vma = vmf->vma; in do_set_pmd() local
4255 if (!transhuge_vma_suitable(vma, haddr)) in do_set_pmd()
4276 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4281 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4286 flush_icache_page(vma, page + i); in do_set_pmd()
4288 entry = mk_huge_pmd(page, vma->vm_page_prot); in do_set_pmd()
4290 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_set_pmd()
4292 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
4293 page_add_file_rmap(page, vma, true); in do_set_pmd()
4301 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4303 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
4321 struct vm_area_struct *vma = vmf->vma; in do_set_pte() local
4327 flush_icache_page(vma, page); in do_set_pte()
4328 entry = mk_pte(page, vma->vm_page_prot); in do_set_pte()
4336 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_set_pte()
4340 if (write && !(vma->vm_flags & VM_SHARED)) { in do_set_pte()
4341 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
4342 page_add_new_anon_rmap(page, vma, addr); in do_set_pte()
4343 lru_cache_add_inactive_or_unevictable(page, vma); in do_set_pte()
4345 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in do_set_pte()
4346 page_add_file_rmap(page, vma, false); in do_set_pte()
4348 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); in do_set_pte()
4376 struct vm_area_struct *vma = vmf->vma; in finish_fault() local
4381 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) in finish_fault()
4390 if (!(vma->vm_flags & VM_SHARED)) { in finish_fault()
4391 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
4404 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
4405 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
4416 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
4424 update_mmu_cache(vma, vmf->address, vmf->pte); in finish_fault()
4428 update_mmu_tlb(vma, vmf->address, vmf->pte); in finish_fault()
4502 address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
4513 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
4517 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4522 return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
4529 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
4532 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
4566 struct vm_area_struct *vma = vmf->vma; in do_cow_fault() local
4569 if (unlikely(anon_vma_prepare(vma))) in do_cow_fault()
4572 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
4576 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, in do_cow_fault()
4589 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4605 struct vm_area_struct *vma = vmf->vma; in do_shared_fault() local
4616 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
4648 struct vm_area_struct *vma = vmf->vma; in do_fault() local
4649 struct mm_struct *vm_mm = vma->vm_mm; in do_fault()
4655 if (!vma->vm_ops->fault) { in do_fault()
4663 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
4683 else if (!(vma->vm_flags & VM_SHARED)) in do_fault()
4696 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, in numa_migrate_prep() argument
4707 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
4712 struct vm_area_struct *vma = vmf->vma; in do_numa_page() local
4726 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
4735 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
4737 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
4760 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) in do_numa_page()
4773 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
4782 if (migrate_misplaced_page(page, vma, target_nid)) { in do_numa_page()
4805 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
4806 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
4810 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
4811 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
4818 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
4820 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
4821 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
4830 if (vma_is_anonymous(vmf->vma)) { in wp_huge_pmd()
4832 userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd)) in wp_huge_pmd()
4836 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pmd()
4837 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
4844 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4854 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
4856 if (vmf->vma->vm_ops->huge_fault) in create_huge_pud()
4857 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
4867 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
4869 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pud()
4870 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
4877 __split_huge_pud(vmf->vma, vmf->pud, vmf->address); in wp_huge_pud()
4951 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
4960 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
4963 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
4967 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4977 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
4979 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4991 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
5004 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, in __handle_mm_fault() argument
5008 .vma = vma, in __handle_mm_fault()
5012 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
5013 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()
5015 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5016 unsigned long vm_flags = vma->vm_flags; in __handle_mm_fault()
5031 hugepage_vma_check(vma, vm_flags, false, true, true)) { in __handle_mm_fault()
5065 hugepage_vma_check(vma, vm_flags, false, true, true)) { in __handle_mm_fault()
5081 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
5161 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
5164 current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)); in lru_gen_enter_fault()
5172 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
5187 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
5195 count_memcg_event_mm(vma->vm_mm, PGFAULT); in handle_mm_fault()
5200 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, in handle_mm_fault()
5212 lru_gen_enter_fault(vma); in handle_mm_fault()
5214 if (unlikely(is_vm_hugetlb_page(vma))) in handle_mm_fault()
5215 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
5217 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
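handle_mm_fault() above is the architecture-independent entry point. A simplified sketch of how an arch fault handler drives it, with locking and retry handling reduced to the essentials (no stack expansion, signal or OOM handling):

/* Illustrative only: the core of a page-fault path.  Real arch code also
 * handles stack growth, retries, signals and fatal faults; this sketch does not. */
static vm_fault_t fault_in_one_address(struct mm_struct *mm, unsigned long address,
				       unsigned int flags, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, address);
	if (!vma) {
		mmap_read_unlock(mm);
		return VM_FAULT_SIGSEGV;
	}

	fault = handle_mm_fault(vma, address, flags, regs);
	/* On VM_FAULT_RETRY or VM_FAULT_COMPLETED the core has already
	 * dropped the mmap lock; everything else still holds it here. */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);
	return fault;
}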
5383 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
5390 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
5393 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
5403 int follow_phys(struct vm_area_struct *vma, in follow_phys() argument
5411 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
5414 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
5443 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
5454 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in generic_access_phys()
5458 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
5473 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
5503 struct vm_area_struct *vma; in __access_remote_vm() local
5517 gup_flags, &page, &vma, NULL); in __access_remote_vm()
5526 vma = vma_lookup(mm, addr); in __access_remote_vm()
5527 if (!vma) in __access_remote_vm()
5529 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
5530 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
5544 copy_to_user_page(vma, page, addr, in __access_remote_vm()
5548 copy_from_user_page(vma, page, addr, in __access_remote_vm()
5610 struct vm_area_struct *vma; in print_vma_addr() local
5618 vma = find_vma(mm, ip); in print_vma_addr()
5619 if (vma && vma->vm_file) { in print_vma_addr()
5620 struct file *f = vma->vm_file; in print_vma_addr()
5629 vma->vm_start, in print_vma_addr()
5630 vma->vm_end - vma->vm_start); in print_vma_addr()
5741 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
5753 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
5760 struct vm_area_struct *vma; member
5768 addr, copy_arg->vma); in copy_subpage()
5772 unsigned long addr_hint, struct vm_area_struct *vma, in copy_user_huge_page() argument
5780 .vma = vma, in copy_user_huge_page()
5784 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()