Lines matching refs:vma in mm/rmap.c
150 static void anon_vma_chain_link(struct vm_area_struct *vma, in anon_vma_chain_link() argument
154 avc->vma = vma; in anon_vma_chain_link()
156 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
188 int __anon_vma_prepare(struct vm_area_struct *vma) in __anon_vma_prepare() argument
190 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
200 anon_vma = find_mergeable_anon_vma(vma); in __anon_vma_prepare()
213 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
214 vma->anon_vma = anon_vma; in __anon_vma_prepare()
215 anon_vma_chain_link(vma, avc, anon_vma); in __anon_vma_prepare()
335 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
346 vma->anon_vma = NULL; in anon_vma_fork()
352 error = anon_vma_clone(vma, pvma); in anon_vma_fork()
357 if (vma->anon_vma) in anon_vma_fork()
382 vma->anon_vma = anon_vma; in anon_vma_fork()
384 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_fork()
393 unlink_anon_vmas(vma); in anon_vma_fork()
397 void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
406 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
424 if (vma->anon_vma) { in unlink_anon_vmas()
425 vma->anon_vma->num_active_vmas--; in unlink_anon_vmas()
431 vma->anon_vma = NULL; in unlink_anon_vmas()
440 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
745 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
754 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
755 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
757 } else if (!vma->vm_file) { in page_address_in_vma()
759 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
763 return vma_address(page, vma); in page_address_in_vma()
805 struct vm_area_struct *vma, unsigned long address, void *arg) in folio_referenced_one() argument
808 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in folio_referenced_one()
814 if ((vma->vm_flags & VM_LOCKED) && in folio_referenced_one()
817 mlock_vma_folio(folio, vma, !pvmw.pte); in folio_referenced_one()
830 if (ptep_clear_flush_young_notify(vma, address, in folio_referenced_one()
834 if (pmdp_clear_flush_young_notify(vma, address, in folio_referenced_one()
852 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; in folio_referenced_one()
861 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) in invalid_folio_referenced_vma() argument
872 if (!vma_has_recency(vma)) in invalid_folio_referenced_vma()
879 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) in invalid_folio_referenced_vma()
938 struct vm_area_struct *vma = pvmw->vma; in page_vma_mkclean_one() local
947 vma->vm_mm, address, vma_address_end(pvmw)); in page_vma_mkclean_one()
961 flush_cache_page(vma, address, pte_pfn(entry)); in page_vma_mkclean_one()
962 entry = ptep_clear_flush(vma, address, pte); in page_vma_mkclean_one()
965 set_pte_at(vma->vm_mm, address, pte, entry); in page_vma_mkclean_one()
975 flush_cache_range(vma, address, in page_vma_mkclean_one()
977 entry = pmdp_invalidate(vma, address, pmd); in page_vma_mkclean_one()
980 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_vma_mkclean_one()
997 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, in page_mkclean_one() argument
1000 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); in page_mkclean_one()
1008 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) in invalid_mkclean_vma() argument
1010 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1054 struct vm_area_struct *vma) in pfn_mkclean_range() argument
1060 .vma = vma, in pfn_mkclean_range()
1064 if (invalid_mkclean_vma(vma, NULL)) in pfn_mkclean_range()
1067 pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); in pfn_mkclean_range()
1068 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); in pfn_mkclean_range()
1106 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1108 void *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1112 VM_BUG_ON_VMA(!anon_vma, vma); in page_move_anon_rmap()
1133 struct vm_area_struct *vma, unsigned long address, int exclusive) in __page_set_anon_rmap() argument
1135 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1158 folio->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1172 struct vm_area_struct *vma, unsigned long address) in __page_check_anon_rmap() argument
1185 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1187 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1203 void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, in page_add_anon_rmap() argument
1250 __page_set_anon_rmap(folio, page, vma, address, in page_add_anon_rmap()
1253 __page_check_anon_rmap(folio, page, vma, address); in page_add_anon_rmap()
1256 mlock_vma_folio(folio, vma, compound); in page_add_anon_rmap()
1272 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in folio_add_new_anon_rmap() argument
1277 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in folio_add_new_anon_rmap()
1293 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in folio_add_new_anon_rmap()
1309 unsigned int nr_pages, struct vm_area_struct *vma, in folio_add_file_rmap_range() argument
1355 mlock_vma_folio(folio, vma, compound); in folio_add_file_rmap_range()
1366 void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, in page_add_file_rmap() argument
1379 folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); in page_add_file_rmap()
1390 void page_remove_rmap(struct page *page, struct vm_area_struct *vma, in page_remove_rmap() argument
1466 munlock_vma_folio(folio, vma, compound); in page_remove_rmap()
1472 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, in try_to_unmap_one() argument
1475 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1476 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_unmap_one()
1495 split_huge_pmd_address(vma, address, false, folio); in try_to_unmap_one()
1506 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_unmap_one()
1513 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_unmap_one()
1517 hsz = huge_page_size(hstate_vma(vma)); in try_to_unmap_one()
1529 (vma->vm_flags & VM_LOCKED)) { in try_to_unmap_one()
1531 mlock_vma_folio(folio, vma, false); in try_to_unmap_one()
1558 flush_cache_range(vma, range.start, range.end); in try_to_unmap_one()
1572 if (!hugetlb_vma_trylock_write(vma)) { in try_to_unmap_one()
1577 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { in try_to_unmap_one()
1578 hugetlb_vma_unlock_write(vma); in try_to_unmap_one()
1579 flush_tlb_range(vma, in try_to_unmap_one()
1594 hugetlb_vma_unlock_write(vma); in try_to_unmap_one()
1596 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); in try_to_unmap_one()
1598 flush_cache_page(vma, address, pfn); in try_to_unmap_one()
1613 pteval = ptep_clear_flush(vma, address, pvmw.pte); in try_to_unmap_one()
1622 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); in try_to_unmap_one()
1642 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { in try_to_unmap_one()
1716 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
1764 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); in try_to_unmap_one()
1765 if (vma->vm_flags & VM_LOCKED) in try_to_unmap_one()
1775 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) in invalid_migration_vma() argument
1777 return vma_is_temporary_stack(vma); in invalid_migration_vma()
1817 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, in try_to_migrate_one() argument
1820 struct mm_struct *mm = vma->vm_mm; in try_to_migrate_one()
1821 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_migrate_one()
1844 split_huge_pmd_address(vma, address, true, folio); in try_to_migrate_one()
1855 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_migrate_one()
1862 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_migrate_one()
1866 hsz = huge_page_size(hstate_vma(vma)); in try_to_migrate_one()
1923 flush_cache_range(vma, range.start, range.end); in try_to_migrate_one()
1937 if (!hugetlb_vma_trylock_write(vma)) { in try_to_migrate_one()
1942 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { in try_to_migrate_one()
1943 hugetlb_vma_unlock_write(vma); in try_to_migrate_one()
1944 flush_tlb_range(vma, in try_to_migrate_one()
1960 hugetlb_vma_unlock_write(vma); in try_to_migrate_one()
1963 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); in try_to_migrate_one()
1965 flush_cache_page(vma, address, pfn); in try_to_migrate_one()
1980 pteval = ptep_clear_flush(vma, address, pvmw.pte); in try_to_migrate_one()
2039 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { in try_to_migrate_one()
2055 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_migrate_one()
2117 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); in try_to_migrate_one()
2118 if (vma->vm_flags & VM_LOCKED) in try_to_migrate_one()
2183 struct vm_area_struct *vma, unsigned long address, void *priv) in page_make_device_exclusive_one() argument
2185 struct mm_struct *mm = vma->vm_mm; in page_make_device_exclusive_one()
2186 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in page_make_device_exclusive_one()
2197 vma->vm_mm, address, min(vma->vm_end, in page_make_device_exclusive_one()
2218 flush_cache_page(vma, address, pte_pfn(ptent)); in page_make_device_exclusive_one()
2219 pteval = ptep_clear_flush(vma, address, pvmw.pte); in page_make_device_exclusive_one()
2256 page_remove_rmap(subpage, vma, false); in page_make_device_exclusive_one()
2433 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
2434 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_anon()
2436 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_anon()
2439 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
2442 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2466 struct vm_area_struct *vma; in rmap_walk_file() local
2493 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
2495 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_file()
2497 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_file()
2500 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
2503 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_file()
2543 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, in hugepage_add_anon_rmap() argument
2547 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
2557 __page_set_anon_rmap(folio, page, vma, address, in hugepage_add_anon_rmap()
2562 struct vm_area_struct *vma, unsigned long address) in hugepage_add_new_anon_rmap() argument
2564 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
2568 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in hugepage_add_new_anon_rmap()
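
Many of the references above revolve around the anon_vma_chain (avc) objects that tie each vm_area_struct to the anon_vmas covering it (anon_vma_chain_link, __anon_vma_prepare, anon_vma_fork, unlink_anon_vmas). The following is a minimal userspace sketch of that linkage pattern, not kernel code: the struct and field names are simplified stand-ins, and the anon_vma side uses a plain list where the kernel inserts the avc into an interval tree.

/*
 * Simplified model of the double linkage wired up by anon_vma_chain_link():
 * one avc joins a vma to an anon_vma, sitting on the vma's anon_vma_chain
 * list and (here) on the anon_vma's avc list.  All types are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_add(struct list_node *n, struct list_node *h)
{
    /* insert n right after the list head h */
    n->next = h->next;
    n->prev = h;
    h->next->prev = n;
    h->next = n;
}

struct anon_vma {
    struct list_node avcs;          /* stand-in for the rb interval tree */
    int num_active_vmas;
};

struct vm_area {
    struct list_node anon_vma_chain;
    struct anon_vma *anon_vma;
};

struct anon_vma_chain {
    struct vm_area *vma;            /* back-pointer, like avc->vma */
    struct anon_vma *anon_vma;      /* like avc->anon_vma */
    struct list_node same_vma;      /* lives on vma->anon_vma_chain */
    struct list_node same_anon_vma; /* lives on anon_vma->avcs */
};

/* Mirrors the shape of anon_vma_chain_link(): wire the avc into both sides. */
static void chain_link(struct vm_area *vma, struct anon_vma_chain *avc,
                       struct anon_vma *anon_vma)
{
    avc->vma = vma;
    avc->anon_vma = anon_vma;
    list_add(&avc->same_vma, &vma->anon_vma_chain);
    list_add(&avc->same_anon_vma, &anon_vma->avcs);
}

int main(void)
{
    struct anon_vma av = { .num_active_vmas = 0 };
    struct vm_area vma = { .anon_vma = NULL };
    struct anon_vma_chain *avc = malloc(sizeof(*avc));

    if (!avc)
        return 1;

    list_init(&av.avcs);
    list_init(&vma.anon_vma_chain);

    /* Roughly what __anon_vma_prepare() does once it has an anon_vma. */
    vma.anon_vma = &av;
    av.num_active_vmas++;
    chain_link(&vma, avc, &av);

    printf("vma linked: %d\n", vma.anon_vma_chain.next == &avc->same_vma);
    free(avc);
    return 0;
}

Keeping the link in a separate avc object is what makes the relation many-to-many in the kernel: after anon_vma_clone()/anon_vma_fork(), a child vma holds avcs for its parents' anon_vmas as well as its own, and rmap_walk_anon() can walk a single anon_vma's avc structures to visit every vma that might map a given folio.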