Lines matching refs:vma (cross-reference hits for the identifier vma). Each hit gives the source line number, the line itself, and the enclosing function; lines that declare vma are additionally tagged "argument" (function parameter) or "local". All of the enclosing functions belong to the transparent huge page (THP) code, apparently mm/huge_memory.c.
74 bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, in hugepage_vma_check() argument
77 if (!vma->vm_mm) /* vdso */ in hugepage_vma_check()
86 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) in hugepage_vma_check()
95 if (vma_is_dax(vma)) in hugepage_vma_check()
113 !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) in hugepage_vma_check()
121 if (!in_pf && shmem_file(vma->vm_file)) in hugepage_vma_check()
122 return shmem_huge_enabled(vma, !enforce_sysfs); in hugepage_vma_check()
131 if (!in_pf && file_thp_enabled(vma)) in hugepage_vma_check()
134 if (!vma_is_anonymous(vma)) in hugepage_vma_check()
137 if (vma_is_temporary_stack(vma)) in hugepage_vma_check()
147 if (!vma->anon_vma) in hugepage_vma_check()
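
Taken together, the hits above trace the VMA eligibility test for THP. A condensed sketch reconstructed from just those checks (the sysfs/enforce_sysfs handling and a few intermediate tests are omitted):

    bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
                            bool smaps, bool in_pf, bool enforce_sysfs)
    {
            if (!vma->vm_mm)                        /* vdso */
                    return false;
            if ((vm_flags & VM_NOHUGEPAGE) ||
                test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                    return false;                   /* madvise/prctl opt-out */
            if (vma_is_dax(vma))
                    return in_pf;                   /* khugepaged skips DAX; faults decide */
            /* Outside the fault path, the VMA must fit an aligned PMD-sized area. */
            if (!in_pf &&
                !transhuge_vma_suitable(vma, vma->vm_end - HPAGE_PMD_SIZE))
                    return false;
            if (!in_pf && shmem_file(vma->vm_file))
                    return shmem_huge_enabled(vma, !enforce_sysfs);
            if (!in_pf && file_thp_enabled(vma))    /* read-only file THP */
                    return true;
            if (!vma_is_anonymous(vma))
                    return false;
            if (vma_is_temporary_stack(vma))
                    return false;
            /* anon_vma may not exist yet; smaps and page faults tolerate that. */
            if (!vma->anon_vma)
                    return smaps || in_pf;
            return true;
    }
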
554 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
556 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
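
This helper is small enough to restate in full; modulo comments, the body should be exactly this (maybe_pud_mkwrite() further down is the same pattern at the PUD level):

    pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
    {
            /* Only mark the huge PMD writable if the VMA itself allows writes. */
            if (likely(vma->vm_flags & VM_WRITE))
                    pmd = pmd_mkwrite(pmd);
            return pmd;
    }
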
655 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page() local
662 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
670 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
684 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
690 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
695 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page()
698 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
704 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
705 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
706 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page()
707 lru_cache_add_inactive_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
708 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
709 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
710 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in __do_huge_pmd_anonymous_page()
711 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
712 mm_inc_nr_ptes(vma->vm_mm); in __do_huge_pmd_anonymous_page()
715 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); in __do_huge_pmd_anonymous_page()
723 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
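
The hits at 704-712 are the heart of the anonymous huge-page fault: the sequence, run under the PMD lock, that installs a freshly allocated and charged huge page. Restated with the error paths and the userfaultfd branch elided:

    /* vmf->ptl (the PMD lock) is held and pmd_none(*vmf->pmd) has been rechecked. */
    entry = mk_huge_pmd(page, vma->vm_page_prot);
    entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
    page_add_new_anon_rmap(page, vma, haddr);
    lru_cache_add_inactive_or_unevictable(page, vma);
    /* The pre-allocated PTE page is stashed away for a later split of the PMD. */
    pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
    set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
    update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
    add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
    mm_inc_nr_ptes(vma->vm_mm);
    spin_unlock(vmf->ptl);
    count_vm_event(THP_FAULT_ALLOC);
    count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
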
738 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) in vma_thp_gfp_mask() argument
740 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); in vma_thp_gfp_mask()
766 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
772 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
781 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page() local
786 if (!transhuge_vma_suitable(vma, haddr)) in do_huge_pmd_anonymous_page()
788 if (unlikely(anon_vma_prepare(vma))) in do_huge_pmd_anonymous_page()
790 khugepaged_enter_vma(vma, vma->vm_flags); in do_huge_pmd_anonymous_page()
793 !mm_forbids_zeropage(vma->vm_mm) && in do_huge_pmd_anonymous_page()
798 pgtable = pte_alloc_one(vma->vm_mm); in do_huge_pmd_anonymous_page()
801 zero_page = mm_get_huge_zero_page(vma->vm_mm); in do_huge_pmd_anonymous_page()
803 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
807 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
810 ret = check_stable_address_space(vma->vm_mm); in do_huge_pmd_anonymous_page()
813 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
814 } else if (userfaultfd_missing(vma)) { in do_huge_pmd_anonymous_page()
816 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
820 set_huge_zero_page(pgtable, vma->vm_mm, vma, in do_huge_pmd_anonymous_page()
822 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
827 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
831 gfp = vma_thp_gfp_mask(vma); in do_huge_pmd_anonymous_page()
832 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true); in do_huge_pmd_anonymous_page()
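
For a read fault the function avoids allocating memory altogether and maps the global huge zero page; only on a write fault, or when the zero page cannot be used, does it fall back to vma_alloc_folio(). A simplified sketch of that branch, with error handling and the userfaultfd/unstable-address-space cases reduced to the guard condition:

    if (!(vmf->flags & FAULT_FLAG_WRITE) &&
        !mm_forbids_zeropage(vma->vm_mm) &&
        transparent_hugepage_use_zero_page()) {
            pgtable = pte_alloc_one(vma->vm_mm);
            zero_page = mm_get_huge_zero_page(vma->vm_mm);
            /* allocation-failure handling omitted */
            vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
            if (pmd_none(*vmf->pmd) && !userfaultfd_missing(vma)) {
                    set_huge_zero_page(pgtable, vma->vm_mm, vma,
                                       haddr, vmf->pmd, zero_page);
                    update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
            }
            spin_unlock(vmf->ptl);
            /* paths that do not consume pgtable free it again (omitted) */
            return 0;
    }
    gfp = vma_thp_gfp_mask(vma);
    folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
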
840 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pmd() argument
844 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
856 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in insert_pfn_pmd()
857 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
858 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
869 entry = maybe_pmd_mkwrite(entry, vma); in insert_pfn_pmd()
879 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
904 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pmd_prot() local
912 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pmd_prot()
914 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd_prot()
916 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd_prot()
918 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pmd_prot()
922 pgtable = pte_alloc_one(vma->vm_mm); in vmf_insert_pfn_pmd_prot()
927 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pmd_prot()
929 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd_prot()
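
vmf_insert_pfn_pmd_prot() backs the exported vmf_insert_pfn_pmd() helper that DAX-like drivers call from a huge_fault handler. A hypothetical handler (the mydev_* names are made up for illustration) might look like this on kernels of this vintage:

    #include <linux/mm.h>
    #include <linux/huge_mm.h>
    #include <linux/pfn_t.h>

    /* Hypothetical helper: translate the faulting offset to a device PFN. */
    static pfn_t mydev_offset_to_pfn(struct vm_fault *vmf);

    static vm_fault_t mydev_huge_fault(struct vm_fault *vmf,
                                       enum page_entry_size pe_size)
    {
            if (pe_size != PE_SIZE_PMD)
                    return VM_FAULT_FALLBACK;       /* let the core map PTEs instead */

            /* Installs a PMD-sized PFN mapping covering vmf->address. */
            return vmf_insert_pfn_pmd(vmf, mydev_offset_to_pfn(vmf),
                                      vmf->flags & FAULT_FLAG_WRITE);
    }

    static const struct vm_operations_struct mydev_vm_ops = {
            .huge_fault     = mydev_huge_fault,
            /* .fault handler for the PTE-sized fallback omitted */
    };
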
935 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) in maybe_pud_mkwrite() argument
937 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pud_mkwrite()
942 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pud() argument
945 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pud()
957 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); in insert_pfn_pud()
958 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) in insert_pfn_pud()
959 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
969 entry = maybe_pud_mkwrite(entry, vma); in insert_pfn_pud()
972 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
995 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pud_prot() local
1002 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pud_prot()
1004 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pud_prot()
1006 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pud_prot()
1008 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pud_prot()
1011 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pud_prot()
1013 insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); in vmf_insert_pfn_pud_prot()
1019 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, in touch_pmd() argument
1027 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in touch_pmd()
1029 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
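
touch_pmd() (and touch_pud() just below) merely refreshes the accessed/dirty bits when an existing huge mapping is reused via follow_page()-style lookups; something close to:

    static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd, bool write)
    {
            pmd_t _pmd;

            _pmd = pmd_mkyoung(*pmd);
            if (write)
                    _pmd = pmd_mkdirty(_pmd);
            /* Only flush/update if the hardware bits actually changed. */
            if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
                                      pmd, _pmd, write))
                    update_mmu_cache_pmd(vma, addr, pmd);
    }
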
1032 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pmd() argument
1036 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pmd()
1055 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); in follow_devmap_pmd()
1177 static void touch_pud(struct vm_area_struct *vma, unsigned long addr, in touch_pud() argument
1185 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, in touch_pud()
1187 update_mmu_cache_pud(vma, addr, pud); in touch_pud()
1190 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pud() argument
1194 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pud()
1213 touch_pud(vma, addr, pud, flags & FOLL_WRITE); in follow_devmap_pud()
1237 struct vm_area_struct *vma) in copy_huge_pud() argument
1280 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1284 touch_pud(vmf->vma, vmf->address, vmf->pud, write); in huge_pud_set_accessed()
1294 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1298 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
1307 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page() local
1313 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1314 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1370 page_move_anon_rmap(page, vma); in do_huge_pmd_wp_page()
1378 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1379 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1380 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1389 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in do_huge_pmd_wp_page()
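
The hits at 1370-1380 are the reuse fast path of the huge-PMD write fault: when the anonymous page is exclusively owned, no copy is made; the rmap is moved to this VMA and the existing PMD is made writable in place. If reuse is impossible the PMD is split (1389) and the fault is retried at PTE granularity. Sketch of the reuse step:

    /* The page is exclusive to us: reuse it rather than copying. */
    page_move_anon_rmap(page, vma);
    entry = pmd_mkyoung(vmf->orig_pmd);
    entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
    if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
            update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
    spin_unlock(vmf->ptl);
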
1395 struct vm_area_struct *vma, in can_follow_write_pmd() argument
1407 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) in can_follow_write_pmd()
1411 if (!(vma->vm_flags & VM_MAYWRITE)) in can_follow_write_pmd()
1415 if (vma->vm_flags & VM_WRITE) in can_follow_write_pmd()
1426 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) in can_follow_write_pmd()
1428 return !userfaultfd_huge_pmd_wp(vma, pmd); in can_follow_write_pmd()
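
These checks implement the FOLL_FORCE policy for huge PMDs: a GUP write through a read-only PMD is only tolerated for an exclusive anonymous page in a private mapping that cannot simply take a write fault instead. Roughly:

    if (pmd_write(pmd))
            return true;                    /* already writable */
    if (!(flags & FOLL_FORCE))
            return false;
    /* FOLL_FORCE never applies to shared mappings ... */
    if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
            return false;
    /* ... nor to mappings that may never become writable ... */
    if (!(vma->vm_flags & VM_MAYWRITE))
            return false;
    /* ... nor when an ordinary write fault would do the job. */
    if (vma->vm_flags & VM_WRITE)
            return false;
    /* COW must already be broken: an exclusive anonymous page only. */
    if (!page || !PageAnon(page) || !PageAnonExclusive(page))
            return false;
    /* A write fault is still needed for soft-dirty tracking and uffd-wp. */
    if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
            return false;
    return !userfaultfd_huge_pmd_wp(vma, pmd);
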
1431 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd() argument
1436 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1445 !can_follow_write_pmd(*pmd, page, vma, flags)) in follow_trans_huge_pmd()
1466 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); in follow_trans_huge_pmd()
1477 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page() local
1488 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1494 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1495 page = vm_normal_page_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
1510 target_nid = numa_migrate_prep(page, vma, haddr, page_nid, in do_huge_pmd_numa_page()
1520 migrated = migrate_misplaced_page(page, vma, target_nid); in do_huge_pmd_numa_page()
1526 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1543 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1547 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1548 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
1557 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in madvise_free_huge_pmd() argument
1568 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1611 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1636 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1644 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1653 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
1656 if (vma_is_special_huge(vma)) { in zap_huge_pmd()
1669 page_remove_rmap(page, vma, true); in zap_huge_pmd()
1701 struct vm_area_struct *vma) in pmd_move_must_withdraw() argument
1709 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); in pmd_move_must_withdraw()
1724 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pmd() argument
1729 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1745 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); in move_huge_pmd()
1755 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { in move_huge_pmd()
1763 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_huge_pmd()
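
When mremap() moves a huge PMD between page-table pages whose ptlocks differ, the deposited PTE page has to follow it. pmd_move_must_withdraw() (1709) encodes that condition: split ptlocks plus an anonymous VMA. The corresponding step in move_huge_pmd(), taken out of context:

    /* old_ptl and new_ptl are held; the old PMD entry has been cleared. */
    if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
            pgtable_t pgtable;

            pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
            pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
    }
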
1779 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in change_huge_pmd() argument
1783 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1797 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1880 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); in change_huge_pmd()
1902 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); in change_huge_pmd()
1914 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1917 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
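
This is the locking helper that madvise_free_huge_pmd(), zap_huge_pmd(), move_huge_pmd() and change_huge_pmd() above rely on: take the PMD lock, keep it only if the entry still is a huge (or huge-swap/devmap) PMD, otherwise drop it and report failure. Likely close to:

    spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
    {
            spinlock_t *ptl;

            ptl = pmd_lock(vma->vm_mm, pmd);
            if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
                       pmd_devmap(*pmd)))
                    return ptl;             /* caller proceeds with ptl held */
            spin_unlock(ptl);
            return NULL;                    /* not (or no longer) a huge PMD */
    }

Callers therefore test the returned pointer, fall back to the PTE-level path when it is NULL, and spin_unlock() it themselves when it is not.
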
1931 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) in __pud_trans_huge_lock() argument
1935 ptl = pud_lock(vma->vm_mm, pud); in __pud_trans_huge_lock()
1943 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pud() argument
1948 ptl = __pud_trans_huge_lock(pud, vma); in zap_huge_pud()
1954 if (vma_is_special_huge(vma)) { in zap_huge_pud()
1964 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud_locked() argument
1968 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pud_locked()
1969 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); in __split_huge_pud_locked()
1974 pudp_huge_clear_flush_notify(vma, haddr, pud); in __split_huge_pud_locked()
1977 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud() argument
1983 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pud()
1987 ptl = pud_lock(vma->vm_mm, pud); in __split_huge_pud()
1990 __split_huge_pud_locked(vma, pud, range.start); in __split_huge_pud()
2002 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, in __split_huge_zero_page_pmd() argument
2005 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2018 pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2025 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
2036 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2039 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd_locked()
2049 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pmd_locked()
2050 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); in __split_huge_pmd_locked()
2056 if (!vma_is_anonymous(vma)) { in __split_huge_pmd_locked()
2057 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
2064 if (vma_is_special_huge(vma)) in __split_huge_pmd_locked()
2077 page_remove_rmap(page, vma, true); in __split_huge_pmd_locked()
2094 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2117 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2201 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); in __split_huge_pmd_locked()
2202 entry = maybe_mkwrite(entry, vma); in __split_huge_pmd_locked()
2253 munlock_vma_page(page, vma, true); in __split_huge_pmd_locked()
2261 page_remove_rmap(page + i, vma, false); in __split_huge_pmd_locked()
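
Lines 2201-2202 sit inside the loop that rewrites the huge mapping as HPAGE_PMD_NR ordinary PTEs over the previously deposited page table, carrying over the per-mapping state (write, young, dirty, soft-dirty, uffd-wp) captured from the old PMD. Heavily condensed, and skipping the migration-entry and freeze cases:

    for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
            pte_t entry, *pte;

            entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
            entry = maybe_mkwrite(entry, vma);
            if (!write)
                    entry = pte_wrprotect(entry);
            if (!young)
                    entry = pte_mkold(entry);
            /* soft-dirty and uffd-wp propagation omitted */

            pte = pte_offset_map(&_pmd, addr);
            set_pte_at(mm, addr, pte, entry);
            pte_unmap(pte);
    }
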
2267 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2273 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pmd()
2277 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2294 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
2315 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, in split_huge_pmd_address() argument
2318 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); in split_huge_pmd_address()
2323 __split_huge_pmd(vma, pmd, address, freeze, folio); in split_huge_pmd_address()
2326 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) in split_huge_pmd_if_needed() argument
2333 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), in split_huge_pmd_if_needed()
2335 split_huge_pmd_address(vma, address, false, NULL); in split_huge_pmd_if_needed()
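
The helper only bothers splitting when the new VMA boundary is not PMD-aligned and the surrounding PMD-sized range is actually covered by the VMA, i.e. a huge PMD could be mapped there; likely close to:

    static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma,
                                                unsigned long address)
    {
            /*
             * If the new boundary is not huge-page aligned and the VMA covers
             * the whole PMD-sized range around it, any huge PMD mapped there
             * must be split before the VMA is adjusted.
             */
            if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
                range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
                             ALIGN(address, HPAGE_PMD_SIZE)))
                    split_huge_pmd_address(vma, address, false, NULL);
    }
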
2338 void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
2344 split_huge_pmd_if_needed(vma, start); in vma_adjust_trans_huge()
2347 split_huge_pmd_if_needed(vma, end); in vma_adjust_trans_huge()
2354 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); in vma_adjust_trans_huge()
2955 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) in vma_not_suitable_for_thp_split() argument
2957 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || in vma_not_suitable_for_thp_split()
2958 is_vm_hugetlb_page(vma); in vma_not_suitable_for_thp_split()
3002 struct vm_area_struct *vma = vma_lookup(mm, addr); in split_huge_pages_pid() local
3005 if (!vma) in split_huge_pages_pid()
3009 if (vma_not_suitable_for_thp_split(vma)) { in split_huge_pages_pid()
3010 addr = vma->vm_end; in split_huge_pages_pid()
3015 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); in split_huge_pages_pid()
3195 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry() local
3196 struct mm_struct *mm = vma->vm_mm; in set_pmd_migration_entry()
3206 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); in set_pmd_migration_entry()
3207 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
3232 page_remove_rmap(page, vma, true); in set_pmd_migration_entry()
3241 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd() local
3242 struct mm_struct *mm = vma->vm_mm; in remove_migration_pmd()
3253 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); in remove_migration_pmd()
3257 pmde = maybe_pmd_mkwrite(pmde, vma); in remove_migration_pmd()
3272 page_add_anon_rmap(new, vma, haddr, rmap_flags); in remove_migration_pmd()
3274 page_add_file_rmap(new, vma, true); in remove_migration_pmd()
3280 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()