Lines matching refs: vma (mm/madvise.c)
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) in anon_vma_name() argument
96 mmap_assert_locked(vma->vm_mm); in anon_vma_name()
98 if (vma->vm_file) in anon_vma_name()
101 return vma->anon_name; in anon_vma_name()
105 static int replace_anon_vma_name(struct vm_area_struct *vma, in replace_anon_vma_name() argument
108 struct anon_vma_name *orig_name = anon_vma_name(vma); in replace_anon_vma_name()
111 vma->anon_name = NULL; in replace_anon_vma_name()
119 vma->anon_name = anon_vma_name_reuse(anon_name); in replace_anon_vma_name()
125 static int replace_anon_vma_name(struct vm_area_struct *vma, in replace_anon_vma_name() argument
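These helpers (one variant per CONFIG_ANON_VMA_NAME setting, judging by the two signatures above) back the PR_SET_VMA_ANON_NAME prctl: the string set from userspace is what ends up in vma->anon_name (source line 119) and is reported in /proc/<pid>/maps. A minimal userspace sketch, not code from this file, assuming a kernel built with CONFIG_ANON_VMA_NAME and uapi headers that define PR_SET_VMA / PR_SET_VMA_ANON_NAME:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>          /* prctl() */
#include <linux/prctl.h>        /* PR_SET_VMA, PR_SET_VMA_ANON_NAME (5.17+) */

int main(void)
{
    size_t len = 16 * 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* The kernel copies the string; the VMA then appears in
     * /proc/self/maps as "[anon:demo-arena]". */
    if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
              (unsigned long)p, len, "demo-arena"))
        perror("prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME)");

    getchar();                  /* pause here and inspect /proc/self/maps */
    munmap(p, len);
    return 0;
}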
140 static int madvise_update_vma(struct vm_area_struct *vma, in madvise_update_vma() argument
145 struct mm_struct *mm = vma->vm_mm; in madvise_update_vma()
149 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) { in madvise_update_vma()
150 *prev = vma; in madvise_update_vma()
154 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_update_vma()
155 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_update_vma()
156 vma->vm_file, pgoff, vma_policy(vma), in madvise_update_vma()
157 vma->vm_userfaultfd_ctx, anon_name); in madvise_update_vma()
159 vma = *prev; in madvise_update_vma()
163 *prev = vma; in madvise_update_vma()
165 if (start != vma->vm_start) { in madvise_update_vma()
168 error = __split_vma(mm, vma, start, 1); in madvise_update_vma()
173 if (end != vma->vm_end) { in madvise_update_vma()
176 error = __split_vma(mm, vma, end, 0); in madvise_update_vma()
185 vma->vm_flags = new_flags; in madvise_update_vma()
186 if (!vma->vm_file) { in madvise_update_vma()
187 error = replace_anon_vma_name(vma, anon_name); in madvise_update_vma()
199 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry() local
213 ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl); in swapin_walk_pmd_entry()
224 vma, index, false, &splug); in swapin_walk_pmd_entry()
237 static void force_shm_swapin_readahead(struct vm_area_struct *vma, in force_shm_swapin_readahead() argument
241 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); in force_shm_swapin_readahead()
242 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); in force_shm_swapin_readahead()
276 static long madvise_willneed(struct vm_area_struct *vma, in madvise_willneed() argument
280 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
281 struct file *file = vma->vm_file; in madvise_willneed()
284 *prev = vma; in madvise_willneed()
287 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
293 force_shm_swapin_readahead(vma, start, end, in madvise_willneed()
315 offset = (loff_t)(start - vma->vm_start) in madvise_willneed()
316 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_willneed()
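madvise_willneed() picks a readahead path per mapping type: swapin_walk_pmd_entry() for anonymous memory (source line 287), force_shm_swapin_readahead() for shmem (293), and, for other file-backed mappings, a readahead request at the file offset computed on lines 315-316 (the fadvise-style call itself is not among the fragments). From userspace the whole thing is just a hint; a compilable sketch with a caller-supplied file:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    int fd = open(argv[1], O_RDONLY);
    if (fd < 0) { perror("open"); return 1; }

    struct stat st;
    if (fstat(fd, &st)) { perror("fstat"); return 1; }
    if (st.st_size == 0) { fprintf(stderr, "empty file\n"); return 1; }

    char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    /* Hint: start readahead for the whole mapping now, so the first touch
     * of each page below is less likely to block on I/O. */
    if (madvise(p, st.st_size, MADV_WILLNEED))
        perror("madvise(MADV_WILLNEED)");

    long sum = 0;
    for (off_t i = 0; i < st.st_size; i += 4096)
        sum += p[i];
    printf("touched %lld bytes, checksum %ld\n", (long long)st.st_size, sum);

    munmap(p, st.st_size);
    close(fd);
    return 0;
}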
332 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range() local
347 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_cold_or_pageout_pte_range()
382 pmdp_invalidate(vma, addr, pmd); in madvise_cold_or_pageout_pte_range()
412 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
424 page = vm_normal_page(vma, addr, ptent); in madvise_cold_or_pageout_pte_range()
505 struct vm_area_struct *vma, in madvise_cold_page_range() argument
513 tlb_start_vma(tlb, vma); in madvise_cold_page_range()
514 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
515 tlb_end_vma(tlb, vma); in madvise_cold_page_range()
518 static inline bool can_madv_lru_vma(struct vm_area_struct *vma) in can_madv_lru_vma() argument
520 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB)); in can_madv_lru_vma()
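The two fragments above are the whole predicate; assembled with its braces it reads:

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
        return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

In other words, the cold/pageout handlers below refuse to touch mlocked, PFN-mapped and hugetlb VMAs.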
523 static long madvise_cold(struct vm_area_struct *vma, in madvise_cold() argument
527 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
530 *prev = vma; in madvise_cold()
531 if (!can_madv_lru_vma(vma)) in madvise_cold()
536 madvise_cold_page_range(&tlb, vma, start_addr, end_addr); in madvise_cold()
543 struct vm_area_struct *vma, in madvise_pageout_page_range() argument
551 tlb_start_vma(tlb, vma); in madvise_pageout_page_range()
552 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
553 tlb_end_vma(tlb, vma); in madvise_pageout_page_range()
556 static inline bool can_do_pageout(struct vm_area_struct *vma) in can_do_pageout() argument
558 if (vma_is_anonymous(vma)) in can_do_pageout()
560 if (!vma->vm_file) in can_do_pageout()
569 file_inode(vma->vm_file)) || in can_do_pageout()
570 file_permission(vma->vm_file, MAY_WRITE) == 0; in can_do_pageout()
573 static long madvise_pageout(struct vm_area_struct *vma, in madvise_pageout() argument
577 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
580 *prev = vma; in madvise_pageout()
581 if (!can_madv_lru_vma(vma)) in madvise_pageout()
584 if (!can_do_pageout(vma)) in madvise_pageout()
589 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); in madvise_pageout()
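Both entry points bail out when can_madv_lru_vma() fails (lines 531 and 581), and madvise_pageout() additionally consults can_do_pageout() before reclaiming a file-backed range it could not otherwise write. A userspace sketch of the two hints, assuming a 5.4+ kernel; the fallback #defines mirror the uapi values:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLD
#define MADV_COLD       20      /* fallback for old headers; uapi value */
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT    21
#endif

int main(void)
{
    size_t len = 32 << 20;              /* 32 MiB of anonymous memory */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }
    memset(p, 1, len);                  /* fault the pages in */

    /* First half: mark cold (deactivate; reclaimed later under pressure). */
    if (madvise(p, len / 2, MADV_COLD))
        perror("madvise(MADV_COLD)");

    /* Second half: reclaim right away (swap out / write back). */
    if (madvise(p + len / 2, len / 2, MADV_PAGEOUT))
        perror("madvise(MADV_PAGEOUT)");

    /* The data stays valid; touching it again just faults it back in. */
    printf("p[0]=%d p[last]=%d\n", p[0], p[len - 1]);
    munmap(p, len);
    return 0;
}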
601 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range() local
611 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) in madvise_free_pte_range()
646 page = vm_normal_page(vma, addr, ptent); in madvise_free_pte_range()
736 static int madvise_free_single_vma(struct vm_area_struct *vma, in madvise_free_single_vma() argument
739 struct mm_struct *mm = vma->vm_mm; in madvise_free_single_vma()
744 if (!vma_is_anonymous(vma)) in madvise_free_single_vma()
747 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
748 if (range.start >= vma->vm_end) in madvise_free_single_vma()
750 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
751 if (range.end <= vma->vm_start) in madvise_free_single_vma()
753 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in madvise_free_single_vma()
761 tlb_start_vma(&tlb, vma); in madvise_free_single_vma()
762 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
764 tlb_end_vma(&tlb, vma); in madvise_free_single_vma()
790 static long madvise_dontneed_single_vma(struct vm_area_struct *vma, in madvise_dontneed_single_vma() argument
793 zap_page_range_single(vma, start, end - start, NULL); in madvise_dontneed_single_vma()
797 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma, in madvise_dontneed_free_valid_vma() argument
802 if (!is_vm_hugetlb_page(vma)) { in madvise_dontneed_free_valid_vma()
808 return !(vma->vm_flags & forbidden); in madvise_dontneed_free_valid_vma()
813 if (start & ~huge_page_mask(hstate_vma(vma))) in madvise_dontneed_free_valid_vma()
822 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma))); in madvise_dontneed_free_valid_vma()
827 static long madvise_dontneed_free(struct vm_area_struct *vma, in madvise_dontneed_free() argument
832 struct mm_struct *mm = vma->vm_mm; in madvise_dontneed_free()
834 *prev = vma; in madvise_dontneed_free()
835 if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior)) in madvise_dontneed_free()
841 if (!userfaultfd_remove(vma, start, end)) { in madvise_dontneed_free()
845 vma = find_vma(mm, start); in madvise_dontneed_free()
846 if (!vma) in madvise_dontneed_free()
848 if (start < vma->vm_start) { in madvise_dontneed_free()
864 if (!madvise_dontneed_free_valid_vma(vma, start, &end, in madvise_dontneed_free()
867 if (end > vma->vm_end) { in madvise_dontneed_free()
880 end = vma->vm_end; in madvise_dontneed_free()
886 return madvise_dontneed_single_vma(vma, start, end); in madvise_dontneed_free()
888 return madvise_free_single_vma(vma, start, end); in madvise_dontneed_free()
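madvise_dontneed_free() dispatches to one of the two single-VMA helpers above: MADV_DONTNEED zaps the range immediately via zap_page_range_single() (line 793), while MADV_FREE only applies to anonymous VMAs (line 744) and merely marks the pages lazily freeable for reclaim. A userspace sketch of the observable difference on anonymous private memory:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_FREE
#define MADV_FREE   8       /* fallback for old headers; uapi value */
#endif

int main(void)
{
    size_t len = 4 << 20;
    char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    char *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (a == MAP_FAILED || b == MAP_FAILED) { perror("mmap"); return 1; }
    memset(a, 'A', len);
    memset(b, 'B', len);

    /* DONTNEED: pages are zapped now; the next read faults in zero pages. */
    if (madvise(a, len, MADV_DONTNEED)) perror("madvise(MADV_DONTNEED)");

    /* FREE: pages are only marked lazily freeable; until memory pressure
     * reclaims them, reads may still return the old contents. */
    if (madvise(b, len, MADV_FREE)) perror("madvise(MADV_FREE)");

    printf("after DONTNEED: a[0] = %d (always 0)\n", a[0]);
    printf("after FREE:     b[0] = %d ('B' or 0, depending on reclaim)\n", b[0]);
    return 0;
}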
893 static long madvise_populate(struct vm_area_struct *vma, in madvise_populate() argument
899 struct mm_struct *mm = vma->vm_mm; in madvise_populate()
904 *prev = vma; in madvise_populate()
911 if (!vma || start >= vma->vm_end) { in madvise_populate()
912 vma = vma_lookup(mm, start); in madvise_populate()
913 if (!vma) in madvise_populate()
917 tmp_end = min_t(unsigned long, end, vma->vm_end); in madvise_populate()
919 pages = faultin_vma_page_range(vma, start, tmp_end, write, in madvise_populate()
925 vma = NULL; in madvise_populate()
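madvise_populate() implements MADV_POPULATE_READ/MADV_POPULATE_WRITE (5.14+): it walks the range, re-looking up the VMA with vma_lookup() as it crosses VMA boundaries (lines 911-913), and prefaults pages through faultin_vma_page_range(). From userspace it behaves like MAP_POPULATE applied after the fact; a sketch, with the fallback #define taken from the uapi value:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_POPULATE_WRITE
#define MADV_POPULATE_WRITE 23  /* fallback; uapi value, kernel >= 5.14 */
#endif

int main(void)
{
    size_t len = 64 << 20;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    /* Prefault (and dirty) the whole range now, instead of paying a page
     * fault on the first touch of every page later. */
    if (madvise(p, len, MADV_POPULATE_WRITE)) {
        perror("madvise(MADV_POPULATE_WRITE)");     /* EINVAL on pre-5.14 */
        return 1;
    }

    memset(p, 0x5a, len);   /* no further faults expected in this loop */
    printf("populated %zu MiB\n", len >> 20);
    munmap(p, len);
    return 0;
}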
954 static long madvise_remove(struct vm_area_struct *vma, in madvise_remove() argument
961 struct mm_struct *mm = vma->vm_mm; in madvise_remove()
965 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
968 f = vma->vm_file; in madvise_remove()
974 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) in madvise_remove()
977 offset = (loff_t)(start - vma->vm_start) in madvise_remove()
978 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_remove()
987 if (userfaultfd_remove(vma, start, end)) { in madvise_remove()
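madvise_remove() frees the backing store of a file mapping: it refuses VM_LOCKED mappings (line 965), requires a shared, writable mapping (the VM_SHARED|VM_WRITE check on line 974), and derives the file offset from vm_pgoff (lines 977-978); the hole punch itself goes through the file's fallocate path, which is outside these fragments. A userspace sketch using a memfd as the shared backing file:

#define _GNU_SOURCE             /* memfd_create (glibc >= 2.27) */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t len = 8 << 20;
    int fd = memfd_create("madv-remove-demo", 0);
    if (fd < 0 || ftruncate(fd, len)) { perror("memfd"); return 1; }

    /* MADV_REMOVE needs a shared, writable mapping of the file. */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }
    memset(p, 'x', len);

    /* Free the backing pages of the third quarter: both the mapping and the
     * file see zeroes there afterwards (a hole is punched in the file). */
    if (madvise(p + len / 2, len / 4, MADV_REMOVE))
        perror("madvise(MADV_REMOVE)");

    printf("middle byte after remove: %d\n", p[len / 2]);  /* 0 */
    munmap(p, len);
    close(fd);
    return 0;
}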
1004 static int madvise_vma_behavior(struct vm_area_struct *vma, in madvise_vma_behavior() argument
1011 unsigned long new_flags = vma->vm_flags; in madvise_vma_behavior()
1015 return madvise_remove(vma, prev, start, end); in madvise_vma_behavior()
1017 return madvise_willneed(vma, prev, start, end); in madvise_vma_behavior()
1019 return madvise_cold(vma, prev, start, end); in madvise_vma_behavior()
1021 return madvise_pageout(vma, prev, start, end); in madvise_vma_behavior()
1025 return madvise_dontneed_free(vma, prev, start, end, behavior); in madvise_vma_behavior()
1028 return madvise_populate(vma, prev, start, end, behavior); in madvise_vma_behavior()
1042 if (vma->vm_flags & VM_IO) in madvise_vma_behavior()
1048 if (vma->vm_file || vma->vm_flags & VM_SHARED) in madvise_vma_behavior()
1059 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) in madvise_vma_behavior()
1065 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_vma_behavior()
1071 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_vma_behavior()
1076 return madvise_collapse(vma, prev, start, end); in madvise_vma_behavior()
1079 anon_name = anon_vma_name(vma); in madvise_vma_behavior()
1081 error = madvise_update_vma(vma, prev, start, end, new_flags, in madvise_vma_behavior()
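Behaviors without a dedicated helper adjust new_flags (initialized from vma->vm_flags on line 1011) and fall through to madvise_update_vma() on line 1081, which merges or splits VMAs so the new flags cover exactly the requested range. MADV_DONTFORK is one such flag-only behavior: it sets VM_DONTCOPY, so the VMA is skipped when the address space is duplicated at fork(). A runnable sketch of the visible effect, assuming the child really touches the range:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    size_t len = 1 << 20;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }
    memset(p, 'x', len);

    /* Without this call the child would simply see a CoW copy of the range. */
    if (madvise(p, len, MADV_DONTFORK)) { perror("madvise(MADV_DONTFORK)"); return 1; }

    pid_t pid = fork();
    if (pid == 0) {
        /* The range was not copied into the child: this access faults. */
        p[0] = 'y';
        _exit(0);
    }
    int status;
    waitpid(pid, &status, 0);
    printf("child %s\n", WIFSIGNALED(status)
           ? "was killed by a signal (expected: SIGSEGV)"
           : "exited normally");
    munmap(p, len);
    return 0;
}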
1211 int (*visit)(struct vm_area_struct *vma, in madvise_walk_vmas() argument
1215 struct vm_area_struct *vma; in madvise_walk_vmas() local
1225 vma = find_vma_prev(mm, start, &prev); in madvise_walk_vmas()
1226 if (vma && start > vma->vm_start) in madvise_walk_vmas()
1227 prev = vma; in madvise_walk_vmas()
1233 if (!vma) in madvise_walk_vmas()
1237 if (start < vma->vm_start) { in madvise_walk_vmas()
1239 start = vma->vm_start; in madvise_walk_vmas()
1245 tmp = vma->vm_end; in madvise_walk_vmas()
1250 error = visit(vma, &prev, start, tmp, arg); in madvise_walk_vmas()
1259 vma = find_vma(mm, prev->vm_end); in madvise_walk_vmas()
1261 vma = find_vma(mm, start); in madvise_walk_vmas()
1268 static int madvise_vma_anon_name(struct vm_area_struct *vma, in madvise_vma_anon_name() argument
1276 if (vma->vm_file) in madvise_vma_anon_name()
1279 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags, in madvise_vma_anon_name()