Lines Matching refs:vma (Linux kernel, mm/mlock.c)

312 struct vm_area_struct *vma = walk->vma; in mlock_pte_range() local
318 ptl = pmd_trans_huge_lock(pmd, vma); in mlock_pte_range()
325 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
332 start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mlock_pte_range()
341 folio = vm_normal_folio(vma, addr, ptent); in mlock_pte_range()
346 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
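
The hits at lines 312-346 all sit inside mlock_pte_range(), the pagewalk pmd_entry callback that marks or unmarks folios as mlocked after a VMA's lock state has changed. Below is a condensed, hedged sketch of that flow reassembled from the fragments above; error and retry paths and large-folio special cases are omitted, so it is not the verbatim kernel source.

/* Simplified reconstruction, not verbatim mm/mlock.c. */
static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;		/* line 312 */
	pte_t *start_pte, *pte;
	struct folio *folio;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);		/* line 318: THP case */
	if (ptl) {
		/* One PMD-sized folio covers the whole range. */
		folio = page_folio(pmd_page(*pmd));
		if (vma->vm_flags & VM_LOCKED)		/* line 325 */
			mlock_folio(folio);
		else
			munlock_folio(folio);
		spin_unlock(ptl);
		return 0;
	}

	/* Normal PTEs: map and lock the PTE page (line 332). */
	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		pte_t ptent = ptep_get(pte);

		if (!pte_present(ptent))
			continue;
		folio = vm_normal_folio(vma, addr, ptent);	/* line 341 */
		if (!folio)
			continue;
		if (vma->vm_flags & VM_LOCKED)		/* line 346 */
			mlock_folio(folio);
		else
			munlock_folio(folio);
	}
	pte_unmap_unlock(start_pte, ptl);
	return 0;
}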
369 static void mlock_vma_pages_range(struct vm_area_struct *vma, in mlock_vma_pages_range() argument
390 vma_start_write(vma); in mlock_vma_pages_range()
391 vm_flags_reset_once(vma, newflags); in mlock_vma_pages_range()
394 walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL); in mlock_vma_pages_range()
399 vm_flags_reset_once(vma, newflags); in mlock_vma_pages_range()
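
mlock_vma_pages_range() (lines 369-399) drives that walk for one VMA: it write-locks the VMA, publishes the new flags, walks [start, end) with a pmd_entry-only mm_walk_ops pointing at mlock_pte_range(), and then publishes the flags a second time. A hedged sketch of the sequence follows; the temporary VM_IO trick is my reading of why vm_flags_reset_once() appears both before and after the walk, so treat that detail as an assumption.

/* Hedged sketch of mlock_vma_pages_range(); not verbatim kernel source. */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
	};

	/* Assumption: VM_IO is ORed in while the walk runs so concurrent
	 * faulters do not mlock pages against a half-updated VMA. */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;

	vma_start_write(vma);				/* line 390 */
	vm_flags_reset_once(vma, newflags);		/* line 391 */

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);	/* line 394 */
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		vm_flags_reset_once(vma, newflags);	/* line 399 */
	}
}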
412 static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, in mlock_fixup() argument
416 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
420 vm_flags_t oldflags = vma->vm_flags; in mlock_fixup()
423 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || in mlock_fixup()
424 vma_is_dax(vma) || vma_is_secretmem(vma)) in mlock_fixup()
428 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mlock_fixup()
430 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mlock_fixup()
431 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in mlock_fixup()
433 vma = *prev; in mlock_fixup()
437 if (start != vma->vm_start) { in mlock_fixup()
438 ret = split_vma(vmi, vma, start, 1); in mlock_fixup()
443 if (end != vma->vm_end) { in mlock_fixup()
444 ret = split_vma(vmi, vma, end, 0); in mlock_fixup()
467 vma_start_write(vma); in mlock_fixup()
468 vm_flags_reset(vma, newflags); in mlock_fixup()
470 mlock_vma_pages_range(vma, start, end, newflags); in mlock_fixup()
473 *prev = vma; in mlock_fixup()
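
mlock_fixup() (lines 412-473) is the per-VMA flag changer behind both mlock and mlockall: it refuses special mappings, tries to merge the affected range with a neighbour that already carries the target flags, splits the VMA when only part of it is covered, and finally either rewrites just the flags (when the pages are already mlocked) or hands the range to mlock_vma_pages_range(). A condensed sketch reassembled from the hits above; mm->locked_vm accounting and error handling are omitted or paraphrased.

/* Condensed sketch of mlock_fixup(); not verbatim mm/mlock.c. */
static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct vm_area_struct **prev, unsigned long start,
		unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;		/* line 416 */
	vm_flags_t oldflags = vma->vm_flags;		/* line 420 */
	pgoff_t pgoff;
	int ret = 0;

	/* Lines 423-424: mappings whose mlock state must not change. */
	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		goto out;

	/* Lines 428-433: merge with a compatible neighbour if possible... */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(vmi, mm, *prev, start, end, newflags,
			  vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	/* ...otherwise carve out exactly [start, end) (lines 437-444). */
	if (start != vma->vm_start) {
		ret = split_vma(vmi, vma, start, 1);
		if (ret)
			goto out;
	}
	if (end != vma->vm_end) {
		ret = split_vma(vmi, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/* Lines 467-470: already-mlocked pages only need new flags;
	 * anything else goes through the pagewalk above. */
	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		vma_start_write(vma);
		vm_flags_reset(vma, newflags);
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;					/* line 473 */
	return ret;
}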
481 struct vm_area_struct *vma, *prev; in apply_vma_lock_flags() local
491 vma = vma_iter_load(&vmi); in apply_vma_lock_flags()
492 if (!vma) in apply_vma_lock_flags()
496 if (start > vma->vm_start) in apply_vma_lock_flags()
497 prev = vma; in apply_vma_lock_flags()
500 tmp = vma->vm_start; in apply_vma_lock_flags()
501 for_each_vma_range(vmi, vma, end) { in apply_vma_lock_flags()
505 if (vma->vm_start != tmp) in apply_vma_lock_flags()
508 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_vma_lock_flags()
511 tmp = vma->vm_end; in apply_vma_lock_flags()
514 error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags); in apply_vma_lock_flags()
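
apply_vma_lock_flags() (lines 481-514) is the mlock(2)/munlock(2) worker: it walks every VMA overlapping [start, start+len), demands that they be contiguous, rewrites the VM_LOCKED_MASK bits and lets mlock_fixup() do the rest. A hedged condensation of the loop; the exact error handling and iterator bookkeeping are simplified.

/* Hedged condensation of apply_vma_lock_flags(); not verbatim kernel source. */
static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart = start, end = start + len, tmp;
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, current->mm, start);

	vma = vma_iter_load(&vmi);			/* line 491 */
	if (!vma)
		return -ENOMEM;
	prev = vma_prev(&vmi);
	if (start > vma->vm_start)			/* line 496: request starts mid-VMA */
		prev = vma;

	tmp = vma->vm_start;				/* line 500 */
	for_each_vma_range(vmi, vma, end) {		/* line 501 */
		vm_flags_t newflags;
		int error;

		if (vma->vm_start != tmp)		/* line 505: hole in the range */
			return -ENOMEM;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;	/* line 508 */
		newflags |= flags;
		tmp = min(vma->vm_end, end);		/* line 511, clamped to the request */
		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);	/* line 514 */
		if (error)
			return error;
		nstart = tmp;
	}
	return tmp < end ? -ENOMEM : 0;
}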
537 struct vm_area_struct *vma; in count_mm_mlocked_page_nr() local
548 for_each_vma_range(vmi, vma, end) { in count_mm_mlocked_page_nr()
549 if (vma->vm_flags & VM_LOCKED) { in count_mm_mlocked_page_nr()
550 if (start > vma->vm_start) in count_mm_mlocked_page_nr()
551 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
552 if (end < vma->vm_end) { in count_mm_mlocked_page_nr()
553 count += end - vma->vm_start; in count_mm_mlocked_page_nr()
556 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
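
count_mm_mlocked_page_nr() (lines 537-556) sums how much of [start, start+len) is already covered by VM_LOCKED VMAs, so an mlock request is only charged for the bytes it newly locks. The arithmetic above is a compact interval clip: the head of the first VMA (before start) is subtracted, a VMA that extends past end only contributes up to end, and fully covered VMAs contribute their whole size. A short sketch follows; the signature and the final conversion from bytes to pages are inferred from the function name, not shown in the hits.

/* Sketch of the counting loop; overflow checks at the call edge omitted. */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	unsigned long count = 0, end = start + len;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	for_each_vma_range(vmi, vma, end) {		/* line 548 */
		if (!(vma->vm_flags & VM_LOCKED))	/* line 549 */
			continue;
		if (start > vma->vm_start)
			count -= (start - vma->vm_start);	/* clip the head; may wrap, fixed below */
		if (end < vma->vm_end) {
			count += end - vma->vm_start;		/* clip the tail of the last VMA */
			break;
		}
		count += vma->vm_end - vma->vm_start;		/* VMA fully inside the range */
	}
	return count >> PAGE_SHIFT;			/* bytes -> pages (inferred) */
}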
670 struct vm_area_struct *vma, *prev = NULL; in apply_mlockall_flags() local
690 for_each_vma(vmi, vma) { in apply_mlockall_flags()
693 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_mlockall_flags()
697 mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, in apply_mlockall_flags()
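
apply_mlockall_flags() (lines 670-697) is the mlockall(2)/munlockall(2) worker: for_each_vma() visits every VMA of the process, the VM_LOCKED_MASK bits are recomputed, and mlock_fixup() is applied across the whole VMA. As a userspace view of the path that ends up here, a minimal standalone example (my own illustration, not from the kernel tree; it needs CAP_IPC_LOCK or a large enough RLIMIT_MEMLOCK to succeed):

/* Lock all current and future mappings, then print the VmLck counter
 * that the mlock code above ultimately maintains. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	char line[256];
	FILE *f;

	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");
		return 1;
	}

	f = fopen("/proc/self/status", "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "VmLck:", 6) == 0)
			fputs(line, stdout);	/* locked memory, in kB */
	}
	fclose(f);
	return 0;
}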