/linux-6.1.9/drivers/gpu/drm/i915/ |
D | i915_vma.h |
    51  static inline bool i915_vma_is_active(const struct i915_vma *vma)  in i915_vma_is_active() argument
    53          return !i915_active_is_idle(&vma->active);  in i915_vma_is_active()
    59  int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
    64  i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,  in i915_vma_move_to_active() argument
    67          return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);  in i915_vma_move_to_active()
    72  static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)  in i915_vma_is_ggtt() argument
    74          return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));  in i915_vma_is_ggtt()
    77  static inline bool i915_vma_is_dpt(const struct i915_vma *vma)  in i915_vma_is_dpt() argument
    79          return i915_is_dpt(vma->vm);  in i915_vma_is_dpt()
    82  static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)  in i915_vma_has_ggtt_write() argument
    [all …]
|
D | i915_vma.c |
    44  static inline void assert_vma_held_evict(const struct i915_vma *vma)  in assert_vma_held_evict() argument
    51          if (kref_read(&vma->vm->ref))  in assert_vma_held_evict()
    52                  assert_object_held_shared(vma->obj);  in assert_vma_held_evict()
    62  static void i915_vma_free(struct i915_vma *vma)  in i915_vma_free() argument
    64          return kmem_cache_free(slab_vmas, vma);  in i915_vma_free()
    71  static void vma_print_allocator(struct i915_vma *vma, const char *reason)  in vma_print_allocator() argument
    75          if (!vma->node.stack) {  in vma_print_allocator()
    77                          vma->node.start, vma->node.size, reason);  in vma_print_allocator()
    81          stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);  in vma_print_allocator()
    83                  vma->node.start, vma->node.size, reason, buf);  in vma_print_allocator()
    [all …]
|
D | i915_gem_evict.c |
    41  static bool dying_vma(struct i915_vma *vma)
    43          return !kref_read(&vma->obj->base.refcount);
    58  static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)  in grab_vma() argument
    64          if (i915_gem_object_get_rcu(vma->obj)) {  in grab_vma()
    65                  if (!i915_gem_object_trylock(vma->obj, ww)) {  in grab_vma()
    66                          i915_gem_object_put(vma->obj);  in grab_vma()
    71          atomic_and(~I915_VMA_PIN_MASK, &vma->flags);  in grab_vma()
    77  static void ungrab_vma(struct i915_vma *vma)  in ungrab_vma() argument
    79          if (dying_vma(vma))  in ungrab_vma()
    82          i915_gem_object_unlock(vma->obj);  in ungrab_vma()
    [all …]
|
/linux-6.1.9/drivers/gpu/drm/ |
D | drm_vm.c |
    59          struct vm_area_struct *vma;  member
    63  static void drm_vm_open(struct vm_area_struct *vma);
    64  static void drm_vm_close(struct vm_area_struct *vma);
    67                              struct vm_area_struct *vma)  in drm_io_prot() argument
    69          pgprot_t tmp = vm_get_page_prot(vma->vm_flags);  in drm_io_prot()
    78          if (efi_range_is_wc(vma->vm_start, vma->vm_end -  in drm_io_prot()
    79                              vma->vm_start))  in drm_io_prot()
    89  static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)  in drm_dma_prot() argument
    91          pgprot_t tmp = vm_get_page_prot(vma->vm_flags);  in drm_dma_prot()
   112          struct vm_area_struct *vma = vmf->vma;  in drm_vm_fault() local
    [all …]
|
/linux-6.1.9/mm/ |
D | mremap.c |
    72  static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pud() argument
    86  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pmd() argument
    92          pud = alloc_new_pud(mm, vma, addr);  in alloc_new_pmd()
   105  static void take_rmap_locks(struct vm_area_struct *vma)  in take_rmap_locks() argument
   107          if (vma->vm_file)  in take_rmap_locks()
   108                  i_mmap_lock_write(vma->vm_file->f_mapping);  in take_rmap_locks()
   109          if (vma->anon_vma)  in take_rmap_locks()
   110                  anon_vma_lock_write(vma->anon_vma);  in take_rmap_locks()
   113  static void drop_rmap_locks(struct vm_area_struct *vma)  in drop_rmap_locks() argument
   115          if (vma->anon_vma)  in drop_rmap_locks()
    [all …]
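Note: mm/mremap.c implements the mremap(2) system call; the helpers matched above (alloc_new_pud/alloc_new_pmd, take_rmap_locks/drop_rmap_locks) run when a vma's page tables have to be moved to a new address. The following is a minimal userspace sketch of driving that path; it is illustrative only and not taken from the kernel tree.

    /* Grow an anonymous mapping with mremap(2).  Build: cc -o mremap_demo mremap_demo.c */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t old_len = 4096, new_len = 8 * 4096;

            char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }
            memset(p, 0xaa, old_len);

            /* MREMAP_MAYMOVE lets the kernel relocate the vma; when it does,
             * the page-table move in mm/mremap.c (alloc_new_pmd() and friends)
             * carries the existing pages to the new location. */
            char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
            if (q == MAP_FAILED) { perror("mremap"); return 1; }

            printf("old %p -> new %p (%zu -> %zu bytes)\n", (void *)p, (void *)q, old_len, new_len);
            return munmap(q, new_len) ? 1 : 0;
    }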
|
D | mmap.c |
    79                  struct vm_area_struct *vma, struct vm_area_struct *prev,
    89  void vma_set_page_prot(struct vm_area_struct *vma)  in vma_set_page_prot() argument
    91          unsigned long vm_flags = vma->vm_flags;  in vma_set_page_prot()
    94          vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);  in vma_set_page_prot()
    95          if (vma_wants_writenotify(vma, vm_page_prot)) {  in vma_set_page_prot()
   100          WRITE_ONCE(vma->vm_page_prot, vm_page_prot);  in vma_set_page_prot()
   106  static void __remove_shared_vm_struct(struct vm_area_struct *vma,  in __remove_shared_vm_struct() argument
   109          if (vma->vm_flags & VM_SHARED)  in __remove_shared_vm_struct()
   113          vma_interval_tree_remove(vma, &mapping->i_mmap);  in __remove_shared_vm_struct()
   121  void unlink_file_vma(struct vm_area_struct *vma)  in unlink_file_vma() argument
    [all …]
|
D | nommu.c |
    98          struct vm_area_struct *vma;  in kobjsize() local
   100          vma = find_vma(current->mm, (unsigned long)objp);  in kobjsize()
   101          if (vma)  in kobjsize()
   102                  return vma->vm_end - vma->vm_start;  in kobjsize()
   122  int follow_pfn(struct vm_area_struct *vma, unsigned long address,  in follow_pfn() argument
   125          if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))  in follow_pfn()
   171          struct vm_area_struct *vma;  in __vmalloc_user_flags() local
   174          vma = find_vma(current->mm, (unsigned long)ret);  in __vmalloc_user_flags()
   175          if (vma)  in __vmalloc_user_flags()
   176                  vma->vm_flags |= VM_USERMAP;  in __vmalloc_user_flags()
    [all …]
|
D | madvise.c |
    94  struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)  in anon_vma_name() argument
    96          mmap_assert_locked(vma->vm_mm);  in anon_vma_name()
    98          if (vma->vm_file)  in anon_vma_name()
   101          return vma->anon_name;  in anon_vma_name()
   105  static int replace_anon_vma_name(struct vm_area_struct *vma,  in replace_anon_vma_name() argument
   108          struct anon_vma_name *orig_name = anon_vma_name(vma);  in replace_anon_vma_name()
   111                  vma->anon_name = NULL;  in replace_anon_vma_name()
   119          vma->anon_name = anon_vma_name_reuse(anon_name);  in replace_anon_vma_name()
   125  static int replace_anon_vma_name(struct vm_area_struct *vma,  in replace_anon_vma_name() argument
   140  static int madvise_update_vma(struct vm_area_struct *vma,  in madvise_update_vma() argument
    [all …]
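Note: the anon_vma_name()/replace_anon_vma_name()/madvise_update_vma() hits above belong to the named-anonymous-vma feature. A hedged userspace sketch follows; it assumes a kernel and headers that provide PR_SET_VMA / PR_SET_VMA_ANON_NAME (CONFIG_ANON_VMA_NAME, v5.17+), and the name "heap-demo" is an arbitrary example.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            /* On success the range shows up in /proc/self/maps as [anon:heap-demo];
             * in the kernel this goes through madvise_update_vma()/replace_anon_vma_name(). */
            if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                      (unsigned long)p, len, (unsigned long)"heap-demo"))
                    perror("prctl(PR_SET_VMA_ANON_NAME)");  /* older kernel or headers */

            /* madvise(2) on the same range is handled by the same file. */
            if (madvise(p, len, MADV_DONTNEED))
                    perror("madvise");
            return 0;
    }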
|
D | mprotect.c |
    42  static inline bool can_change_pte_writable(struct vm_area_struct *vma,  in can_change_pte_writable() argument
    47          VM_BUG_ON(!(vma->vm_flags & VM_WRITE) || pte_write(pte));  in can_change_pte_writable()
    53          if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))  in can_change_pte_writable()
    57          if (userfaultfd_pte_wp(vma, pte))  in can_change_pte_writable()
    60          if (!(vma->vm_flags & VM_SHARED)) {  in can_change_pte_writable()
    67                  page = vm_normal_page(vma, addr, pte);  in can_change_pte_writable()
    76                  struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,  in change_pte_range() argument
   103          pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in change_pte_range()
   106          if (prot_numa && !(vma->vm_flags & VM_SHARED) &&  in change_pte_range()
   107              atomic_read(&vma->vm_mm->mm_users) == 1)  in change_pte_range()
    [all …]
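Note: change_pte_range() and can_change_pte_writable() above do the per-PTE work behind mprotect(2). A small userspace sketch (illustrative, not taken from the listing):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }
            strcpy(p, "hello");

            /* Drop write permission: the kernel updates vma->vm_flags and walks
             * the range rewriting PTEs (change_pte_range() above). */
            if (mprotect(p, len, PROT_READ)) { perror("mprotect"); return 1; }
            printf("read-only now: %s\n", p);

            /* Restore write access; whether a PTE can be made writable right away
             * is the kind of decision can_change_pte_writable() encodes. */
            if (mprotect(p, len, PROT_READ | PROT_WRITE)) { perror("mprotect"); return 1; }
            p[0] = 'H';
            printf("writable again: %s\n", p);
            return munmap(p, len) ? 1 : 0;
    }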
|
D | memory.c |
   397                  struct vm_area_struct *vma, unsigned long floor,  in free_pgtables() argument
   400          MA_STATE(mas, mt, vma->vm_end, vma->vm_end);  in free_pgtables()
   403                  unsigned long addr = vma->vm_start;  in free_pgtables()
   416                  unlink_anon_vmas(vma);  in free_pgtables()
   417                  unlink_file_vma(vma);  in free_pgtables()
   419                  if (is_vm_hugetlb_page(vma)) {  in free_pgtables()
   420                          hugetlb_free_pgd_range(tlb, addr, vma->vm_end,  in free_pgtables()
   426                          while (next && next->vm_start <= vma->vm_end + PMD_SIZE  in free_pgtables()
   428                                  vma = next;  in free_pgtables()
   430                                  unlink_anon_vmas(vma);  in free_pgtables()
    [all …]
|
D | pgtable-generic.c |
    65  int ptep_set_access_flags(struct vm_area_struct *vma,  in ptep_set_access_flags() argument
    71                  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    72                  flush_tlb_fix_spurious_fault(vma, address);  in ptep_set_access_flags()
    79  int ptep_clear_flush_young(struct vm_area_struct *vma,  in ptep_clear_flush_young() argument
    83          young = ptep_test_and_clear_young(vma, address, ptep);  in ptep_clear_flush_young()
    85                  flush_tlb_page(vma, address);  in ptep_clear_flush_young()
    91  pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,  in ptep_clear_flush() argument
    94          struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
    98                  flush_tlb_page(vma, address);  in ptep_clear_flush()
   106  int pmdp_set_access_flags(struct vm_area_struct *vma,  in pmdp_set_access_flags() argument
    [all …]
|
D | rmap.c |
   149  static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link() argument
   153          avc->vma = vma;  in anon_vma_chain_link()
   155          list_add(&avc->same_vma, &vma->anon_vma_chain);  in anon_vma_chain_link()
   187  int __anon_vma_prepare(struct vm_area_struct *vma)  in __anon_vma_prepare() argument
   189          struct mm_struct *mm = vma->vm_mm;  in __anon_vma_prepare()
   199          anon_vma = find_mergeable_anon_vma(vma);  in __anon_vma_prepare()
   212          if (likely(!vma->anon_vma)) {  in __anon_vma_prepare()
   213                  vma->anon_vma = anon_vma;  in __anon_vma_prepare()
   214                  anon_vma_chain_link(vma, avc, anon_vma);  in __anon_vma_prepare()
   333  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)  in anon_vma_fork() argument
    [all …]
|
D | huge_memory.c |
    74  bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,  in hugepage_vma_check() argument
    77          if (!vma->vm_mm)                /* vdso */  in hugepage_vma_check()
    86              test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in hugepage_vma_check()
    95          if (vma_is_dax(vma))  in hugepage_vma_check()
   113              !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))  in hugepage_vma_check()
   121          if (!in_pf && shmem_file(vma->vm_file))  in hugepage_vma_check()
   122                  return shmem_huge_enabled(vma, !enforce_sysfs);  in hugepage_vma_check()
   131          if (!in_pf && file_thp_enabled(vma))  in hugepage_vma_check()
   134          if (!vma_is_anonymous(vma))  in hugepage_vma_check()
   137          if (vma_is_temporary_stack(vma))  in hugepage_vma_check()
    [all …]
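Note: hugepage_vma_check() above decides whether a vma may be backed by transparent huge pages. One way userspace opts a range in is madvise(MADV_HUGEPAGE); the sketch below is illustrative and assumes THP is enabled and a 2 MiB PMD size (whether a huge page is actually used also depends on alignment and THP policy).

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2UL << 20;         /* one PMD-sized region, assumed 2 MiB */
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            /* Sets VM_HUGEPAGE on the vma, one of the conditions
             * hugepage_vma_check() evaluates.  Needs CONFIG_TRANSPARENT_HUGEPAGE. */
            if (madvise(p, len, MADV_HUGEPAGE))
                    perror("madvise(MADV_HUGEPAGE)");

            memset(p, 0, len);      /* first touch may fault in a huge page, alignment permitting */
            puts("check AnonHugePages in /proc/self/smaps for this range");
            return 0;
    }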
|
/linux-6.1.9/drivers/gpu/drm/msm/ |
D | msm_gem_vma.c |
    41  bool msm_gem_vma_inuse(struct msm_gem_vma *vma)  in msm_gem_vma_inuse() argument
    43          if (vma->inuse > 0)  in msm_gem_vma_inuse()
    46          while (vma->fence_mask) {  in msm_gem_vma_inuse()
    47                  unsigned idx = ffs(vma->fence_mask) - 1;  in msm_gem_vma_inuse()
    49                  if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))  in msm_gem_vma_inuse()
    52                  vma->fence_mask &= ~BIT(idx);  in msm_gem_vma_inuse()
    60                  struct msm_gem_vma *vma)  in msm_gem_purge_vma() argument
    62          unsigned size = vma->node.size;  in msm_gem_purge_vma()
    65          GEM_WARN_ON(msm_gem_vma_inuse(vma));  in msm_gem_purge_vma()
    68          if (!vma->mapped)  in msm_gem_purge_vma()
    [all …]
|
/linux-6.1.9/drivers/gpu/drm/i915/display/ |
D | intel_fb_pin.c |
    30          struct i915_vma *vma;  in intel_pin_fb_obj_dpt() local
    73          vma = i915_vma_instance(obj, vm, view);  in intel_pin_fb_obj_dpt()
    74          if (IS_ERR(vma)) {  in intel_pin_fb_obj_dpt()
    75                  ret = PTR_ERR(vma);  in intel_pin_fb_obj_dpt()
    79          if (i915_vma_misplaced(vma, 0, alignment, 0)) {  in intel_pin_fb_obj_dpt()
    80                  ret = i915_vma_unbind(vma);  in intel_pin_fb_obj_dpt()
    85          ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);  in intel_pin_fb_obj_dpt()
    90                  vma = ERR_PTR(ret);  in intel_pin_fb_obj_dpt()
    94          vma->display_alignment = max_t(u64, vma->display_alignment, alignment);  in intel_pin_fb_obj_dpt()
    98          i915_vma_get(vma);  in intel_pin_fb_obj_dpt()
    [all …]
|
/linux-6.1.9/drivers/gpu/drm/nouveau/ |
D | nouveau_vmm.c |
    29  nouveau_vma_unmap(struct nouveau_vma *vma)  in nouveau_vma_unmap() argument
    31          if (vma->mem) {  in nouveau_vma_unmap()
    32                  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);  in nouveau_vma_unmap()
    33                  vma->mem = NULL;  in nouveau_vma_unmap()
    38  nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)  in nouveau_vma_map() argument
    40          struct nvif_vma tmp = { .addr = vma->addr };  in nouveau_vma_map()
    41          int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);  in nouveau_vma_map()
    44          vma->mem = mem;  in nouveau_vma_map()
    51          struct nouveau_vma *vma;  in nouveau_vma_find() local
    53          list_for_each_entry(vma, &nvbo->vma_list, head) {  in nouveau_vma_find()
    [all …]
|
/linux-6.1.9/include/linux/ |
D | userfaultfd_k.h |
    76  extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
    80  static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,  in is_mergeable_vm_userfaultfd_ctx() argument
    83          return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;  in is_mergeable_vm_userfaultfd_ctx()
    97  static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)  in uffd_disable_huge_pmd_share() argument
    99          return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);  in uffd_disable_huge_pmd_share()
   109  static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)  in uffd_disable_fault_around() argument
   111          return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);  in uffd_disable_fault_around()
   114  static inline bool userfaultfd_missing(struct vm_area_struct *vma)  in userfaultfd_missing() argument
   116          return vma->vm_flags & VM_UFFD_MISSING;  in userfaultfd_missing()
   119  static inline bool userfaultfd_wp(struct vm_area_struct *vma)  in userfaultfd_wp() argument
    [all …]
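Note: the VM_UFFD_MISSING / VM_UFFD_WP tests above check flags that are set on a vma when userspace registers it with userfaultfd(2). A minimal registration sketch follows; it assumes permission to create a userfaultfd (e.g. vm.unprivileged_userfaultfd=1 or sufficient capabilities) and deliberately omits the fault-handling thread a real user would run.

    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
            if (uffd < 0) { perror("userfaultfd"); return 1; }

            struct uffdio_api api = { .api = UFFD_API };
            if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

            void *region = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (region == MAP_FAILED) { perror("mmap"); return 1; }

            struct uffdio_register reg = {
                    .range = { .start = (unsigned long)region, .len = page },
                    .mode  = UFFDIO_REGISTER_MODE_MISSING,
            };
            if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); return 1; }

            /* The vma now carries VM_UFFD_MISSING (userfaultfd_missing() above
             * returns true); a handler thread would read events from uffd and
             * resolve missing-page faults with UFFDIO_COPY. */
            printf("registered %p (%ld bytes) for missing-page faults\n", region, page);
            return 0;
    }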
|
D | huge_mm.h |
    17                            struct vm_area_struct *vma);
    28  struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
    31  bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    33  int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
    35  int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
    37  bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
    39  int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    59          return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);  in vmf_insert_pfn_pmd()
    78          return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);  in vmf_insert_pfn_pud()
   138  static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,  in transhuge_vma_suitable() argument
    [all …]
|
/linux-6.1.9/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | vmm.c |
   750          struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);  in nvkm_vma_new() local
   751          if (vma) {  in nvkm_vma_new()
   752                  vma->addr = addr;  in nvkm_vma_new()
   753                  vma->size = size;  in nvkm_vma_new()
   754                  vma->page = NVKM_VMA_PAGE_NONE;  in nvkm_vma_new()
   755                  vma->refd = NVKM_VMA_PAGE_NONE;  in nvkm_vma_new()
   757          return vma;  in nvkm_vma_new()
   761  nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)  in nvkm_vma_tail() argument
   765          BUG_ON(vma->size == tail);  in nvkm_vma_tail()
   767          if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))  in nvkm_vma_tail()
    [all …]
|
D | uvmm.c |
   107          struct nvkm_vma *vma;  in nvkm_uvmm_mthd_unmap() local
   117          vma = nvkm_vmm_node_search(vmm, addr);  in nvkm_uvmm_mthd_unmap()
   118          if (ret = -ENOENT, !vma || vma->addr != addr) {  in nvkm_uvmm_mthd_unmap()
   120                            addr, vma ? vma->addr : ~0ULL);  in nvkm_uvmm_mthd_unmap()
   124          if (ret = -ENOENT, vma->busy) {  in nvkm_uvmm_mthd_unmap()
   125                  VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);  in nvkm_uvmm_mthd_unmap()
   129          if (ret = -EINVAL, !vma->memory) {  in nvkm_uvmm_mthd_unmap()
   134          nvkm_vmm_unmap_locked(vmm, vma, false);  in nvkm_uvmm_mthd_unmap()
   150          struct nvkm_vma *vma;  in nvkm_uvmm_mthd_map() local
   169          if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {  in nvkm_uvmm_mthd_map()
    [all …]
|
/linux-6.1.9/drivers/gpu/drm/i915/selftests/ |
D | i915_gem_gtt.c |
   391          struct i915_vma *vma;  in close_object_list() local
   393                  vma = i915_vma_instance(obj, vm, NULL);  in close_object_list()
   394                  if (!IS_ERR(vma))  in close_object_list()
   395                          ignored = i915_vma_unbind_unlocked(vma);  in close_object_list()
   414          struct i915_vma *vma;  in fill_hole() local
   455                          vma = i915_vma_instance(obj, vm, NULL);  in fill_hole()
   456                          if (IS_ERR(vma))  in fill_hole()
   465                          err = i915_vma_pin(vma, 0, 0, offset | flags);  in fill_hole()
   472                          if (!drm_mm_node_allocated(&vma->node) ||  in fill_hole()
   473                              i915_vma_misplaced(vma, 0, 0, offset | flags)) {  in fill_hole()
    [all …]
|
D | i915_vma.c |
    37  static bool assert_vma(struct i915_vma *vma,  in assert_vma() argument
    43          if (vma->vm != ctx->vm) {  in assert_vma()
    48          if (vma->size != obj->base.size) {  in assert_vma()
    50                         vma->size, obj->base.size);  in assert_vma()
    54          if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {  in assert_vma()
    56                         vma->gtt_view.type);  in assert_vma()
    68          struct i915_vma *vma;  in checked_vma_instance() local
    71          vma = i915_vma_instance(obj, vm, view);  in checked_vma_instance()
    72          if (IS_ERR(vma))  in checked_vma_instance()
    73                  return vma;  in checked_vma_instance()
    [all …]
|
/linux-6.1.9/drivers/pci/ |
D | mmap.c |
    23                              struct vm_area_struct *vma,  in pci_mmap_resource_range() argument
    30          if (vma->vm_pgoff + vma_pages(vma) > size)  in pci_mmap_resource_range()
    34                  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);  in pci_mmap_resource_range()
    36                  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);  in pci_mmap_resource_range()
    39                  ret = pci_iobar_pfn(pdev, bar, vma);  in pci_mmap_resource_range()
    43                  vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);  in pci_mmap_resource_range()
    45          vma->vm_ops = &pci_phys_vm_ops;  in pci_mmap_resource_range()
    47          return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,  in pci_mmap_resource_range()
    48                                    vma->vm_end - vma->vm_start,  in pci_mmap_resource_range()
    49                                    vma->vm_page_prot);  in pci_mmap_resource_range()
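Note: pci_mmap_resource_range() above is what backs mmap() on a device's sysfs resourceN files. A userspace sketch of mapping the first page of BAR 0 follows; the device address is a placeholder (substitute a real BDF), root privileges are typically required, and reading a real device register may have side effects.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* Placeholder BDF; pick one from lspci. */
            const char *res = "/sys/bus/pci/devices/0000:00:02.0/resource0";
            size_t len = 4096;              /* just the first page of the BAR */

            int fd = open(res, O_RDWR | O_SYNC);
            if (fd < 0) { perror("open"); return 1; }

            /* The file offset becomes vma->vm_pgoff; 0 maps from the BAR start,
             * and the kernel finishes the job with io_remap_pfn_range() above. */
            volatile uint32_t *bar = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                          MAP_SHARED, fd, 0);
            if (bar == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

            printf("first register word: 0x%08x\n", bar[0]);
            munmap((void *)bar, len);
            close(fd);
            return 0;
    }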
|
/linux-6.1.9/fs/proc/ |
D | task_nommu.c |
    24          struct vm_area_struct *vma;  in task_mem() local
    29          for_each_vma(vmi, vma) {  in task_mem()
    30                  bytes += kobjsize(vma);  in task_mem()
    32                  region = vma->vm_region;  in task_mem()
    37                          size = vma->vm_end - vma->vm_start;  in task_mem()
    41                      vma->vm_flags & VM_MAYSHARE) {  in task_mem()
    46                          slack = region->vm_end - vma->vm_end;  in task_mem()
    84          struct vm_area_struct *vma;  in task_vsize() local
    88          for_each_vma(vmi, vma)  in task_vsize()
    89                  vsize += vma->vm_end - vma->vm_start;  in task_vsize()
    [all …]
|
D | task_mmu.c |
   129          struct vm_area_struct *vma = vma_next(&priv->iter);  in proc_get_vma() local
   131          if (vma) {  in proc_get_vma()
   132                  *ppos = vma->vm_start;  in proc_get_vma()
   135                  vma = get_gate_vma(priv->mm);  in proc_get_vma()
   138          return vma;  in proc_get_vma()
   243  static int is_stack(struct vm_area_struct *vma)  in is_stack() argument
   250          return vma->vm_start <= vma->vm_mm->start_stack &&  in is_stack()
   251                  vma->vm_end >= vma->vm_mm->start_stack;  in is_stack()
   275  show_map_vma(struct seq_file *m, struct vm_area_struct *vma)  in show_map_vma() argument
   277          struct mm_struct *mm = vma->vm_mm;  in show_map_vma()
    [all …]
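Note: show_map_vma() above formats the per-vma lines of /proc/<pid>/maps. A trivial userspace sketch that reads them back for the current process:

    #include <stdio.h>

    int main(void)
    {
            char line[512];
            FILE *f = fopen("/proc/self/maps", "r");
            if (!f) { perror("fopen"); return 1; }

            /* One line per vma: start-end perms offset dev inode path */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);

            fclose(f);
            return 0;
    }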
|