
Searched for refs:vmf (results 1–25 of 135, sorted by relevance).


/linux-6.1.9/mm/
  memory.c
    106: static vm_fault_t do_fault(struct vm_fault *vmf);
    2834: static inline int pte_unmap_same(struct vm_fault *vmf)
    2839: spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
    2841: same = pte_same(*vmf->pte, vmf->orig_pte);
    2845: pte_unmap(vmf->pte);
    2846: vmf->pte = NULL;
    2851: struct vm_fault *vmf) in __wp_page_copy_user()
    2857: struct vm_area_struct *vma = vmf->vma;
    2859: unsigned long addr = vmf->address;
    2879: if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
    [all …]
  huge_memory.c
    652: static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
    655: struct vm_area_struct *vma = vmf->vma;
    657: unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
    676: clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
    684: vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    685: if (unlikely(!pmd_none(*vmf->pmd))) {
    696: spin_unlock(vmf->ptl);
    699: ret = handle_userfault(vmf, VM_UFFD_MISSING);
    708: pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
    709: set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
    [all …]
  secretmem.c
    50: static vm_fault_t secretmem_fault(struct vm_fault *vmf)
    52: struct address_space *mapping = vmf->vma->vm_file->f_mapping;
    53: struct inode *inode = file_inode(vmf->vma->vm_file);
    54: pgoff_t offset = vmf->pgoff;
    55: gfp_t gfp = vmf->gfp_mask;
    61: if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
    103: vmf->page = page;
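
Aside: secretmem_fault() above is the common page-returning fault pattern: resolve vmf->pgoff to a struct page, take a reference, and hand it back through vmf->page. A minimal sketch of that pattern follows; struct my_dev, its fields, and my_vm_ops are hypothetical stand-ins, not kernel APIs.

#include <linux/mm.h>

/* Hypothetical per-device state; not a kernel API. */
struct my_dev {
	struct page **pages;		/* preallocated backing pages */
	unsigned long nr_pages;
};

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	/* Fault past the backing store: raise SIGBUS, as secretmem does. */
	if (vmf->pgoff >= dev->nr_pages)
		return VM_FAULT_SIGBUS;

	/* The reference travels with vmf->page; core mm releases it. */
	vmf->page = dev->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;			/* 0 means vmf->page is filled in */
}

static const struct vm_operations_struct my_vm_ops = {
	.fault = my_fault,
};
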
  filemap.c
    2943: static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
    2954: if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
    2957: *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
    2958: if (vmf->flags & FAULT_FLAG_KILLABLE) {
    2967: mmap_read_unlock(vmf->vma->vm_mm);
    2983: static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
    2985: struct file *file = vmf->vma->vm_file;
    2988: DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
    2990: unsigned long vm_flags = vmf->vma->vm_flags;
    2996: fpin = maybe_unlock_mmap_for_io(vmf, fpin);
    [all …]
  swap_state.c
    611: struct vm_fault *vmf) in swap_cluster_readahead()
    622: struct vm_area_struct *vma = vmf->vma;
    623: unsigned long addr = vmf->address;
    712: static void swap_ra_info(struct vm_fault *vmf,
    715: struct vm_area_struct *vma = vmf->vma;
    732: faddr = vmf->address;
    733: orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
    789: struct vm_fault *vmf) in swap_vma_readahead()
    793: struct vm_area_struct *vma = vmf->vma;
    803: swap_ra_info(vmf, &ra_info);
    [all …]
  swap.h
    56: struct vm_fault *vmf);
    58: struct vm_fault *vmf);
    85: gfp_t gfp_mask, struct vm_fault *vmf) in swap_cluster_readahead()
    91: struct vm_fault *vmf) in swapin_readahead()
/linux-6.1.9/include/trace/events/
  fs_dax.h
    11: TP_PROTO(struct inode *inode, struct vm_fault *vmf,
    13: TP_ARGS(inode, vmf, max_pgoff, result),
    29: __entry->vm_start = vmf->vma->vm_start;
    30: __entry->vm_end = vmf->vma->vm_end;
    31: __entry->vm_flags = vmf->vma->vm_flags;
    32: __entry->address = vmf->address;
    33: __entry->flags = vmf->flags;
    34: __entry->pgoff = vmf->pgoff;
    56: TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
    58: TP_ARGS(inode, vmf, max_pgoff, result))
    [all …]
/linux-6.1.9/drivers/dax/
  device.c
    76: static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
    80: struct file *filp = vmf->vma->vm_file;
    88: pgoff = linear_page_index(vmf->vma,
    89: ALIGN(vmf->address, fault_size));
    104: struct vm_fault *vmf) in __dev_dax_pte_fault()
    111: if (check_vma(dev_dax, vmf->vma, __func__))
    123: phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
    125: dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
    131: dax_set_mapping(vmf, pfn, fault_size);
    133: return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
    [all …]
/linux-6.1.9/fs/
  dax.c
    811: static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
    825: vto = kmap_atomic(vmf->cow_page);
    826: copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
    856: static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
    860: struct address_space *mapping = vmf->vma->vm_file->f_mapping;
    862: bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
    884: dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
    1149: static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
    1153: unsigned long vaddr = vmf->address;
    1157: *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
    [all …]
/linux-6.1.9/drivers/gpu/drm/ttm/
  ttm_bo_vm.c
    47: struct vm_fault *vmf) in ttm_bo_vm_fault_idle()
    62: if (fault_flag_allow_retry_first(vmf->flags)) {
    63: if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
    67: mmap_read_unlock(vmf->vma->vm_mm);
    122: struct vm_fault *vmf) in ttm_bo_vm_reserve()
    136: if (fault_flag_allow_retry_first(vmf->flags)) {
    137: if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
    139: mmap_read_unlock(vmf->vma->vm_mm);
    186: vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
    190: struct vm_area_struct *vma = vmf->vma;
    [all …]
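
Aside: the ttm_bo_vm_fault_idle() and ttm_bo_vm_reserve() hits show the fault-retry protocol: on a first attempt the handler may drop mmap_lock, block, and return VM_FAULT_RETRY so the core re-runs the fault. A sketch of that shape follows; my_wait_for_device() is a hypothetical stand-in for TTM's reservation/idle wait.

#include <linux/mm.h>

/* Hypothetical: block until the device releases the buffer. */
static void my_wait_for_device(void)
{
}

/*
 * Helper called from a .fault handler, mirroring ttm_bo_vm_fault_idle().
 * Real code must pin the object backing the VMA (TTM uses ttm_bo_get())
 * before dropping mmap_lock, since the VMA may be torn down after that.
 */
static vm_fault_t my_fault_wait(struct vm_fault *vmf)
{
	if (fault_flag_allow_retry_first(vmf->flags)) {
		/* Caller forbids blocking: report retry immediately. */
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		/* First attempt: sleep without holding mmap_lock. */
		mmap_read_unlock(vmf->vma->vm_mm);
		my_wait_for_device();
		return VM_FAULT_RETRY;
	}

	/* Retry not allowed: wait with mmap_lock still held. */
	my_wait_for_device();
	return 0;			/* caller continues the fault */
}
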
/linux-6.1.9/drivers/video/fbdev/core/
  fb_defio.c
    94: static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
    98: struct fb_info *info = vmf->vma->vm_private_data;
    100: offset = vmf->pgoff << PAGE_SHIFT;
    110: if (vmf->vma->vm_file)
    111: page->mapping = vmf->vma->vm_file->f_mapping;
    116: page->index = vmf->pgoff; /* for page_mkclean() */
    118: vmf->page = page;
    205: static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
    207: unsigned long offset = vmf->address - vmf->vma->vm_start;
    208: struct page *page = vmf->page;
    [all …]
/linux-6.1.9/fs/ocfs2/
  mmap.c
    31: static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
    33: struct vm_area_struct *vma = vmf->vma;
    38: ret = filemap_fault(vmf);
    42: vma, vmf->page, vmf->pgoff);
    113: static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
    115: struct page *page = vmf->page;
    116: struct inode *inode = file_inode(vmf->vma->vm_file);
    144: ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
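
Aside: ocfs2_page_mkwrite() above (and nilfs_page_mkwrite() further down) use the .page_mkwrite hook, which lets a filesystem prepare a page, typically by allocating blocks and dirtying it, before a read-only PTE becomes writable. A sketch of the usual shape follows, with freeze protection and the truncate race check; my_prepare_write() is a hypothetical hook, and the exact locking order varies by filesystem.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical FS hook: allocate blocks / mark the page dirty. */
static int my_prepare_write(struct inode *inode, struct page *page)
{
	return 0;
}

static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret = VM_FAULT_LOCKED;

	/* Block if the filesystem is being frozen. */
	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		/* Raced with truncate: tell the core to refault. */
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (my_prepare_write(inode, page)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
	}
out:
	sb_end_pagefault(inode->i_sb);
	/* On success the page stays locked: hence VM_FAULT_LOCKED. */
	return ret;
}
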
/linux-6.1.9/include/linux/
  huge_mm.h
    10: vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
    14: void huge_pmd_set_accessed(struct vm_fault *vmf);
    20: void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
    22: static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
    27: vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
    42: vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
    56: static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
    59: return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
    61: vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
    75: static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
    [all …]
/linux-6.1.9/arch/x86/entry/vdso/
  vma.c
    60: struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault()
    64: if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
    67: vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
    68: get_page(vmf->page);
    151: struct vm_area_struct *vma, struct vm_fault *vmf) in vvar_fault()
    160: sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
    195: addr = vmf->address + (image->sym_timens_page - sym_offset);
    203: return vmf_insert_pfn(vma, vmf->address, pfn);
    208: return vmf_insert_pfn_prot(vma, vmf->address,
    216: return vmf_insert_pfn(vma, vmf->address,
    [all …]
/linux-6.1.9/drivers/gpu/drm/
  drm_vm.c
    110: static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
    112: struct vm_area_struct *vma = vmf->vma;
    139: resource_size_t offset = vmf->address - vma->vm_start;
    169: vmf->page = page;
    183: static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
    199: static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
    201: struct vm_area_struct *vma = vmf->vma;
    210: offset = vmf->address - vma->vm_start;
    216: vmf->page = page;
    299: static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
    [all …]
/linux-6.1.9/drivers/gpu/drm/vmwgfx/
  vmwgfx_page_dirty.c
    375: vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
    377: struct vm_area_struct *vma = vmf->vma;
    390: save_flags = vmf->flags;
    391: vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
    392: ret = ttm_bo_vm_reserve(bo, vmf);
    393: vmf->flags = save_flags;
    397: page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
    417: vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
    419: struct vm_area_struct *vma = vmf->vma;
    428: ret = ttm_bo_vm_reserve(bo, vmf);
    [all …]
/linux-6.1.9/fs/xfs/
  xfs_file.c
    1266: struct vm_fault *vmf, in xfs_dax_fault()
    1271: return dax_iomap_fault(vmf, pe_size, pfn, NULL,
    1272: (write_fault && !vmf->cow_page) ?
    1279: struct vm_fault *vmf, in xfs_dax_fault()
    1301: struct vm_fault *vmf, in __xfs_filemap_fault()
    1305: struct inode *inode = file_inode(vmf->vma->vm_file);
    1313: file_update_time(vmf->vma->vm_file);
    1320: ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn);
    1322: ret = dax_finish_sync_fault(vmf, pe_size, pfn);
    1327: ret = iomap_page_mkwrite(vmf,
    [all …]
/linux-6.1.9/drivers/xen/
  privcmd-buf.c
    117: static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
    120: vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
    121: vmf->pgoff, (void *)vmf->address);
/linux-6.1.9/fs/ext2/
  file.c
    91: static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
    93: struct inode *inode = file_inode(vmf->vma->vm_file);
    95: bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
    96: (vmf->vma->vm_flags & VM_SHARED);
    100: file_update_time(vmf->vma->vm_file);
    104: ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
/linux-6.1.9/sound/usb/usx2y/
  usX2Yhwdep.c
    21: static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
    28: vmf->vma->vm_start,
    29: vmf->pgoff);
    31: offset = vmf->pgoff << PAGE_SHIFT;
    32: vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
    35: vmf->page = page;
/linux-6.1.9/drivers/char/
  mspec.c
    137: mspec_fault(struct vm_fault *vmf)
    141: pgoff_t index = vmf->pgoff;
    142: struct vma_data *vdata = vmf->vma->vm_private_data;
    164: return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
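
Aside: mspec_fault() and the ocxl/cxl handlers below map memory that has no struct page, so instead of filling vmf->page they compute a PFN and install the PTE directly with vmf_insert_pfn(). A sketch follows; struct my_pfn_dev is hypothetical, and the VMA is assumed to have been marked VM_PFNMAP at mmap() time.

#include <linux/mm.h>

/* Hypothetical device state; not a kernel API. */
struct my_pfn_dev {
	unsigned long base_pfn;		/* first PFN of the device region */
	unsigned long nr_pages;
};

static vm_fault_t my_pfn_fault(struct vm_fault *vmf)
{
	struct my_pfn_dev *dev = vmf->vma->vm_private_data;

	if (vmf->pgoff >= dev->nr_pages)
		return VM_FAULT_SIGBUS;

	/* Installs the PTE itself; returns VM_FAULT_NOPAGE on success. */
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      dev->base_pfn + vmf->pgoff);
}
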
/linux-6.1.9/fs/nilfs2/
  file.c
    45: static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
    47: struct vm_area_struct *vma = vmf->vma;
    48: struct page *page = vmf->page;
    99: ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
/linux-6.1.9/drivers/misc/ocxl/
  sysfs.c
    109: static vm_fault_t global_mmio_fault(struct vm_fault *vmf)
    111: struct vm_area_struct *vma = vmf->vma;
    115: if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT))
    118: offset = vmf->pgoff;
    120: return vmf_insert_pfn(vma, vmf->address, offset);
  context.c
    139: static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
    141: struct vm_area_struct *vma = vmf->vma;
    146: offset = vmf->pgoff << PAGE_SHIFT;
    148: ctx->pasid, vmf->address, offset);
    151: ret = map_pp_mmio(vma, vmf->address, offset, ctx);
    153: ret = map_afu_irq(vma, vmf->address, offset, ctx);
/linux-6.1.9/drivers/misc/cxl/
  context.c
    126: static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
    128: struct vm_area_struct *vma = vmf->vma;
    133: offset = vmf->pgoff << PAGE_SHIFT;
    136: __func__, ctx->pe, vmf->address, offset);
    161: vmf->page = ctx->ff_page;
    168: ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
