Lines matching refs:vmf (all hits below are in fs/dax.c; the name after each line is the enclosing function)

811 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)  in copy_cow_page_dax()  argument
825 vto = kmap_atomic(vmf->cow_page); in copy_cow_page_dax()
826 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); in copy_cow_page_dax()
856 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, in dax_insert_entry() argument
860 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_entry()
862 bool dirty = !dax_fault_is_synchronous(iter, vmf->vma); in dax_insert_entry()
884 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, in dax_insert_entry()
1149 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_load_hole() argument
1153 unsigned long vaddr = vmf->address; in dax_load_hole()
1157 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); in dax_load_hole()
1159 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); in dax_load_hole()
1160 trace_dax_load_hole(inode, vmf, ret); in dax_load_hole()
1165 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1168 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole()
1169 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_pmd_load_hole()
1170 struct vm_area_struct *vma = vmf->vma; in dax_pmd_load_hole()
1178 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); in dax_pmd_load_hole()
1184 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, in dax_pmd_load_hole()
1193 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in dax_pmd_load_hole()
1194 if (!pmd_none(*(vmf->pmd))) { in dax_pmd_load_hole()
1200 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in dax_pmd_load_hole()
1203 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); in dax_pmd_load_hole()
1205 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); in dax_pmd_load_hole()
1207 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1213 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1217 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1491 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, in dax_fault_cow_page() argument
1500 clear_user_highpage(vmf->cow_page, vmf->address); in dax_fault_cow_page()
1503 error = copy_cow_page_dax(vmf, iter); in dax_fault_cow_page()
1514 __SetPageUptodate(vmf->cow_page); in dax_fault_cow_page()
1515 ret = finish_fault(vmf); in dax_fault_cow_page()
1530 static vm_fault_t dax_fault_iter(struct vm_fault *vmf, in dax_fault_iter() argument
1544 if (!pmd && vmf->cow_page) in dax_fault_iter()
1545 return dax_fault_cow_page(vmf, iter); in dax_fault_iter()
1551 return dax_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1552 return dax_pmd_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1564 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); in dax_fault_iter()
1573 if (dax_fault_is_synchronous(iter, vmf->vma)) in dax_fault_iter()
1578 return vmf_insert_pfn_pmd(vmf, pfn, write); in dax_fault_iter()
1582 return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_fault_iter()
1583 return vmf_insert_mixed(vmf->vma, vmf->address, pfn); in dax_fault_iter()
1586 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pte_fault() argument
1589 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pte_fault()
1590 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1593 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, in dax_iomap_pte_fault()
1601 trace_dax_pte_fault(iter.inode, vmf, ret); in dax_iomap_pte_fault()
1612 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) in dax_iomap_pte_fault()
1627 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { in dax_iomap_pte_fault()
1638 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1642 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in dax_iomap_pte_fault()
1658 trace_dax_pte_fault_done(iter.inode, vmf, ret); in dax_iomap_pte_fault()
1663 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, in dax_fault_check_fallback() argument
1666 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_fault_check_fallback()
1667 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_fault_check_fallback()
1675 if ((vmf->pgoff & PG_PMD_COLOUR) != in dax_fault_check_fallback()
1676 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) in dax_fault_check_fallback()
1680 if (write && !(vmf->vma->vm_flags & VM_SHARED)) in dax_fault_check_fallback()
1684 if (pmd_addr < vmf->vma->vm_start) in dax_fault_check_fallback()
1686 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) in dax_fault_check_fallback()
1696 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1699 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pmd_fault()
1700 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1711 if (vmf->flags & FAULT_FLAG_WRITE) in dax_iomap_pmd_fault()
1721 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); in dax_iomap_pmd_fault()
1728 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) in dax_iomap_pmd_fault()
1749 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && in dax_iomap_pmd_fault()
1750 !pmd_devmap(*vmf->pmd)) { in dax_iomap_pmd_fault()
1760 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
1769 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); in dax_iomap_pmd_fault()
1773 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); in dax_iomap_pmd_fault()
1777 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1797 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, in dax_iomap_fault() argument
1802 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); in dax_iomap_fault()
1804 return dax_iomap_pmd_fault(vmf, pfnp, ops); in dax_iomap_fault()
1821 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) in dax_insert_pfn_mkwrite() argument
1823 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite()
1824 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1835 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1843 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_insert_pfn_mkwrite()
1846 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); in dax_insert_pfn_mkwrite()
1851 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
1865 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, in dax_finish_sync_fault() argument
1869 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; in dax_finish_sync_fault()
1873 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); in dax_finish_sync_fault()
1876 return dax_insert_pfn_mkwrite(vmf, pfn, order); in dax_finish_sync_fault()
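
dax_iomap_fault() (1797) and dax_finish_sync_fault() (1865) are the entry points a filesystem fault handler passes its vmf to. Below is a minimal sketch of such a caller, not taken from the listing above: example_dax_huge_fault(), example_dax_fault() and example_iomap_ops are hypothetical names, and the filesystem's own locking around the calls is omitted.

    /*
     * Sketch of a ->huge_fault handler built on the DAX entry points
     * referenced above. example_iomap_ops stands in for the
     * filesystem's real iomap_ops.
     */
    #include <linux/dax.h>
    #include <linux/mm.h>

    extern const struct iomap_ops example_iomap_ops;	/* hypothetical */

    static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
    					 enum page_entry_size pe_size)
    {
    	pfn_t pfn;
    	vm_fault_t ret;

    	/* Dispatches to dax_iomap_pte_fault()/dax_iomap_pmd_fault() (1797). */
    	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);

    	/*
    	 * A synchronous (MAP_SYNC) fault comes back as VM_FAULT_NEEDDSYNC
    	 * with the pfn filled in; dax_finish_sync_fault() (1865) then
    	 * fsyncs the range and installs the writable entry via
    	 * dax_insert_pfn_mkwrite().
    	 */
    	if (ret & VM_FAULT_NEEDDSYNC)
    		ret = dax_finish_sync_fault(vmf, pe_size, pfn);

    	return ret;
    }

    static vm_fault_t example_dax_fault(struct vm_fault *vmf)
    {
    	/* PTE-sized ->fault funnels through the same path. */
    	return example_dax_huge_fault(vmf, PE_SIZE_PTE);
    }

This mirrors, in simplified form, what the in-tree DAX users (ext4, xfs) do in their fault handlers.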