Lines Matching refs:xas

143 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,  in dax_entry_waitqueue()  argument
147 unsigned long index = xas->xa_index; in dax_entry_waitqueue()
156 key->xa = xas->xa; in dax_entry_waitqueue()
159 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
181 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry() argument
187 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
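
The two helpers above choose a wait queue for a DAX entry by hashing the XArray pointer together with the entry's index (line 159). Below is a minimal sketch of that lookup; the table name and size are illustrative (the real table and DAX_WAIT_TABLE_BITS are defined next to these helpers), and all sketches in this listing assume a kernel build context (<linux/xarray.h>, <linux/wait.h>, <linux/pagemap.h> and friends).

#include <linux/hash.h>
#include <linux/wait.h>
#include <linux/xarray.h>

#define WAIT_TABLE_BITS        12      /* illustrative stand-in for DAX_WAIT_TABLE_BITS */
#define WAIT_TABLE_ENTRIES     (1UL << WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[WAIT_TABLE_ENTRIES];

static wait_queue_head_t *pick_entry_waitqueue(struct xa_state *xas,
                                               unsigned long index)
{
        /* Mix the XArray pointer with the entry index to choose a bucket. */
        unsigned long hash = hash_long((unsigned long)xas->xa ^ index,
                                       WAIT_TABLE_BITS);

        return wait_table + hash;
}
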
209 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) in get_unlocked_entry() argument
219 entry = xas_find_conflict(xas); in get_unlocked_entry()
227 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
230 xas_unlock_irq(xas); in get_unlocked_entry()
231 xas_reset(xas); in get_unlocked_entry()
234 xas_lock_irq(xas); in get_unlocked_entry()
243 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
251 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
259 xas_unlock_irq(xas); in wait_entry_unlocked()
264 static void put_unlocked_entry(struct xa_state *xas, void *entry, in put_unlocked_entry() argument
268 dax_wake_entry(xas, entry, mode); in put_unlocked_entry()
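
get_unlocked_entry() and wait_entry_unlocked() build a sleep/retry handshake on those hashed queues: a waiter must drop xa_lock before sleeping (lines 230 and 259) and re-walk the array after retaking it, because the xa_state is stale once the lock has been released. A simplified sketch of that loop follows, assuming a hypothetical entry_is_locked() test and the pick_entry_waitqueue() helper from the previous sketch; the real code additionally filters wakeups through a wait key so only waiters for this slot are woken.

static void *wait_for_unlocked_entry(struct xa_state *xas)
{
        DEFINE_WAIT(wait);
        wait_queue_head_t *wq;
        void *entry;

        for (;;) {
                /* Find any entry in the (possibly multi-index) range. */
                entry = xas_find_conflict(xas);
                if (!entry || !entry_is_locked(entry))
                        return entry;           /* caller still holds xa_lock */

                wq = pick_entry_waitqueue(xas, xas->xa_index);
                prepare_to_wait_exclusive(wq, &wait, TASK_UNINTERRUPTIBLE);
                xas_unlock_irq(xas);            /* never sleep with xa_lock held */
                xas_reset(xas);                 /* the walk state is now stale */
                schedule();
                finish_wait(wq, &wait);
                xas_lock_irq(xas);
        }
}
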
276 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
281 xas_reset(xas); in dax_unlock_entry()
282 xas_lock_irq(xas); in dax_unlock_entry()
283 old = xas_store(xas, entry); in dax_unlock_entry()
284 xas_unlock_irq(xas); in dax_unlock_entry()
286 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_unlock_entry()
292 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
295 return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); in dax_lock_entry()
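
The entry lock itself is a bit stored inside the XArray value entry: dax_lock_entry() writes the same value back with DAX_LOCKED set (line 295), and dax_unlock_entry() stores the clean value and wakes the next waiter. A sketch of that encoding, with the bit position assumed here for illustration:

#define ENTRY_LOCKED    (1UL << 0)      /* stand-in for DAX_LOCKED */

static void *lock_value_entry(struct xa_state *xas, void *entry)
{
        unsigned long v = xa_to_value(entry);

        /* Caller holds xa_lock; the slot keeps the same value plus the lock bit. */
        return xas_store(xas, xa_mk_value(v | ENTRY_LOCKED));
}

static void unlock_value_entry(struct xa_state *xas, void *entry)
{
        /* Re-walk after any lock drop, write back the unlocked value, wake waiters. */
        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);
        xas_unlock_irq(xas);
        /* dax_wake_entry(xas, entry, WAKE_NEXT) would follow here. */
}
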
425 XA_STATE(xas, NULL, 0); in dax_lock_folio()
448 xas.xa = &mapping->i_pages; in dax_lock_folio()
449 xas_lock_irq(&xas); in dax_lock_folio()
451 xas_unlock_irq(&xas); in dax_lock_folio()
454 xas_set(&xas, folio->index); in dax_lock_folio()
455 entry = xas_load(&xas); in dax_lock_folio()
458 wait_entry_unlocked(&xas, entry); in dax_lock_folio()
462 dax_lock_entry(&xas, entry); in dax_lock_folio()
463 xas_unlock_irq(&xas); in dax_lock_folio()
473 XA_STATE(xas, &mapping->i_pages, folio->index); in dax_unlock_folio()
478 dax_unlock_entry(&xas, (void *)cookie); in dax_unlock_folio()
493 XA_STATE(xas, NULL, 0); in dax_lock_mapping_entry()
502 xas.xa = &mapping->i_pages; in dax_lock_mapping_entry()
503 xas_lock_irq(&xas); in dax_lock_mapping_entry()
504 xas_set(&xas, index); in dax_lock_mapping_entry()
505 entry = xas_load(&xas); in dax_lock_mapping_entry()
508 wait_entry_unlocked(&xas, entry); in dax_lock_mapping_entry()
524 dax_lock_entry(&xas, entry); in dax_lock_mapping_entry()
526 xas_unlock_irq(&xas); in dax_lock_mapping_entry()
536 XA_STATE(xas, &mapping->i_pages, index); in dax_unlock_mapping_entry()
541 dax_unlock_entry(&xas, (void *)cookie); in dax_unlock_mapping_entry()
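
dax_lock_folio() and dax_lock_mapping_entry() both begin with XA_STATE(xas, NULL, 0) because the owning mapping is only known after some validation; the state is then pointed at mapping->i_pages by plain assignment (lines 448 and 502) followed by xas_set(). The locked entry value doubles as the cookie passed back at unlock time (lines 478 and 541). A condensed sketch of that shape, reusing lock_value_entry() from the sketch above and omitting the validation, retry, and wait-for-unlocked logic of the real functions:

static void *lock_mapping_entry_sketch(struct address_space *mapping,
                                       pgoff_t index)
{
        XA_STATE(xas, NULL, 0);         /* array not known yet */
        void *entry;

        xas.xa = &mapping->i_pages;     /* bind the state once mapping is trusted */
        xas_lock_irq(&xas);
        xas_set(&xas, index);
        entry = xas_load(&xas);
        if (entry)
                entry = lock_value_entry(&xas, entry);
        xas_unlock_irq(&xas);

        return entry;                   /* acts as the unlock "cookie" */
}
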
573 static void *grab_mapping_entry(struct xa_state *xas, in grab_mapping_entry() argument
576 unsigned long index = xas->xa_index; in grab_mapping_entry()
582 xas_lock_irq(xas); in grab_mapping_entry()
583 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
589 xas_set_err(xas, -EIO); in grab_mapping_entry()
607 dax_lock_entry(xas, entry); in grab_mapping_entry()
615 xas_unlock_irq(xas); in grab_mapping_entry()
617 xas->xa_index & ~PG_PMD_COLOUR, in grab_mapping_entry()
619 xas_reset(xas); in grab_mapping_entry()
620 xas_lock_irq(xas); in grab_mapping_entry()
624 xas_store(xas, NULL); /* undo the PMD join */ in grab_mapping_entry()
625 dax_wake_entry(xas, entry, WAKE_ALL); in grab_mapping_entry()
628 xas_set(xas, index); in grab_mapping_entry()
632 dax_lock_entry(xas, entry); in grab_mapping_entry()
639 dax_lock_entry(xas, entry); in grab_mapping_entry()
640 if (xas_error(xas)) in grab_mapping_entry()
646 xas_unlock_irq(xas); in grab_mapping_entry()
647 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
649 if (xas->xa_node == XA_ERROR(-ENOMEM)) in grab_mapping_entry()
651 if (xas_error(xas)) in grab_mapping_entry()
655 xas_unlock_irq(xas); in grab_mapping_entry()
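
grab_mapping_entry() is where the xas_nomem() retry idiom appears: a store attempted under xa_lock can record -ENOMEM in the state (line 649 checks for exactly that), in which case the lock is dropped, xas_nomem() preallocates, and the walk is retried (line 647). A stripped-down sketch of that loop; the real function also handles conflicting entries and downgrading an existing PMD entry (lines 615-628).

static void *store_entry_with_retry(struct xa_state *xas,
                                    struct address_space *mapping, void *entry)
{
        do {
                xas_lock_irq(xas);
                xas_store(xas, entry);          /* may record -ENOMEM in xas */
                xas_unlock_irq(xas);
                /* xas_nomem() allocates outside the lock and asks us to retry. */
        } while (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM));

        if (xas_error(xas))
                return ERR_PTR(xas_error(xas));
        return entry;
}
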
685 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
715 xas_lock_irq(&xas); in dax_layout_busy_page_range()
716 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
720 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page_range()
723 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_layout_busy_page_range()
729 xas_pause(&xas); in dax_layout_busy_page_range()
730 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
732 xas_lock_irq(&xas); in dax_layout_busy_page_range()
734 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
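
dax_layout_busy_page_range() walks the range with xas_for_each() but cannot keep interrupts disabled for the whole scan, so it periodically calls xas_pause(), drops xa_lock, reschedules, and relocks (lines 729-732); xas_pause() saves the position so the next iteration resumes where the walk left off. A sketch of that batching pattern with an arbitrary batch size:

static void scan_range_sketch(struct address_space *mapping,
                              pgoff_t start, pgoff_t end)
{
        XA_STATE(xas, &mapping->i_pages, start);
        unsigned int scanned = 0;
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, end) {
                /* ... examine one entry here ... */

                if (++scanned % 64)             /* arbitrary batch size */
                        continue;
                xas_pause(&xas);                /* remember where to resume */
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
}
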
748 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
752 xas_lock_irq(&xas); in __dax_invalidate_entry()
753 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
757 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || in __dax_invalidate_entry()
758 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) in __dax_invalidate_entry()
761 xas_store(&xas, NULL); in __dax_invalidate_entry()
765 put_unlocked_entry(&xas, entry, WAKE_ALL); in __dax_invalidate_entry()
766 xas_unlock_irq(&xas); in __dax_invalidate_entry()
773 XA_STATE(xas, &mapping->i_pages, start); in __dax_clear_dirty_range()
777 xas_lock_irq(&xas); in __dax_clear_dirty_range()
778 xas_for_each(&xas, entry, end) { in __dax_clear_dirty_range()
779 entry = get_unlocked_entry(&xas, 0); in __dax_clear_dirty_range()
780 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); in __dax_clear_dirty_range()
781 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); in __dax_clear_dirty_range()
782 put_unlocked_entry(&xas, entry, WAKE_NEXT); in __dax_clear_dirty_range()
787 xas_pause(&xas); in __dax_clear_dirty_range()
788 xas_unlock_irq(&xas); in __dax_clear_dirty_range()
790 xas_lock_irq(&xas); in __dax_clear_dirty_range()
792 xas_unlock_irq(&xas); in __dax_clear_dirty_range()
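
__dax_invalidate_entry() and __dax_clear_dirty_range() use the XArray marks: PAGECACHE_TAG_DIRTY and PAGECACHE_TAG_TOWRITE are queried with xas_get_mark() and cleared with xas_clear_mark(), and an entry is only removed when neither mark is set unless truncation forces it. A sketch of that invalidate decision, with the DAX entry locking omitted:

static int invalidate_entry_sketch(struct address_space *mapping,
                                   pgoff_t index, bool trunc)
{
        XA_STATE(xas, &mapping->i_pages, index);
        void *entry;
        int ret = 0;

        xas_lock_irq(&xas);
        entry = xas_load(&xas);
        if (!entry)
                goto out;
        if (!trunc &&
            (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
             xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
                goto out;                       /* still needs writeback */
        xas_store(&xas, NULL);                  /* remove the entry */
        ret = 1;
out:
        xas_unlock_irq(&xas);
        return ret;
}
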
869 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, in dax_insert_entry() argument
883 unsigned long index = xas->xa_index; in dax_insert_entry()
892 xas_reset(xas); in dax_insert_entry()
893 xas_lock_irq(xas); in dax_insert_entry()
908 old = dax_lock_entry(xas, new_entry); in dax_insert_entry()
913 xas_load(xas); /* Walk the xa_state */ in dax_insert_entry()
917 xas_set_mark(xas, PAGECACHE_TAG_DIRTY); in dax_insert_entry()
920 xas_set_mark(xas, PAGECACHE_TAG_TOWRITE); in dax_insert_entry()
922 xas_unlock_irq(xas); in dax_insert_entry()
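
dax_insert_entry() finishes by propagating dirtiness into the marks so writeback can later find the entry; the xas_load() at line 913 exists only to re-walk the tree so the state points at the slot before xas_set_mark() is called. A reduced sketch of that tail:

static void finish_insert_sketch(struct xa_state *xas, bool dirty, bool towrite)
{
        xas_reset(xas);
        xas_lock_irq(xas);
        xas_load(xas);                  /* walk so the state points at the slot */
        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
        if (towrite)
                xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
        xas_unlock_irq(xas);
}
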
926 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, in dax_writeback_one() argument
943 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
962 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) in dax_writeback_one()
967 dax_lock_entry(xas, entry); in dax_writeback_one()
976 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); in dax_writeback_one()
977 xas_unlock_irq(xas); in dax_writeback_one()
988 index = xas->xa_index & ~(count - 1); in dax_writeback_one()
1006 xas_reset(xas); in dax_writeback_one()
1007 xas_lock_irq(xas); in dax_writeback_one()
1008 xas_store(xas, entry); in dax_writeback_one()
1009 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); in dax_writeback_one()
1010 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
1016 put_unlocked_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
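
dax_writeback_one() orders its mark updates deliberately: TOWRITE is cleared under xa_lock before the flush (line 976), while DIRTY is cleared only after the flush completes (line 1009), so a write that redirties the entry during the flush is not lost. The xas_store() at line 1008 re-walks the state (and writes the unlocked entry back) so the final xas_clear_mark() lands on the right slot. A sketch of that ordering, with the DAX entry-lock handling and the actual flush elided:

static void writeback_entry_sketch(struct xa_state *xas, void *entry)
{
        /* Called with xa_lock held. */
        if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
                return;                         /* already written back */

        xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
        xas_unlock_irq(xas);

        /* ... flush the entry's pfns to persistent media here ... */

        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);                  /* re-walk and store the entry back */
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
}
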
1028 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
1041 trace_dax_writeback_range(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
1043 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
1045 xas_lock_irq(&xas); in dax_writeback_mapping_range()
1046 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
1047 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1055 xas_pause(&xas); in dax_writeback_mapping_range()
1056 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
1058 xas_lock_irq(&xas); in dax_writeback_mapping_range()
1060 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
1061 trace_dax_writeback_range_done(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
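
dax_writeback_mapping_range() first tags the range with tag_pages_for_writeback() (line 1043) and then visits only tagged entries via xas_for_each_marked(), with the same pause/relock dance as the earlier scans. A condensed sketch, reusing writeback_entry_sketch() from the previous block and pausing on every iteration for simplicity:

static void writeback_range_sketch(struct address_space *mapping,
                                   pgoff_t start, pgoff_t end)
{
        XA_STATE(xas, &mapping->i_pages, start);
        void *entry;

        tag_pages_for_writeback(mapping, start, end);

        xas_lock_irq(&xas);
        xas_for_each_marked(&xas, entry, end, PAGECACHE_TAG_TOWRITE) {
                writeback_entry_sketch(&xas, entry);
                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
}
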
1186 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_load_hole() argument
1194 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); in dax_load_hole()
1202 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1221 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, in dax_pmd_load_hole()
1254 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1644 struct xa_state *xas, void **entry, bool pmd) in dax_fault_iter() argument
1649 loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; in dax_fault_iter()
1663 return dax_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1664 return dax_pmd_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1676 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); in dax_fault_iter()
1701 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1726 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1749 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1767 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
1774 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, in dax_fault_check_fallback() argument
1801 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) in dax_fault_check_fallback()
1811 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1833 if (xas.xa_index >= max_pgoff) { in dax_iomap_pmd_fault()
1838 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) in dax_iomap_pmd_fault()
1847 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1865 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; in dax_iomap_pmd_fault()
1870 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
1876 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
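
The fault paths show both flavours of state initialisation: dax_iomap_pte_fault() uses XA_STATE() at vmf->pgoff (line 1701), while dax_iomap_pmd_fault() uses XA_STATE_ORDER() with PMD_ORDER (line 1811), which rounds xa_index down to the PMD boundary; the iomap position is then derived as xa_index << PAGE_SHIFT (lines 1649 and 1865). A small sketch of the fallback bound check from line 1801, assuming PMD_ORDER and PG_PMD_COLOUR are the local helpers these functions use:

static bool pmd_fault_fits_sketch(struct vm_fault *vmf, pgoff_t max_pgoff)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        /* XA_STATE_ORDER() rounds xa_index down to a PMD-aligned index. */
        XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);

        /* Fall back to PTE faults if the PMD-sized range would cross EOF. */
        return (xas.xa_index | PG_PMD_COLOUR) < max_pgoff;
}
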
1932 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1936 xas_lock_irq(&xas); in dax_insert_pfn_mkwrite()
1937 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1941 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_insert_pfn_mkwrite()
1942 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1947 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); in dax_insert_pfn_mkwrite()
1948 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1949 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1958 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
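
dax_insert_pfn_mkwrite() revalidates an entry on a write fault: it looks the entry up again at the right order, sets PAGECACHE_TAG_DIRTY, and takes the entry lock before reinstalling a writable PTE or PMD. A sketch of the XArray side only, with the entry-lock call and the PFN insertion left as comments:

static void redirty_entry_sketch(struct address_space *mapping,
                                 pgoff_t index, unsigned int order)
{
        XA_STATE_ORDER(xas, &mapping->i_pages, index, order);
        void *entry;

        xas_lock_irq(&xas);
        entry = xas_load(&xas);
        if (!entry) {
                xas_unlock_irq(&xas);
                return;                 /* entry vanished; caller retries the fault */
        }
        xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
        /* lock_value_entry(&xas, entry) and the PFN insertion would go here. */
        xas_unlock_irq(&xas);
}
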