Lines matching refs: xas (mm/filemap.c)

127 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
130 mapping_set_update(&xas, mapping); in page_cache_delete()
134 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
140 xas_store(&xas, shadow); in page_cache_delete()
141 xas_init_marks(&xas); in page_cache_delete()
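The page_cache_delete() cluster is the basic removal pattern: position the cursor at the folio's index, widen it with xas_set_order() so a large folio's whole range is covered, store the replacement (a shadow entry or NULL), and reset the entry's marks. A minimal sketch of that sequence, assuming the caller already holds the i_pages lock; the function name is illustrative, and the mapping_set_update() bookkeeping from mm/internal.h is elided. Later sketches assume the same headers and kernel context.

#include <linux/pagemap.h>
#include <linux/pagevec.h>	/* folio_batch helpers, used further down */
#include <linux/xarray.h>

static void delete_folio_sketch(struct address_space *mapping,
				struct folio *folio, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);

	/* Cover every index occupied by a (possibly large) folio. */
	xas_set_order(&xas, folio->index, folio_order(folio));
	xas_store(&xas, shadow);	/* shadow entry or NULL */
	xas_init_marks(&xas);		/* reset dirty/writeback marks */

	folio->mapping = NULL;
}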
280 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
285 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
286 xas_for_each(&xas, folio, ULONG_MAX) { in page_cache_delete_batch()
312 xas_store(&xas, NULL); in page_cache_delete_batch()
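page_cache_delete_batch() generalises the same store to a run of folios: a single xas_for_each() walk clears each slot in place with xas_store(&xas, NULL), never re-walking the tree from the root. A reduced sketch of the loop shape, again under the i_pages lock; the real function also stops once every folio in the batch has been seen and keeps accounting in sync:

static void delete_batch_sketch(struct address_space *mapping,
				struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
	struct folio *folio;

	xas_for_each(&xas, folio, ULONG_MAX) {
		if (xa_is_value(folio))
			continue;	/* skip shadow entries */
		folio->mapping = NULL;
		xas_store(&xas, NULL);	/* clear the slot under the cursor */
	}
}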
474 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
482 page = xas_find(&xas, max); in filemap_range_has_page()
483 if (xas_retry(&xas, page)) in filemap_range_has_page()
633 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
641 xas_for_each(&xas, folio, max) { in filemap_range_has_writeback()
642 if (xas_retry(&xas, folio)) in filemap_range_has_writeback()
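filemap_range_has_page() and filemap_range_has_writeback() show the read-side idiom: convert the byte range to page indices, walk under rcu_read_lock(), and treat a retry entry as "look again" rather than as a result. A condensed sketch of the first:

static bool range_has_page_sketch(struct address_space *mapping,
				  loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct page *page;

	rcu_read_lock();
	for (;;) {
		page = xas_find(&xas, max);
		if (xas_retry(&xas, page))
			continue;	/* raced with an entry split/delete */
		if (xa_is_value(page))
			continue;	/* shadow entries don't count */
		break;
	}
	rcu_read_unlock();
	return page != NULL;
}

filemap_range_has_writeback() walks the same window with xas_for_each(), stopping at the first folio that is dirty or under writeback.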
807 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
819 xas_lock_irq(&xas); in replace_page_cache_page()
820 xas_store(&xas, new); in replace_page_cache_page()
832 xas_unlock_irq(&xas); in replace_page_cache_page()
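replace_page_cache_page() is the locked write-side: one xas_store() under xas_lock_irq() swaps old for new atomically, so a concurrent RCU lookup sees one page or the other, never an empty slot. A sketch with the accounting, new->mapping/new->index setup, and freeing of the old page elided:

static void replace_page_sketch(struct address_space *mapping,
				struct page *old, struct page *new,
				pgoff_t offset)
{
	XA_STATE(xas, &mapping->i_pages, offset);

	xas_lock_irq(&xas);
	xas_store(&xas, new);	/* atomic swap as seen by lookups */
	old->mapping = NULL;
	xas_unlock_irq(&xas);
}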
842 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
849 mapping_set_update(&xas, mapping); in __filemap_add_folio()
857 xas_set_order(&xas, index, folio_order(folio)); in __filemap_add_folio()
864 folio->index = xas.xa_index; in __filemap_add_folio()
867 unsigned int order = xa_get_order(xas.xa, xas.xa_index); in __filemap_add_folio()
871 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), in __filemap_add_folio()
873 xas_lock_irq(&xas); in __filemap_add_folio()
874 xas_for_each_conflict(&xas, entry) { in __filemap_add_folio()
877 xas_set_err(&xas, -EEXIST); in __filemap_add_folio()
886 order = xa_get_order(xas.xa, xas.xa_index); in __filemap_add_folio()
890 xas_split(&xas, old, order); in __filemap_add_folio()
891 xas_reset(&xas); in __filemap_add_folio()
895 xas_store(&xas, folio); in __filemap_add_folio()
896 if (xas_error(&xas)) in __filemap_add_folio()
909 xas_unlock_irq(&xas); in __filemap_add_folio()
910 } while (xas_nomem(&xas, gfp)); in __filemap_add_folio()
912 if (xas_error(&xas)) in __filemap_add_folio()
923 return xas_error(&xas); in __filemap_add_folio()
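__filemap_add_folio() is the richest cluster: xas_set_order() sizes the store for a large folio, xas_for_each_conflict() scans everything already occupying that range, xas_split() breaks up a larger shadow entry when needed, and the locked section sits in the canonical do { ... } while (xas_nomem()) loop, which drops the lock, preallocates nodes, and retries on -ENOMEM. A simplified skeleton of that loop; the xas_split_alloc()/xas_split() handling of large shadow entries and the memcg/statistics work are elided, and the function name is made up:

static int add_folio_sketch(struct address_space *mapping,
			    struct folio *folio, pgoff_t index, gfp_t gfp)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *entry;

	xas_set_order(&xas, index, folio_order(folio));
	folio->index = xas.xa_index;	/* align to the folio's order */

	do {
		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			if (!xa_is_value(entry)) {
				/* A real folio already covers this range. */
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
		}
		xas_store(&xas, folio);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));	/* alloc nodes, retry on ENOMEM */

	return xas_error(&xas);
}

xas_nomem() returns true only when the pending error was -ENOMEM and the allocation succeeded, so -EEXIST falls straight through to xas_error().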
1763 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1766 void *entry = xas_next(&xas); in page_cache_next_miss()
1769 if (xas.xa_index == 0) in page_cache_next_miss()
1773 return xas.xa_index; in page_cache_next_miss()
1799 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1802 void *entry = xas_prev(&xas); in page_cache_prev_miss()
1805 if (xas.xa_index == ULONG_MAX) in page_cache_prev_miss()
1809 return xas.xa_index; in page_cache_prev_miss()
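page_cache_next_miss() and page_cache_prev_miss() use the cursor itself as the result: xas_next()/xas_prev() step one index at a time, and xas.xa_index after the loop is the hole that was found. The wraparound tests (0 going forward, ULONG_MAX going backward) stop the scan at the ends of the index space. Sketch of the forward direction, assuming the caller holds rcu_read_lock():

static pgoff_t next_miss_sketch(struct address_space *mapping,
				pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_next(&xas);

		if (!entry || xa_is_value(entry))
			break;		/* hole (or shadow entry) found */
		if (xas.xa_index == 0)
			break;		/* wrapped around the index space */
	}
	return xas.xa_index;
}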
1847 XA_STATE(xas, &mapping->i_pages, index); in mapping_get_entry()
1852 xas_reset(&xas); in mapping_get_entry()
1853 folio = xas_load(&xas); in mapping_get_entry()
1854 if (xas_retry(&xas, folio)) in mapping_get_entry()
1866 if (unlikely(folio != xas_reload(&xas))) { in mapping_get_entry()
1996 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry() argument
2003 folio = xas_find(xas, max); in find_get_entry()
2005 folio = xas_find_marked(xas, max, mark); in find_get_entry()
2007 if (xas_retry(xas, folio)) in find_get_entry()
2020 if (unlikely(folio != xas_reload(xas))) { in find_get_entry()
2027 xas_reset(xas); in find_get_entry()
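mapping_get_entry() and find_get_entry() are the heart of all the lookups below: find a candidate under RCU, take a speculative reference, then xas_reload() to confirm the slot still holds the same folio; on any mismatch, drop the reference, xas_reset(), and rescan. The mark parameter lets one helper serve plain walks (XA_PRESENT) and tag-filtered walks such as find_get_pages_range_tag() further down. A sketch of that core, assuming folio_try_get_rcu() as the speculative reference, as mm code of this vintage uses:

static struct folio *get_entry_sketch(struct xa_state *xas, pgoff_t max,
				      xa_mark_t mark)
{
	struct folio *folio;

retry:
	if (mark == XA_PRESENT)
		folio = xas_find(xas, max);
	else
		folio = xas_find_marked(xas, max, mark);

	if (xas_retry(xas, folio))
		goto retry;
	/* Value (shadow) entries are returned as-is to the caller. */
	if (!folio || xa_is_value(folio))
		return folio;

	if (!folio_try_get_rcu(folio))
		goto reset;
	/* Was the folio truncated or replaced before we got the ref? */
	if (unlikely(folio != xas_reload(xas))) {
		folio_put(folio);
		goto reset;
	}
	return folio;

reset:
	xas_reset(xas);
	goto retry;
}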
2054 XA_STATE(xas, &mapping->i_pages, start); in find_get_entries()
2058 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in find_get_entries()
2059 indices[fbatch->nr] = xas.xa_index; in find_get_entries()
2091 XA_STATE(xas, &mapping->i_pages, start); in find_lock_entries()
2095 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2106 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), in find_lock_entries()
2109 indices[fbatch->nr] = xas.xa_index; in find_lock_entries()
2147 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios()
2151 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in filemap_get_folios()
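find_get_entries(), find_lock_entries(), and filemap_get_folios() are thin loops over that helper: each hit goes into a folio_batch, and xas.xa_index is recorded beside it because a shadow entry has no folio->index of its own. A sketch of the shared loop shape, reusing get_entry_sketch() from above; find_lock_entries() additionally trylocks each folio and re-checks folio_contains() under the lock:

static unsigned gather_sketch(struct address_space *mapping, pgoff_t start,
			      pgoff_t end, struct folio_batch *fbatch,
			      pgoff_t *indices)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = get_entry_sketch(&xas, end, XA_PRESENT)) != NULL) {
		indices[folio_batch_count(fbatch)] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;		/* batch is full */
	}
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}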
2210 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_contig()
2216 for (folio = xas_load(&xas); folio && xas.xa_index <= end; in filemap_get_folios_contig()
2217 folio = xas_next(&xas)) { in filemap_get_folios_contig()
2218 if (xas_retry(&xas, folio)) in filemap_get_folios_contig()
2230 if (unlikely(folio != xas_reload(&xas))) in filemap_get_folios_contig()
2246 xas_reset(&xas); in filemap_get_folios_contig()
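filemap_get_folios_contig() wants an unbroken run, so its control flow differs: it advances with xas_next(), retries in place on a failed reference or reload, and stops at the first gap or shadow entry instead of skipping it. Sketch, with the *start update on exit elided:

static unsigned contig_sketch(struct address_space *mapping, pgoff_t *start,
			      pgoff_t end, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
	     folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			break;		/* a shadow entry ends the run */
		if (!folio_try_get_rcu(folio)) {
			xas_reset(&xas);
			continue;
		}
		if (unlikely(folio != xas_reload(&xas))) {
			folio_put(folio);
			xas_reset(&xas);
			continue;
		}
		if (!folio_batch_add(fbatch, folio))
			break;		/* batch is full */
	}
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}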
2284 XA_STATE(xas, &mapping->i_pages, *index); in find_get_pages_range_tag()
2292 while ((folio = find_get_entry(&xas, end, tag))) { in find_get_pages_range_tag()
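find_get_pages_range_tag() is the same gather loop routed through the mark-filtered side of the helper: passing a real tag instead of XA_PRESENT makes it use xas_find_marked(), and since shadow entries are never tagged there is no value-entry case to handle. A sketch reusing get_entry_sketch(), with PAGECACHE_TAG_DIRTY standing in for the tag parameter:

static unsigned tagged_gather_sketch(struct address_space *mapping,
				     pgoff_t *index, pgoff_t end,
				     struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *index);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = get_entry_sketch(&xas, end, PAGECACHE_TAG_DIRTY))) {
		bool full = !folio_batch_add(fbatch, folio);

		/* Resume after this folio on the next call. */
		*index = folio->index + folio_nr_pages(folio);
		if (full)
			break;
	}
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}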
2357 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2361 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { in filemap_get_read_batch()
2362 if (xas_retry(&xas, folio)) in filemap_get_read_batch()
2364 if (xas.xa_index > max || xa_is_value(folio)) in filemap_get_read_batch()
2371 if (unlikely(folio != xas_reload(&xas))) in filemap_get_read_batch()
2380 xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1); in filemap_get_read_batch()
2385 xas_reset(&xas); in filemap_get_read_batch()
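filemap_get_read_batch() adds one more trick: after accepting a folio it calls xas_advance() to move the cursor to the folio's last index, so the loop's xas_next() lands on the next folio instead of on a tail page of the current one. Sketch of the loop core, with the uptodate/readahead flag checks from the real function elided:

static void read_batch_sketch(struct address_space *mapping, pgoff_t index,
			      pgoff_t max, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct folio *folio;

	rcu_read_lock();
	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;
		if (xas.xa_index > max || xa_is_value(folio))
			break;
		if (!folio_try_get_rcu(folio)) {
			xas_reset(&xas);
			continue;
		}
		if (unlikely(folio != xas_reload(&xas))) {
			folio_put(folio);
			xas_reset(&xas);
			continue;
		}
		if (!folio_batch_add(fbatch, folio))
			break;
		/* Step over the tail indices of a large folio at once. */
		xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
	}
	rcu_read_unlock();
}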
2825 static inline loff_t folio_seek_hole_data(struct xa_state *xas, in folio_seek_hole_data() argument
2837 xas_pause(xas); in folio_seek_hole_data()
2858 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) in seek_folio_size() argument
2861 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); in seek_folio_size()
2886 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
2895 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
2896 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; in mapping_seek_hole_data()
2905 seek_size = seek_folio_size(&xas, folio); in mapping_seek_hole_data()
2907 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
2914 xas_set(&xas, pos >> PAGE_SHIFT); in mapping_seek_hole_data()
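mapping_seek_hole_data() ties several of these pieces together. seek_folio_size() is why the xa_state is threaded down: a shadow entry has no struct folio to report a size, so the order recorded in the tree itself, read back with xa_get_order(), is the only measure of how many indices it spans. folio_seek_hole_data() calls xas_pause() before taking the folio lock, so the walk can be resumed safely after sleeping, and the outer loop repositions with xas_set() so a multi-page entry is not revisited. A sketch of the size helper:

/* Mirrors seek_folio_size(): value entries carry their size in the tree. */
static size_t seek_size_sketch(struct xa_state *xas, struct folio *folio)
{
	if (xa_is_value(folio))
		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
	return folio_size(folio);
}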
3282 struct xa_state *xas, pgoff_t end_pgoff) in next_uptodate_page() argument
3289 if (xas_retry(xas, folio)) in next_uptodate_page()
3298 if (unlikely(folio != xas_reload(xas))) in next_uptodate_page()
3309 if (xas->xa_index >= max_idx) in next_uptodate_page()
3316 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
3322 struct xa_state *xas, in first_map_page() argument
3325 return next_uptodate_page(xas_find(xas, end_pgoff), in first_map_page()
3326 mapping, xas, end_pgoff); in first_map_page()
3330 struct xa_state *xas, in next_map_page() argument
3333 return next_uptodate_page(xas_next_entry(xas, end_pgoff), in next_map_page()
3334 mapping, xas, end_pgoff); in next_map_page()
3345 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3352 folio = first_map_page(mapping, &xas, end_pgoff); in filemap_map_pages()
3365 page = folio_file_page(folio, xas.xa_index); in filemap_map_pages()
3372 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
3373 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3374 last_pgoff = xas.xa_index; in filemap_map_pages()
3391 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) { in filemap_map_pages()
3392 xas.xa_index++; in filemap_map_pages()
3399 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) { in filemap_map_pages()
3400 xas.xa_index++; in filemap_map_pages()
3405 } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL); in filemap_map_pages()
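filemap_map_pages() drives fault-around from the cursor. first_map_page()/next_map_page() wrap xas_find()/xas_next_entry() plus the usual reference-and-reload checks from next_uptodate_page(), and the main loop turns deltas of xas.xa_index against last_pgoff into matching advances of the user address and the PTE pointer; within a large folio it bumps xas.xa_index directly to step page by page without touching the tree. A reduced sketch of that index arithmetic, with the vm_fault plumbing, PTE setup, and reference counting elided (all names are illustrative):

static void map_pages_sketch(struct address_space *mapping,
			     unsigned long *addr, pte_t **pte,
			     pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	pgoff_t last_pgoff = start_pgoff;
	struct folio *folio;

	rcu_read_lock();
	for (folio = xas_find(&xas, end_pgoff); folio;
	     folio = xas_next_entry(&xas, end_pgoff)) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;	/* shadow entries cannot be mapped */

		/* Move the address and PTE slot by the index delta. */
		*addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		*pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;

		/*
		 * Set up the PTE for folio_file_page(folio, xas.xa_index)
		 * here; the real function handles a large folio by bumping
		 * xas.xa_index once per tail page.
		 */
	}
	rcu_read_unlock();
}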