/linux-6.1.9/lib/
D | xarray.c |
     36  static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)  in xas_lock_type() argument
     39  xas_lock_irq(xas);  in xas_lock_type()
     41  xas_lock_bh(xas);  in xas_lock_type()
     43  xas_lock(xas);  in xas_lock_type()
     46  static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)  in xas_unlock_type() argument
     49  xas_unlock_irq(xas);  in xas_unlock_type()
     51  xas_unlock_bh(xas);  in xas_unlock_type()
     53  xas_unlock(xas);  in xas_unlock_type()
    124  static void xas_squash_marks(const struct xa_state *xas)  in xas_squash_marks() argument
    127  unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;  in xas_squash_marks()
    [all …]
D | test_xarray.c |
     74  XA_STATE_ORDER(xas, xa, index, order);  in xa_store_order()
     78  xas_lock(&xas);  in xa_store_order()
     79  curr = xas_store(&xas, entry);  in xa_store_order()
     80  xas_unlock(&xas);  in xa_store_order()
     81  } while (xas_nomem(&xas, gfp));  in xa_store_order()
    104  XA_STATE(xas, xa, 0);  in check_xas_retry()
    111  XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));  in check_xas_retry()
    113  XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));  in check_xas_retry()
    114  XA_BUG_ON(xa, xas_retry(&xas, NULL));  in check_xas_retry()
    115  XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));  in check_xas_retry()
    [all …]
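
The xa_store_order() hits above show the canonical advanced-API store loop: take the array lock, attempt the store, drop the lock, and let xas_nomem() allocate and retry on failure. A minimal sketch of that pattern for a single index; the function name is illustrative, the xas_* calls are the real API:

    #include <linux/xarray.h>

    /*
     * Sketch: store @entry at @index with the advanced API, retrying
     * allocation failures via xas_nomem(), as in xa_store_order() above.
     * Returns the previous entry. Illustrative helper, not kernel code.
     */
    static void *store_entry(struct xarray *xa, unsigned long index,
                             void *entry, gfp_t gfp)
    {
            XA_STATE(xas, xa, index);       /* cursor: array + target index */
            void *curr;

            do {
                    xas_lock(&xas);         /* takes xa->xa_lock */
                    curr = xas_store(&xas, entry);
                    xas_unlock(&xas);
                    /* xas_nomem() allocates outside the lock and returns
                     * true if the failed operation should be retried */
            } while (xas_nomem(&xas, gfp));

            return curr;
    }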
D | idr.c |
    383  XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);  in ida_alloc_range()
    395  xas_lock_irqsave(&xas, flags);  in ida_alloc_range()
    397  bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);  in ida_alloc_range()
    398  if (xas.xa_index > min / IDA_BITMAP_BITS)  in ida_alloc_range()
    400  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)  in ida_alloc_range()
    408  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)  in ida_alloc_range()
    412  xas_store(&xas, xa_mk_value(tmp));  in ida_alloc_range()
    422  xas_store(&xas, bitmap);  in ida_alloc_range()
    423  if (xas_error(&xas)) {  in ida_alloc_range()
    431  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)  in ida_alloc_range()
    [all …]
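
ida_alloc_range() finds space by searching the IDA's bitmap array for the XA_FREE_MARK. A hedged sketch of that search primitive on a free-tracking XArray; the array and helper names are illustrative, and real ID allocation should go through xa_alloc() or the IDA itself:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(slots);  /* XA_FLAGS_ALLOC: tracks XA_FREE_MARK */

    /* Sketch: lowest index <= @max still marked free, or -ENOSPC. */
    static long first_free_slot(unsigned long max)
    {
            XA_STATE(xas, &slots, 0);

            xas_lock(&xas);
            xas_find_marked(&xas, max, XA_FREE_MARK);
            xas_unlock(&xas);

            /* As in rdma_user_mmap_entry_insert_range() further down:
             * a cursor left at XAS_RESTART means nothing was found. */
            if (xas.xa_node == XAS_RESTART)
                    return -ENOSPC;
            return xas.xa_index;
    }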
/linux-6.1.9/include/linux/
D | xarray.h |
   1388  #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))  argument
   1389  #define xas_trylock(xas) xa_trylock((xas)->xa)  argument
   1390  #define xas_lock(xas) xa_lock((xas)->xa)  argument
   1391  #define xas_unlock(xas) xa_unlock((xas)->xa)  argument
   1392  #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)  argument
   1393  #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)  argument
   1394  #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)  argument
   1395  #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)  argument
   1396  #define xas_lock_irqsave(xas, flags) \  argument
   1397          xa_lock_irqsave((xas)->xa, flags)
   [all …]
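
As these wrappers show, an xa_state carries no lock of its own: every xas_lock*() variant forwards to the spinlock embedded in the underlying struct xarray, so the variant must match the array's most restrictive user. A hedged sketch, with illustrative names, of using the irqsave pair when the array is also touched from interrupt context:

    #include <linux/xarray.h>

    static DEFINE_XARRAY(ctx_table);    /* assumed also modified from hard irq */

    /* Sketch: swap the entry at @id, returning the old one. The
     * xas_nomem() retry for a failed store is omitted for brevity. */
    static void *replace_ctx(unsigned long id, void *new)
    {
            XA_STATE(xas, &ctx_table, id);
            unsigned long flags;
            void *old;

            xas_lock_irqsave(&xas, flags);  /* xa_lock_irqsave(xas.xa, flags) */
            old = xas_load(&xas);
            xas_store(&xas, new);
            xas_unlock_irqrestore(&xas, flags);
            return old;
    }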
D | swap.h |
    372  #define mapping_set_update(xas, mapping) do { \  argument
    374  xas_set_update(xas, workingset_update_node); \
    375  xas_set_lru(xas, &shadow_nodes); \
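
mapping_set_update() hooks workingset accounting into every page-cache tree walk: xas_set_update() registers a callback the XArray invokes whenever a walk modifies a node, and xas_set_lru() names the list_lru that shadow-entry nodes are kept on. A hedged sketch of registering such a callback; the callback body and names are illustrative:

    #include <linux/xarray.h>

    /* Sketch: xa_update_node_t callback, invoked when a walk changes a
     * node. The page cache's workingset_update_node() uses this to move
     * nodes holding only shadow entries on and off a list_lru. */
    static void note_node_change(struct xa_node *node)
    {
            /* e.g. inspect node->count to see whether the node emptied */
    }

    static void store_with_update(struct xarray *xa, unsigned long index,
                                  void *entry)
    {
            XA_STATE(xas, xa, index);

            xas_set_update(&xas, note_node_change);
            xas_lock(&xas);
            xas_store(&xas, entry);     /* may fire note_node_change() */
            xas_unlock(&xas);
    }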
/linux-6.1.9/fs/
D | dax.c |
    157  static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,  in dax_entry_waitqueue() argument
    161  unsigned long index = xas->xa_index;  in dax_entry_waitqueue()
    170  key->xa = xas->xa;  in dax_entry_waitqueue()
    173  hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);  in dax_entry_waitqueue()
    195  static void dax_wake_entry(struct xa_state *xas, void *entry,  in dax_wake_entry() argument
    201  wq = dax_entry_waitqueue(xas, entry, &key);  in dax_wake_entry()
    223  static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)  in get_unlocked_entry() argument
    233  entry = xas_find_conflict(xas);  in get_unlocked_entry()
    241  wq = dax_entry_waitqueue(xas, entry, &ewait.key);  in get_unlocked_entry()
    244  xas_unlock_irq(xas);  in get_unlocked_entry()
    [all …]
/linux-6.1.9/tools/testing/radix-tree/
D | multiorder.c |
     18  XA_STATE_ORDER(xas, xa, index, order);  in item_insert_order()
     22  xas_lock(&xas);  in item_insert_order()
     23  xas_store(&xas, item);  in item_insert_order()
     24  xas_unlock(&xas);  in item_insert_order()
     25  } while (xas_nomem(&xas, GFP_KERNEL));  in item_insert_order()
     27  if (!xas_error(&xas))  in item_insert_order()
     31  return xas_error(&xas);  in item_insert_order()
     36  XA_STATE(xas, xa, 0);  in multiorder_iteration()
     56  xas_set(&xas, j);  in multiorder_iteration()
     57  xas_for_each(&xas, item, ULONG_MAX) {  in multiorder_iteration()
    [all …]
D | iteration_check.c |
     23  XA_STATE(xas, xa, index);  in my_item_insert()
     28  xas_lock(&xas);  in my_item_insert()
     30  xas_set_order(&xas, index, order);  in my_item_insert()
     32  if (xas_find_conflict(&xas))  in my_item_insert()
     34  xas_store(&xas, item);  in my_item_insert()
     35  xas_set_mark(&xas, TAG);  in my_item_insert()
     38  xas_unlock(&xas);  in my_item_insert()
     39  if (xas_nomem(&xas, GFP_KERNEL))  in my_item_insert()
     69  XA_STATE(xas, &array, 0);  in tagged_iteration_fn()
     75  xas_set(&xas, 0);  in tagged_iteration_fn()
    [all …]
D | test.c |
    176  XA_STATE(xas, xa, start);  in tag_tagged_items()
    183  xas_lock_irq(&xas);  in tag_tagged_items()
    184  xas_for_each_marked(&xas, item, end, iftag) {  in tag_tagged_items()
    185  xas_set_mark(&xas, thentag);  in tag_tagged_items()
    189  xas_pause(&xas);  in tag_tagged_items()
    190  xas_unlock_irq(&xas);  in tag_tagged_items()
    192  xas_lock_irq(&xas);  in tag_tagged_items()
    194  xas_unlock_irq(&xas);  in tag_tagged_items()
    257  XA_STATE(xas, xa, 0);  in item_kill_tree()
    260  xas_for_each(&xas, entry, ULONG_MAX) {  in item_kill_tree()
    [all …]
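
tag_tagged_items() shows the standard long-iteration etiquette: every few iterations, xas_pause() parks the cursor so the lock can be dropped and the CPU rescheduled before the walk resumes. A sketch of that skeleton; BATCH and the function name are illustrative, the rest mirrors the excerpt and tag_pages_for_writeback() further down:

    #include <linux/sched.h>
    #include <linux/xarray.h>

    #define BATCH   128     /* illustrative: iterations between lock drops */

    /* Sketch: for every entry in [start, end] marked @iftag, also set
     * @thentag, without monopolizing the lock or disabling irqs too long. */
    static void retag_range(struct xarray *xa, unsigned long start,
                            unsigned long end, xa_mark_t iftag, xa_mark_t thentag)
    {
            XA_STATE(xas, xa, start);
            unsigned int tagged = 0;
            void *entry;

            xas_lock_irq(&xas);
            xas_for_each_marked(&xas, entry, end, iftag) {
                    xas_set_mark(&xas, thentag);
                    if (++tagged % BATCH)
                            continue;
                    xas_pause(&xas);        /* make the cursor safe to resume */
                    xas_unlock_irq(&xas);
                    cond_resched();
                    xas_lock_irq(&xas);
            }
            xas_unlock_irq(&xas);
    }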
D | iteration_check_2.c |
     15  XA_STATE(xas, arg, 0);  in iterator()
     21  xas_set(&xas, 0);  in iterator()
     23  xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)  in iterator()
     26  assert(xas.xa_index >= 100);  in iterator()
D | regression1.c |
     82  XA_STATE(xas, &mt_tree, start);  in find_get_pages()
     87  xas_for_each(&xas, page, ULONG_MAX) {  in find_get_pages()
     88  if (xas_retry(&xas, page))  in find_get_pages()
     99  if (unlikely(page != xas_reload(&xas)))  in find_get_pages()
    108  xas_reset(&xas);  in find_get_pages()
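
find_get_pages() is the classic lockless lookup protocol: under RCU, xas_retry() skips internal retry entries, a speculative reference is taken, and xas_reload() confirms the slot still holds the same object, otherwise the reference is dropped and the walk restarted. A condensed sketch for a single index; try_get_ref()/put_ref() are illustrative stand-ins for a real speculative refcount such as the page cache's:

    #include <linux/rcupdate.h>
    #include <linux/xarray.h>

    /* Illustrative stand-ins for a speculative reference count. */
    static bool try_get_ref(void *entry) { return true; }
    static void put_ref(void *entry) { }

    /* Sketch: lockless lookup returning a referenced entry, or NULL. */
    static void *lookup_get(struct xarray *xa, unsigned long index)
    {
            XA_STATE(xas, xa, index);
            void *entry;

            rcu_read_lock();
    repeat:
            xas_reset(&xas);                /* forget stale walk state */
            entry = xas_load(&xas);
            if (xas_retry(&xas, entry))     /* internal retry entry */
                    goto repeat;
            if (!entry)
                    goto out;
            if (!try_get_ref(entry))        /* being freed: look again */
                    goto repeat;
            if (unlikely(entry != xas_reload(&xas))) {
                    put_ref(entry);         /* slot changed under us */
                    goto repeat;
            }
    out:
            rcu_read_unlock();
            return entry;
    }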
/linux-6.1.9/mm/
D | memfd.c |
     31  static void memfd_tag_pins(struct xa_state *xas)  in memfd_tag_pins() argument
     39  xas_lock_irq(xas);  in memfd_tag_pins()
     40  xas_for_each(xas, page, ULONG_MAX) {  in memfd_tag_pins()
     48  xas_set_mark(xas, MEMFD_TAG_PINNED);  in memfd_tag_pins()
     50  xas_set(xas, page->index + cache_count);  in memfd_tag_pins()
     57  xas_pause(xas);  in memfd_tag_pins()
     58  xas_unlock_irq(xas);  in memfd_tag_pins()
     60  xas_lock_irq(xas);  in memfd_tag_pins()
     62  xas_unlock_irq(xas);  in memfd_tag_pins()
     76  XA_STATE(xas, &mapping->i_pages, 0);  in memfd_wait_for_pins()
    [all …]
D | filemap.c |
    127  XA_STATE(xas, &mapping->i_pages, folio->index);  in page_cache_delete()
    130  mapping_set_update(&xas, mapping);  in page_cache_delete()
    134  xas_set_order(&xas, folio->index, folio_order(folio));  in page_cache_delete()
    140  xas_store(&xas, shadow);  in page_cache_delete()
    141  xas_init_marks(&xas);  in page_cache_delete()
    280  XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);  in page_cache_delete_batch()
    285  mapping_set_update(&xas, mapping);  in page_cache_delete_batch()
    286  xas_for_each(&xas, folio, ULONG_MAX) {  in page_cache_delete_batch()
    312  xas_store(&xas, NULL);  in page_cache_delete_batch()
    474  XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);  in filemap_range_has_page()
    [all …]
D | list_lru.c |
    376  XA_STATE(xas, &lru->xa, 0);  in memcg_destroy_list_lru()
    382  xas_lock_irq(&xas);  in memcg_destroy_list_lru()
    383  xas_for_each(&xas, mlru, ULONG_MAX) {  in memcg_destroy_list_lru()
    385  xas_store(&xas, NULL);  in memcg_destroy_list_lru()
    387  xas_unlock_irq(&xas);  in memcg_destroy_list_lru()
    481  XA_STATE(xas, &lru->xa, 0);  in memcg_list_lru_alloc()
    510  xas_lock_irqsave(&xas, flags);  in memcg_list_lru_alloc()
    515  xas_set(&xas, index);  in memcg_list_lru_alloc()
    517  if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {  in memcg_list_lru_alloc()
    520  xas_store(&xas, mlru);  in memcg_list_lru_alloc()
    [all …]
D | swap_state.c |
     93  XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));  in add_to_swap_cache()
    105  xas_lock_irq(&xas);  in add_to_swap_cache()
    106  xas_create_range(&xas);  in add_to_swap_cache()
    107  if (xas_error(&xas))  in add_to_swap_cache()
    110  VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);  in add_to_swap_cache()
    111  old = xas_load(&xas);  in add_to_swap_cache()
    117  xas_store(&xas, folio);  in add_to_swap_cache()
    118  xas_next(&xas);  in add_to_swap_cache()
    124  xas_unlock_irq(&xas);  in add_to_swap_cache()
    125  } while (xas_nomem(&xas, gfp));  in add_to_swap_cache()
    [all …]
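
add_to_swap_cache() stores one folio across a run of consecutive indices: XA_STATE_ORDER() sizes the cursor, xas_create_range() pre-allocates every node in the range, and a store/xas_next() loop fills the slots, all inside the usual xas_nomem() retry. A trimmed sketch of that shape; names are illustrative and @nr is assumed to be a power of two:

    #include <linux/log2.h>
    #include <linux/xarray.h>

    /* Sketch: store @entry into @nr consecutive slots from @index. */
    static int store_range(struct xarray *xa, unsigned long index,
                           unsigned long nr, void *entry, gfp_t gfp)
    {
            XA_STATE_ORDER(xas, xa, index, ilog2(nr));
            unsigned long i;

            do {
                    xas_lock_irq(&xas);
                    xas_create_range(&xas); /* allocate all nodes up front */
                    if (xas_error(&xas))
                            goto unlock;
                    for (i = 0; i < nr; i++) {
                            xas_store(&xas, entry);
                            xas_next(&xas); /* advance to the next slot */
                    }
    unlock:
                    xas_unlock_irq(&xas);
            } while (xas_nomem(&xas, gfp));

            return xas_error(&xas);
    }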
D | khugepaged.c |
   1754  XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);  in collapse_file()
   1771  xas_lock_irq(&xas);  in collapse_file()
   1772  xas_create_range(&xas);  in collapse_file()
   1773  if (!xas_error(&xas))  in collapse_file()
   1775  xas_unlock_irq(&xas);  in collapse_file()
   1776  if (!xas_nomem(&xas, GFP_KERNEL)) {  in collapse_file()
   1794  xas_set(&xas, start);  in collapse_file()
   1796  struct page *page = xas_next(&xas);  in collapse_file()
   1798  VM_BUG_ON(index != xas.xa_index);  in collapse_file()
   1807  if (!xas_next_entry(&xas, end - 1)) {  in collapse_file()
   [all …]
D | page-writeback.c |
   2232  XA_STATE(xas, &mapping->i_pages, start);  in tag_pages_for_writeback()
   2236  xas_lock_irq(&xas);  in tag_pages_for_writeback()
   2237  xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {  in tag_pages_for_writeback()
   2238  xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);  in tag_pages_for_writeback()
   2242  xas_pause(&xas);  in tag_pages_for_writeback()
   2243  xas_unlock_irq(&xas);  in tag_pages_for_writeback()
   2245  xas_lock_irq(&xas);  in tag_pages_for_writeback()
   2247  xas_unlock_irq(&xas);  in tag_pages_for_writeback()
   2965  XA_STATE(xas, &mapping->i_pages, folio_index(folio));  in __folio_start_writeback()
   2970  xas_lock_irqsave(&xas, flags);  in __folio_start_writeback()
   [all …]
D | shmem.c |
    403  XA_STATE(xas, &mapping->i_pages, index);  in shmem_replace_entry()
    408  item = xas_load(&xas);  in shmem_replace_entry()
    411  xas_store(&xas, replacement);  in shmem_replace_entry()
    694  XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));  in shmem_add_to_page_cache()
    720  xas_lock_irq(&xas);  in shmem_add_to_page_cache()
    721  if (expected != xas_find_conflict(&xas)) {  in shmem_add_to_page_cache()
    722  xas_set_err(&xas, -EEXIST);  in shmem_add_to_page_cache()
    725  if (expected && xas_find_conflict(&xas)) {  in shmem_add_to_page_cache()
    726  xas_set_err(&xas, -EEXIST);  in shmem_add_to_page_cache()
    729  xas_store(&xas, folio);  in shmem_add_to_page_cache()
    [all …]
D | migrate.c |
    384  XA_STATE(xas, &mapping->i_pages, folio_index(folio));  in folio_migrate_mapping()
    407  xas_lock_irq(&xas);  in folio_migrate_mapping()
    409  xas_unlock_irq(&xas);  in folio_migrate_mapping()
    437  xas_store(&xas, newfolio);  in folio_migrate_mapping()
    446  xas_unlock(&xas);  in folio_migrate_mapping()
    499  XA_STATE(xas, &mapping->i_pages, folio_index(src));  in migrate_huge_page_move_mapping()
    502  xas_lock_irq(&xas);  in migrate_huge_page_move_mapping()
    505  xas_unlock_irq(&xas);  in migrate_huge_page_move_mapping()
    514  xas_store(&xas, dst);  in migrate_huge_page_move_mapping()
    518  xas_unlock_irq(&xas);  in migrate_huge_page_move_mapping()
D | truncate.c |
     35  XA_STATE(xas, &mapping->i_pages, index);  in __clear_shadow_entry()
     37  xas_set_update(&xas, workingset_update_node);  in __clear_shadow_entry()
     38  if (xas_load(&xas) != entry)  in __clear_shadow_entry()
     40  xas_store(&xas, NULL);  in __clear_shadow_entry()
/linux-6.1.9/fs/cachefiles/
D | ondemand.c |
     14  XA_STATE(xas, &cache->reqs, 0);  in cachefiles_ondemand_fd_release()
     23  xas_for_each(&xas, req, ULONG_MAX) {  in cachefiles_ondemand_fd_release()
     28  xas_store(&xas, NULL);  in cachefiles_ondemand_fd_release()
    245  XA_STATE(xas, &cache->reqs, cache->req_id_next);  in cachefiles_ondemand_daemon_read()
    253  req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);  in cachefiles_ondemand_daemon_read()
    255  xas_set(&xas, 0);  in cachefiles_ondemand_daemon_read()
    256  req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);  in cachefiles_ondemand_daemon_read()
    271  xas_clear_mark(&xas, CACHEFILES_REQ_NEW);  in cachefiles_ondemand_daemon_read()
    272  cache->req_id_next = xas.xa_index + 1;  in cachefiles_ondemand_daemon_read()
    275  id = xas.xa_index;  in cachefiles_ondemand_daemon_read()
    [all …]
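
cachefiles_ondemand_daemon_read() treats the request array as a ring: it searches for CACHEFILES_REQ_NEW from a saved cursor and, on a miss, wraps to index 0 and rescans up to the cursor. A hedged sketch of that two-pass resume; the function and parameter names are illustrative:

    #include <linux/xarray.h>

    /* Sketch: round-robin pickup of marked entries. @next is the saved
     * cursor, so each call resumes where the previous one stopped. */
    static void *pick_next_marked(struct xarray *xa, unsigned long *next,
                                  xa_mark_t mark)
    {
            XA_STATE(xas, xa, *next);
            void *req;

            xas_lock(&xas);
            req = xas_find_marked(&xas, ULONG_MAX, mark);
            if (!req && *next != 0) {
                    /* wrapped: rescan [0, cursor) */
                    xas_set(&xas, 0);
                    req = xas_find_marked(&xas, *next - 1, mark);
            }
            if (req) {
                    xas_clear_mark(&xas, mark);     /* claim it */
                    *next = xas.xa_index + 1;
            }
            xas_unlock(&xas);
            return req;
    }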
/linux-6.1.9/drivers/infiniband/core/
D | ib_core_uverbs.c |
    268  XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);  in rdma_user_mmap_entry_insert_range()
    294  xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);  in rdma_user_mmap_entry_insert_range()
    295  if (xas.xa_node == XAS_RESTART)  in rdma_user_mmap_entry_insert_range()
    298  xa_first = xas.xa_index;  in rdma_user_mmap_entry_insert_range()
    308  xas_next_entry(&xas, xa_last - 1);  in rdma_user_mmap_entry_insert_range()
    309  if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)  in rdma_user_mmap_entry_insert_range()
/linux-6.1.9/arch/x86/kernel/cpu/sgx/
D | encl.c |
    510  XA_STATE(xas, &encl->page_array, PFN_DOWN(start));  in sgx_encl_may_map()
    525  xas_lock(&xas);  in sgx_encl_may_map()
    526  xas_for_each(&xas, page, PFN_DOWN(end - 1)) {  in sgx_encl_may_map()
    534  xas_pause(&xas);  in sgx_encl_may_map()
    535  xas_unlock(&xas);  in sgx_encl_may_map()
    541  xas_lock(&xas);  in sgx_encl_may_map()
    544  xas_unlock(&xas);  in sgx_encl_may_map()
    688  XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));  in sgx_encl_release()
    690  xas_lock(&xas);  in sgx_encl_release()
    691  xas_for_each(&xas, entry, max_page_index) {  in sgx_encl_release()
    [all …]
/linux-6.1.9/fs/afs/
D | write.c |
    264  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);  in afs_pages_written_back()
    272  xas_for_each(&xas, folio, end) {  in afs_pages_written_back()
    446  XA_STATE(xas, &mapping->i_pages, index);  in afs_extend_writeback()
    456  xas_for_each(&xas, folio, ULONG_MAX) {  in afs_extend_writeback()
    458  if (xas_retry(&xas, folio))  in afs_extend_writeback()
    466  xas_reset(&xas);  in afs_extend_writeback()
    471  if (unlikely(folio != xas_reload(&xas))) {  in afs_extend_writeback()
    513  xas_pause(&xas);  in afs_extend_writeback()
/linux-6.1.9/fs/netfs/
D | buffered_read.c |
     25  XA_STATE(xas, &rreq->mapping->i_pages, start_page);  in netfs_rreq_unlock_folios()
     47  xas_for_each(&xas, folio, last_page) {  in netfs_rreq_unlock_folios()
     51  if (xas_retry(&xas, folio))  in netfs_rreq_unlock_folios()