
Searched refs:xa_lock (Results 1 – 25 of 33) sorted by relevance


/linux-6.1.9/Documentation/translations/zh_CN/core-api/
xarray.rst 162 Takes the xa_lock internally:
186 Assumes the xa_lock is held on entry:
195 If you want to take advantage of the lock to protect the data structures you store in the XArray, you can call xa_lock() before calling xa_load(), and then
225 xa_lock(&foo->array);
234 The example above also shows a common pattern of wanting to extend the coverage of the xa_lock on the store side to protect some statistics
238 use xa_lock_irq() in process context and xa_lock() in interrupt handlers. Some of the more common patterns have helper functions,
242 means you are entitled to use functions like __xa_erase() without taking the xa_lock; the xa_lock is used for lockdep validation and will in future also be
252 you need to use the xa_lock while modifying the array. For read-only operations on the array, you can choose to use either the xa_lock or the RCU lock. You can
267 saved in the xa_state for the next attempt. The idea is that you take the xa_lock, attempt the operation and then drop the lock. While holding the lock, the operation tries to
295 - This entry is currently being modified by a thread holding the xa_lock. The node containing this entry may be freed at the end of this RCU cycle.
[all …]
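The excerpts above (xarray.rst lines 195-234 in the zh_CN translation) describe wrapping xa_load() in xa_lock()/xa_unlock() and extending the lock's coverage on the store side to cover per-array statistics. A minimal sketch of that pattern, assuming a hypothetical struct foo with an embedded XArray and a count field (the names are illustrative, not taken from the hits above):

    #include <linux/xarray.h>

    /* Hypothetical container: an XArray plus a statistic guarded by its xa_lock. */
    struct foo {
        struct xarray array;
        unsigned long count;
    };

    /* Store an entry and bump the counter under the same xa_lock coverage.
     * __xa_store() may drop and reacquire xa_lock to allocate with GFP_KERNEL. */
    static int foo_store(struct foo *foo, unsigned long index, void *entry)
    {
        int err;

        xa_lock(&foo->array);
        err = xa_err(__xa_store(&foo->array, index, entry, GFP_KERNEL));
        if (!err)
            foo->count++;
        xa_unlock(&foo->array);
        return err;
    }

    /* Look up an entry; holding xa_lock keeps it stable after xa_load() returns. */
    static void *foo_find(struct foo *foo, unsigned long index)
    {
        void *entry;

        xa_lock(&foo->array);
        entry = xa_load(&foo->array, index);
        /* ... safe to use entry here, before dropping the lock ... */
        xa_unlock(&foo->array);
        return entry;
    }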
/linux-6.1.9/include/linux/
xarray.h 297 spinlock_t xa_lock; member
304 .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
380 spin_lock_init(&xa->xa_lock); in xa_init_flags()
531 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
532 #define xa_lock(xa) spin_lock(&(xa)->xa_lock) macro
533 #define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
534 #define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock)
535 #define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock)
536 #define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock)
537 #define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock)
[all …]
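The macros hit at xarray.h lines 531-537 are thin wrappers that apply the spin_lock() family to the spinlock_t xa_lock embedded in struct xarray, so the _bh/_irq variant has to match how the array is shared. A minimal sketch, assuming a hypothetical array touched from both process context and a hard interrupt handler (following the xarray.rst guidance quoted above and below):

    #include <linux/xarray.h>

    /* Hypothetical array shared between process context and a hard IRQ handler. */
    static DEFINE_XARRAY(pending_events);

    /* Process context: take the embedded xa_lock with interrupts disabled. */
    static void *drop_event(unsigned long id)
    {
        void *entry;

        xa_lock_irq(&pending_events);
        entry = __xa_erase(&pending_events, id);
        xa_unlock_irq(&pending_events);
        return entry;
    }

    /* Hard IRQ handler: interrupts are already off, so plain xa_lock() is enough. */
    static void *peek_event(unsigned long id)
    {
        void *entry;

        xa_lock(&pending_events);
        entry = xa_load(&pending_events, id);
        xa_unlock(&pending_events);
        return entry;
    }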
idr.h 101 #define idr_lock(idr) xa_lock(&(idr)->idr_rt)
backing-dev.h 245 !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && in inode_to_wb()
/linux-6.1.9/tools/testing/radix-tree/
regression1.c 128 xa_lock(&mt_tree); in regression1_fn()
133 xa_lock(&mt_tree); in regression1_fn()
137 xa_lock(&mt_tree); in regression1_fn()
145 xa_lock(&mt_tree); in regression1_fn()
/linux-6.1.9/drivers/infiniband/core/
ib_core_uverbs.c 125 xa_lock(&ucontext->mmap_xa); in rdma_user_mmap_entry_get_pgoff()
190 xa_lock(&ucontext->mmap_xa); in rdma_user_mmap_entry_free()
235 xa_lock(&entry->ucontext->mmap_xa); in rdma_user_mmap_entry_remove()
287 xa_lock(&ucontext->mmap_xa); in rdma_user_mmap_entry_insert_range()
restrack.c 119 xa_lock(&rt->xa); in rdma_restrack_count()
290 xa_lock(&rt->xa); in rdma_restrack_get_byid()
counters.c 290 xa_lock(&rt->xa); in rdma_get_counter_auto_mode()
402 xa_lock(&rt->xa); in get_running_counters_hwstat_sum()
417 xa_lock(&rt->xa); in get_running_counters_hwstat_sum()
ucma.c 144 xa_lock(&ctx_table); in ucma_get_ctx()
361 xa_lock(&ctx_table); in ucma_event_handler()
492 xa_lock(&multicast_table); in ucma_cleanup_multicast()
605 xa_lock(&ctx_table); in ucma_destroy_id()
1475 xa_lock(&multicast_table); in ucma_process_join()
1510 xa_lock(&multicast_table); in ucma_process_join()
1574 xa_lock(&multicast_table); in ucma_leave_multicast()
1649 xa_lock(&ctx_table); in ucma_migrate_id()
nldev.c 774 xa_lock(&rt->xa); in fill_res_srq_qps()
901 xa_lock(&rt->xa); in fill_stat_counter_qps()
1552 xa_lock(&rt->xa); in res_get_common_dumpit()
1586 again: xa_lock(&rt->xa); in res_get_common_dumpit()
/linux-6.1.9/fs/erofs/
utils.c 90 xa_lock(&sbi->managed_pslots); in erofs_insert_workgroup()
168 xa_lock(&sbi->managed_pslots); in erofs_shrink_workstation()
178 xa_lock(&sbi->managed_pslots); in erofs_shrink_workstation()
/linux-6.1.9/Documentation/core-api/
xarray.rst 203 Takes xa_lock internally:
227 Assumes xa_lock held on entry:
237 that you are storing in the XArray, you can call xa_lock()
272 xa_lock(&foo->array);
283 coverage of the xa_lock on the store side to protect some statistics
288 context, or xa_lock_irq() in process context and xa_lock()
297 the xa_lock; the xa_lock is used for lockdep validation and will be used
311 to use the xa_lock while modifying the array. You can choose whether
312 to use the xa_lock or the RCU lock while doing read-only operations on
340 the xa_lock, attempt the operation and drop the lock. The operation
[all …]
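The xarray.rst lines quoted above (311-340) contrast holding the xa_lock for modifications with taking either the xa_lock or the RCU read lock for read-only access, and describe the advanced-API pattern of taking the xa_lock, attempting the operation, dropping the lock and retrying after xas_nomem() allocates memory. A minimal sketch of both, with hypothetical names:

    #include <linux/xarray.h>
    #include <linux/rcupdate.h>

    /* Hypothetical array and counter; names are illustrative only. */
    static DEFINE_XARRAY(frobs);
    static unsigned long frob_count;

    /* Advanced-API store: take the xa_lock, attempt, drop the lock, and let
     * xas_nomem() allocate outside the lock before retrying on ENOMEM. */
    static int frob_store(unsigned long index, void *entry)
    {
        XA_STATE(xas, &frobs, index);

        do {
            xas_lock(&xas);
            xas_store(&xas, entry);
            if (!xas_error(&xas))
                frob_count++;
            xas_unlock(&xas);
        } while (xas_nomem(&xas, GFP_KERNEL));

        return xas_error(&xas);
    }

    /* Read-only lookup under the RCU read lock instead of the xa_lock. */
    static void *frob_peek(unsigned long index)
    {
        void *entry;

        rcu_read_lock();
        entry = xa_load(&frobs, index);
        rcu_read_unlock();
        return entry;
    }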
/linux-6.1.9/arch/arm64/kernel/
hibernate.c 247 xa_lock(&mte_pages); in swsusp_mte_free_storage()
298 xa_lock(&mte_pages); in swsusp_mte_restore_tags()
/linux-6.1.9/arch/arm64/mm/
mteswap.c 82 xa_lock(&mte_pages); in mte_invalidate_tags_area()
/linux-6.1.9/drivers/infiniband/hw/mlx4/
cm.c 366 xa_lock(&sriov->xa_rej_tmout); in alloc_rej_tmout()
411 xa_lock(&sriov->xa_rej_tmout); in lookup_rej_tmout_slave()
501 xa_lock(&sriov->xa_rej_tmout); in rej_tmout_xa_cleanup()
/linux-6.1.9/fs/cachefiles/
ondemand.c 16 xa_lock(&cache->reqs); in cachefiles_ondemand_fd_release()
252 xa_lock(&cache->reqs); in cachefiles_ondemand_daemon_read()
daemon.c 157 xa_lock(xa); in cachefiles_flush_reqs()
/linux-6.1.9/lib/
xarray.c 328 __must_hold(xas->xa->xa_lock) in __xas_nomem()
1517 xa_lock(xa); in xa_erase()
1581 xa_lock(xa); in xa_store()
1972 xa_lock(xa); in xa_set_mark()
1990 xa_lock(xa); in xa_clear_mark()
/linux-6.1.9/drivers/infiniband/hw/mlx5/
odp.c 454 xa_lock(&imr->implicit_children); in implicit_get_child_mr()
627 xa_lock(&imr->implicit_children); in pagefault_implicit_mr()
832 xa_lock(&dev->odp_mkeys); in pagefault_single_data_segment()
1685 xa_lock(&dev->odp_mkeys); in get_prefetchable_mr()
mr.c 371 lockdep_assert_held(&ent->mkeys.xa_lock); in remove_cache_mr_locked()
386 lockdep_assert_held(&ent->mkeys.xa_lock); in resize_available_mrs()
540 lockdep_assert_held(&ent->mkeys.xa_lock); in queue_adjust_cache_locked()
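The mr.c hits above follow the common convention of *_locked helpers that expect the caller to already hold the array's xa_lock and document that contract with lockdep_assert_held() on the embedded spinlock (declared at xarray.h line 297). A minimal, hypothetical sketch of the same convention:

    #include <linux/xarray.h>
    #include <linux/lockdep.h>

    /* Hypothetical helper: the caller must hold cache->xa_lock, so the helper
     * asserts it under lockdep and uses the __xa_* (already-locked) variant. */
    static void *cache_pop_locked(struct xarray *cache, unsigned long index)
    {
        lockdep_assert_held(&cache->xa_lock);
        return __xa_erase(cache, index);
    }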
/linux-6.1.9/mm/
workingset.c 554 lockdep_assert_held(&mapping->i_pages.xa_lock); in workingset_update_node()
/linux-6.1.9/drivers/vfio/
vfio_main.c 69 xa_lock(&vfio_device_set_xa); in vfio_assign_device_set()
82 xa_lock(&vfio_device_set_xa); in vfio_assign_device_set()
118 xa_lock(&vfio_device_set_xa); in vfio_release_device_set()
/linux-6.1.9/drivers/infiniband/hw/hns/
hns_roce_srq.c 17 xa_lock(&srq_table->xa); in hns_roce_srq_event()
/linux-6.1.9/drivers/gpu/drm/tegra/
submit.c 150 xa_lock(&context->mappings); in tegra_drm_mapping_get()
/linux-6.1.9/drivers/gpu/drm/i915/gt/uc/
intel_guc_submission.c 1120 xa_lock(&guc->context_lookup); in scrub_guc_desc_for_outstanding_g2h()
1736 xa_lock(&guc->context_lookup); in intel_guc_submission_reset()
1829 xa_lock(&guc->context_lookup); in intel_guc_submission_cancel_requests()
4614 xa_lock(&guc->context_lookup); in intel_guc_find_hung_context()
4619 xa_lock(&guc->context_lookup); in intel_guc_find_hung_context()
4663 xa_lock(&guc->context_lookup); in intel_guc_dump_active_requests()
