/linux-6.6.21/lib/
    test_xarray.c
        63   XA_BUG_ON(xa, xa_load(xa, index) != NULL);  in xa_erase_index()
        153  void *entry = xa_load(xa, j);  in check_xa_load()
        164  void *entry = xa_load(xa, j);  in check_xa_load()
        344  XA_BUG_ON(xa, xa_load(xa, 1) != NULL);  in check_xa_shrink()
        349  XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));  in check_xa_shrink()
        356  XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));  in check_xa_shrink()
        357  XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);  in check_xa_shrink()
        366  XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);  in check_xa_shrink()
        379  XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);  in check_insert()
        380  XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);  in check_insert()
        [all …]

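A minimal, self-contained sketch of the property those assertions exercise: xa_load() returns NULL for an index that was never stored (or has since been erased) and returns the stored entry otherwise. This is not the kernel's own test harness; the xarray name and the WARN_ON() checks are illustrative only.

#include <linux/bug.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(sketch_xa);

static void sketch_load_semantics(void)
{
        /* Store a value entry at index 5; xa_mk_value() tags a small integer. */
        xa_store(&sketch_xa, 5, xa_mk_value(5), GFP_KERNEL);

        WARN_ON(xa_load(&sketch_xa, 5) != xa_mk_value(5));      /* present   */
        WARN_ON(xa_load(&sketch_xa, 6) != NULL);                /* never set */

        xa_erase(&sketch_xa, 5);
        WARN_ON(xa_load(&sketch_xa, 5) != NULL);                /* erased    */
}
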
/linux-6.6.21/Documentation/translations/zh_CN/core-api/
    xarray.rst  (Chinese translation of core-api/xarray.rst; snippets rendered in English)
        63   You can then set entries with xa_store() and retrieve entries with xa_load(). xa_store will overwrite any …
        153  * xa_load()
        195  If you want to use the lock to protect the data structures you store in the XArray, you can call xa_lock() before calling xa_load(), and then …

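A sketch of the documented pattern, assuming a hypothetical refcounted object (struct foo, foo_xa and foo_get() are made-up names, not part of the XArray API): take xa_lock() around xa_load() and pin the object before the lock is dropped.

#include <linux/kref.h>
#include <linux/xarray.h>

struct foo {
        struct kref ref;
        unsigned long id;
};

static DEFINE_XARRAY(foo_xa);

static struct foo *foo_get(unsigned long id)
{
        struct foo *f;

        xa_lock(&foo_xa);
        f = xa_load(&foo_xa, id);
        if (f)
                kref_get(&f->ref);      /* pin it before dropping the lock */
        xa_unlock(&foo_xa);

        return f;
}
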
/linux-6.6.21/drivers/gpu/drm/tegra/
    uapi.c
        170  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_close()
        200  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_map()
        281  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_unmap()
    submit.c
        152  mapping = xa_load(&context->mappings, id);  in tegra_drm_mapping_get()
        338  sp = xa_load(syncpoints, args->syncpt.id);  in submit_get_syncpt()
        524  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_submit()

/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/steering/
    dr_domain.c
        81   recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);  in mlx5dr_domain_get_recalc_cs_ft_addr()
        327  vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);  in mlx5dr_domain_get_vport_cap()
        568  peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);  in mlx5dr_domain_set_peer()
        574  peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);  in mlx5dr_domain_set_peer()

/linux-6.6.21/drivers/firmware/arm_scmi/
    perf.c
        528  opp = xa_load(&dom->opps_by_lvl, min_perf);  in scmi_perf_limits_set()
        536  opp = xa_load(&dom->opps_by_lvl, max_perf);  in scmi_perf_limits_set()
        607  opp = xa_load(&dom->opps_by_idx, *min_perf);  in scmi_perf_limits_get()
        613  opp = xa_load(&dom->opps_by_idx, *max_perf);  in scmi_perf_limits_get()
        674  opp = xa_load(&dom->opps_by_lvl, level);  in scmi_perf_level_set()
        737  opp = xa_load(&dom->opps_by_idx, *level);  in scmi_perf_level_get()
        910  opp = xa_load(&dom->opps_by_idx, level);  in scmi_dvfs_freq_get()

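The perf.c hits suggest the same OPP object is reachable through two XArrays, one keyed by performance level (opps_by_lvl) and one by index (opps_by_idx). A rough sketch of such a dual-index setup, with invented structure and function names rather than the SCMI driver's own:

#include <linux/xarray.h>

struct sketch_opp {
        unsigned int level;
        unsigned int idx;
};

static int sketch_index_opp(struct xarray *by_lvl, struct xarray *by_idx,
                            struct sketch_opp *opp)
{
        int ret;

        ret = xa_insert(by_lvl, opp->level, opp, GFP_KERNEL);
        if (ret)
                return ret;

        ret = xa_insert(by_idx, opp->idx, opp, GFP_KERNEL);
        if (ret)
                xa_erase(by_lvl, opp->level);   /* keep both views consistent */

        return ret;
}

Either key can then be resolved with a plain xa_load() on the matching XArray, which is what scmi_perf_limits_set() and scmi_perf_limits_get() appear to do above.
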
/linux-6.6.21/drivers/infiniband/hw/cxgb4/
    ev.c
        127  qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));  in c4iw_ev_dispatch()
        228  chp = xa_load(&dev->cqs, qid);  in c4iw_ev_handler()

/linux-6.6.21/arch/arm64/mm/
    mteswap.c
        52   void *tags = xa_load(&mte_pages, entry.val);  in mte_restore_tags()

/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/
    pagealloc.c
        106  root = xa_load(&dev->priv.page_root_xa, function);  in page_root_per_function()
        177  root = xa_load(&dev->priv.page_root_xa, function);  in find_fw_page()
        259  root = xa_load(&dev->priv.page_root_xa, fwp->function);  in free_fwp()
        443  root = xa_load(&dev->priv.page_root_xa, function);  in release_all_pages()
        501  root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));  in reclaim_pages_cmd()
    eq.c
        823   irq = xa_load(&table->comp_irqs, vecidx);  in comp_irq_release_pci()
        890   irq = xa_load(&table->comp_irqs, vecidx);  in comp_irq_release_sf()
        1037  irq = xa_load(&table->comp_irqs, vecidx);  in create_comp_eq()
        1077  eq = xa_load(&table->comp_eqs, vecidx);  in mlx5_comp_eqn_get()
        1108  eq = xa_load(&table->comp_eqs, vector);  in mlx5_comp_irqn_get()
        1125  eq = xa_load(&table->comp_eqs, vector);  in mlx5_comp_irq_get_affinity_mask()

/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en/
    mapping.c
        116  mi = xa_load(&ctx->xarray, index);  in mapping_remove()
        139  mi = xa_load(&ctx->xarray, index);  in mapping_find()

/linux-6.6.21/drivers/infiniband/sw/rxe/
    rxe_mr.c
        255  page = xa_load(&mr->page_list, index);  in rxe_mr_copy_xarray()
        449  page = xa_load(&mr->page_list, index);  in rxe_flush_pmem_iova()
        498  page = xa_load(&mr->page_list, index);  in rxe_mr_do_atomic_op()
        556  page = xa_load(&mr->page_list, index);  in rxe_mr_do_atomic_write()

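rxe_mr_copy_xarray() and the other hits treat mr->page_list as an XArray of struct page keyed by page index, so a byte offset is split into an index and an in-page offset before each lookup. A simplified sketch of that copy loop (one direction only, invented names, error handling reduced to a single -EFAULT):

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/xarray.h>

static int sketch_copy_from_pages(struct xarray *page_list,
                                  unsigned long offset, void *dst, size_t len)
{
        while (len) {
                unsigned long index = offset >> PAGE_SHIFT;
                unsigned long page_off = offset & (PAGE_SIZE - 1);
                size_t chunk = min_t(size_t, len, PAGE_SIZE - page_off);
                struct page *page = xa_load(page_list, index);
                void *va;

                if (!page)
                        return -EFAULT;

                va = kmap_local_page(page);
                memcpy(dst, va + page_off, chunk);
                kunmap_local(va);

                dst += chunk;
                offset += chunk;
                len -= chunk;
        }

        return 0;
}
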
/linux-6.6.21/security/apparmor/
    secid.c
        61   return xa_load(&aa_secids, secid);  in aa_secid_to_label()

/linux-6.6.21/drivers/infiniband/hw/erdma/
    erdma_verbs.h
        275  return (struct erdma_qp *)xa_load(&dev->qp_xa, id);  in find_qp_by_qpn()
        280  return (struct erdma_cq *)xa_load(&dev->cq_xa, id);  in find_cq_by_cqn()

/linux-6.6.21/mm/
    readahead.c
        230  struct folio *folio = xa_load(&mapping->i_pages, index + i);  in page_cache_ra_unbounded()
        791  struct folio *folio = xa_load(&mapping->i_pages, index);  in readahead_expand()
        818  struct folio *folio = xa_load(&mapping->i_pages, index);  in readahead_expand()

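Both readahead call sites only peek at the page cache: xa_load() takes the RCU read lock internally, so presence can be tested without caller locking, and a value entry (a shadow/workingset entry) has to be distinguished from a real folio. A minimal sketch of such a peek, assuming the caller holds a reference on the mapping and treats the answer as a hint only:

#include <linux/pagemap.h>
#include <linux/xarray.h>

static bool sketch_folio_present(struct address_space *mapping, pgoff_t index)
{
        struct folio *folio = xa_load(&mapping->i_pages, index);

        /* A value entry here is a shadow entry, not an uptodate folio. */
        return folio && !xa_is_value(folio);
}
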
/linux-6.6.21/drivers/net/ethernet/intel/ice/
    ice_eswitch_br.c
        312  port = xa_load(&bridge->ports, vsi_idx);  in ice_esw_br_port_vlan_lookup()
        318  vlan = xa_load(&port->vlans, vid);  in ice_esw_br_port_vlan_lookup()
        739  port = xa_load(&bridge->ports, vsi_idx);  in ice_eswitch_br_port_vlan_add()
        750  vlan = xa_load(&port->vlans, vid);  in ice_eswitch_br_port_vlan_add()
        774  port = xa_load(&bridge->ports, vsi_idx);  in ice_eswitch_br_port_vlan_del()
        778  vlan = xa_load(&port->vlans, vid);  in ice_eswitch_br_port_vlan_del()

/linux-6.6.21/drivers/infiniband/hw/mlx5/
    devx.c
        1374  event = xa_load(&dev->devx_event_table.event_xa,  in devx_cleanup_subscription()
        1378  xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);  in devx_cleanup_subscription()
        1439  event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);  in devx_cq_comp()
        1443  obj_event = xa_load(&event->object_ids, obj_id);  in devx_cq_comp()
        1866  event = xa_load(&devx_event_table->event_xa, key_level1);  in subscribe_event_xa_dealloc()
        1869  xa_val_level2 = xa_load(&event->object_ids,  in subscribe_event_xa_dealloc()
        1888  event = xa_load(&devx_event_table->event_xa, key_level1);  in subscribe_event_xa_alloc()
        1910  obj_event = xa_load(&event->object_ids, key_level2);  in subscribe_event_xa_alloc()
        2140  event = xa_load(&devx_event_table->event_xa,  in UVERBS_HANDLER()
        2150  obj_event = xa_load(&event->object_ids, obj_id);  in UVERBS_HANDLER()
        [all …]

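devx.c shows a two-level lookup: the outer event_xa maps an event key to a per-event object whose own object_ids XArray is then indexed by object id. A sketch of that shape with hypothetical structure and function names:

#include <linux/xarray.h>

struct sketch_event {
        struct xarray object_ids;
};

static void *sketch_find_subscription(struct xarray *event_xa,
                                      unsigned long key_level1,
                                      unsigned long key_level2)
{
        struct sketch_event *event;

        event = xa_load(event_xa, key_level1);
        if (!event)
                return NULL;

        return xa_load(&event->object_ids, key_level2);
}
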
/linux-6.6.21/drivers/gpu/drm/lima/
    lima_ctx.c
        72   ctx = xa_load(&mgr->handles, id);  in lima_ctx_get()

/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/
    dev.c
        176  sf_dev = xa_load(&table->devices, sf_index);  in mlx5_sf_dev_state_change_handler()
        254  if (!xa_load(&table->devices, i))  in mlx5_sf_dev_add_active_work()

/linux-6.6.21/fs/smb/server/mgmt/
    user_session.c
        138  entry = xa_load(&sess->rpc_handle_list, id);  in ksmbd_session_rpc_method()
        258  sess = xa_load(&conn->sessions, id);  in ksmbd_session_lookup()

/linux-6.6.21/block/partitions/
    core.c
        333  if (xa_load(&disk->part_tbl, partno))  in add_partition()
        485  part = xa_load(&disk->part_tbl, partno);  in bdev_del_partition()
        507  part = xa_load(&disk->part_tbl, partno);  in bdev_resize_partition()

/linux-6.6.21/drivers/infiniband/hw/mlx4/
    cm.c
        276  ent = xa_load(&sriov->pv_id_table, *pv_cm_id);  in id_map_get()
        367  item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);  in alloc_rej_tmout()
        412  item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);  in lookup_rej_tmout_slave()

/linux-6.6.21/drivers/infiniband/core/
    device.c
        313   device = xa_load(&devices, index);  in ib_device_get_by_index()
        432   struct ib_client *client = xa_load(&clients, index);  in ib_device_rename()
        666   if (xa_load(&devices, device->index) == device)  in ib_dealloc_device()
        761   client_data = xa_load(&device->client_data, client_id);  in remove_client_context()
        763   client = xa_load(&clients, client_id);  in remove_client_context()
        944   cdev = xa_load(&device->compat_devs, rnet->id);  in add_one_compat_dev()
        1757  if (xa_load(&clients, highest_client_id - 1))  in remove_client_id()
        1882  struct ib_client *client = xa_load(&clients, index);  in __ib_get_client_nl_info()
        2553  struct ib_client *client = xa_load(&clients, index);  in ib_get_net_dev_by_params()

/linux-6.6.21/drivers/tty/serial/
    liteuart.c
        382  uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);  in liteuart_console_write()
        399  uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);  in liteuart_console_setup()

/linux-6.6.21/arch/x86/kernel/cpu/sgx/
    encl.c
        86   entry = xa_load(&encl->page_array, PFN_DOWN(addr));  in reclaimer_writing_to_pcmd()
        287  entry = xa_load(&encl->page_array, PFN_DOWN(addr));  in sgx_encl_load_page_in_vma()
        307  entry = xa_load(&encl->page_array, PFN_DOWN(addr));  in sgx_encl_load_page()
        457  (!xa_load(&encl->page_array, PFN_DOWN(addr))))  in sgx_vma_fault()

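encl.c keys encl->page_array by page frame number, so every lookup converts a faulting virtual address with PFN_DOWN() first. A small sketch of that keying convention, with hypothetical helper names; the only requirement is that store and load agree on the key.

#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/xarray.h>

static int sketch_track_page(struct xarray *page_array, unsigned long addr,
                             void *entry)
{
        /* Key by page frame number, exactly as the later lookups expect. */
        return xa_insert(page_array, PFN_DOWN(addr), entry, GFP_KERNEL);
}

static void *sketch_lookup_by_addr(struct xarray *page_array, unsigned long addr)
{
        return xa_load(page_array, PFN_DOWN(addr));
}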