/linux-6.1.9/include/rdma/
    ib_umem.h
        62   static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,   in ib_umem_num_dma_blocks()
        72   return ib_umem_num_dma_blocks(umem, PAGE_SIZE);   in ib_umem_num_pages()

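The two hits above are the definitions themselves. For context, a lightly condensed sketch of what ib_umem.h computes in 6.1 (see the header for the exact text): the block count is the IOVA-aligned extent of the umem divided by the chosen block size.

    /* Condensed from include/rdma/ib_umem.h (6.1.y). */
    static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
                                                unsigned long pgsz)
    {
            /* Round [iova, iova + length) out to pgsz boundaries, then
             * count how many pgsz-sized DMA blocks cover that range. */
            return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
                             ALIGN_DOWN(umem->iova, pgsz))) / pgsz;
    }

    static inline size_t ib_umem_num_pages(struct ib_umem *umem)
    {
            /* Same count, fixed to the CPU page size. */
            return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
    }
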
/linux-6.1.9/drivers/infiniband/hw/mlx4/
    mr.c
        274  *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);   in mlx4_ib_umem_calc_optimal_mtt_size()
        513  n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);   in mlx4_ib_rereg_user_mr()
    srq.c
        123  dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),   in mlx4_ib_create_srq()

/linux-6.1.9/drivers/infiniband/hw/vmw_pvrdma/
    pvrdma_srq.c
        155  srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);   in pvrdma_create_srq()
    pvrdma_mr.c
        136  npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);   in pvrdma_reg_user_mr()
    pvrdma_cq.c
        148  npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);   in pvrdma_create_cq()
    pvrdma_qp.c
        294  ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);   in pvrdma_create_qp()
        296  qp->npages_recv = ib_umem_num_dma_blocks(   in pvrdma_create_qp()

/linux-6.1.9/drivers/infiniband/hw/bnxt_re/
    qplib_res.c
        117  pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);   in __alloc_pbl()
        218  unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(   in bnxt_qplib_alloc_init_hwq()
    ib_verbs.c
        3821 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);   in bnxt_re_reg_user_mr()

/linux-6.1.9/drivers/infiniband/hw/mlx5/
    cq.c
        765  ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);   in create_cq_user()
        1310 npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);   in mlx5_ib_resize_cq()
    mr.c
        932  dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));   in alloc_cacheable_mr()
        988  roundup(ib_umem_num_dma_blocks(umem, page_size), 2);   in reg_create()
        1376 ib_umem_num_dma_blocks(new_umem, *page_size);   in can_use_umr_rereg_pas()
    umr.c
        626  dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),   in mlx5r_umr_update_mr_pas()
    srq_cmd.c
        103  ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))   in __set_srq_page_size()
    devx.c
        2268 ib_umem_num_dma_blocks(obj->umem, page_size));   in devx_umem_reg_cmd_alloc()
        2278 ib_umem_num_dma_blocks(obj->umem, page_size));   in devx_umem_reg_cmd_alloc()
    qp.c
        828  rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);   in create_user_rq()
        936  ncont = ib_umem_num_dma_blocks(ubuffer->umem, page_size);   in _create_user_qp()
        1288 ib_umem_num_dma_blocks(sq->ubuffer.umem, page_size);   in create_raw_packet_qp_sq()
        1377 sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size);   in create_raw_packet_qp_rq()

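Several of the call sites above (for example qp.c:1377 and srq_cmd.c:103) use the block count only to size a u64 physical-address array that is then filled block by block with the generic umem block iterator. A minimal sketch of that pattern follows; the function name and surrounding driver context are hypothetical, the iterator APIs are the ones from include/rdma/ib_umem.h and ib_verbs.h.

    #include <linux/slab.h>
    #include <rdma/ib_umem.h>

    /* Hypothetical helper showing the "count, allocate, fill" pattern. */
    static __be64 *example_alloc_pas(struct ib_umem *umem,
                                     unsigned long page_size, size_t *npas)
    {
            struct ib_block_iter biter;
            __be64 *pas;
            size_t i = 0;

            /* One entry per page_size-aligned DMA block of the umem. */
            *npas = ib_umem_num_dma_blocks(umem, page_size);
            pas = kcalloc(*npas, sizeof(*pas), GFP_KERNEL);
            if (!pas)
                    return NULL;

            /* Walk the umem in page_size blocks, recording each DMA address. */
            rdma_umem_for_each_dma_block(umem, &biter, page_size)
                    pas[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));

            return pas;
    }
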
/linux-6.1.9/drivers/infiniband/hw/cxgb4/
    mem.c
        535  n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);   in c4iw_reg_user_mr()

/linux-6.1.9/drivers/infiniband/hw/mthca/
    mthca_provider.c
        861  n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);   in mthca_reg_user_mr()

/linux-6.1.9/drivers/infiniband/hw/erdma/
    erdma_verbs.c
        492  mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);   in get_mtt_entries()

/linux-6.1.9/drivers/infiniband/hw/efa/
    efa_verbs.c
        1615 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);   in efa_register_mr()

/linux-6.1.9/drivers/infiniband/hw/qedr/
    verbs.c
        809  fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);   in qedr_init_user_queue()
        2974 ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);   in qedr_reg_user_mr()

/linux-6.1.9/drivers/infiniband/hw/ocrdma/
    ocrdma_verbs.c
        871  dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));   in ocrdma_reg_user_mr()

/linux-6.1.9/drivers/infiniband/hw/irdma/
    verbs.c
        2823 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);   in irdma_reg_user_mr()

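The reg_user_mr() paths listed above share the same shape: choose a block size the hardware supports, then size the translation table from the block count. A minimal sketch of that registration-side calculation; the function name and the SZ_4K | SZ_2M bitmap are assumptions standing in for a real driver's supported page sizes.

    #include <linux/sizes.h>
    #include <rdma/ib_umem.h>

    /* Hypothetical count of HW mapping entries for a user MR. */
    static long example_mr_num_blocks(struct ib_umem *umem, u64 iova)
    {
            unsigned long page_size;

            /* Largest supported block size that tiles [iova, iova + length). */
            page_size = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, iova);
            if (!page_size)
                    return -EINVAL;

            /* Number of page_size blocks the device must map. */
            return ib_umem_num_dma_blocks(umem, page_size);
    }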