Occurrences of nr_cqe in linux-6.1.9:

/linux-6.1.9/include/trace/events/rdma_core.h
  194  int nr_cqe,
  199  TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),
  203  __field(int, nr_cqe)
  210  __entry->nr_cqe = nr_cqe;
  216  __entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
  223  int nr_cqe,
  229  TP_ARGS(nr_cqe, comp_vector, poll_ctx, rc),
  233  __field(int, nr_cqe)
  240  __entry->nr_cqe = nr_cqe;
  246  __entry->nr_cqe, __entry->comp_vector,
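
The rdma_core.h fragments belong to the cq_alloc and cq_alloc_error tracepoints fired by the core CQ allocator (see the trace_cq_alloc()/trace_cq_alloc_error() calls in cq.c below). To show how nr_cqe ends up in a trace record, here is a minimal sketch of a cq_alloc-style event; the field layout is reconstructed from the fragments above, while the format string and the cq->res.id lookup are assumptions, so treat it as illustrative rather than a copy of the header.

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM rdma_core

    #include <linux/tracepoint.h>
    #include <rdma/ib_verbs.h>

    TRACE_EVENT(cq_alloc,
            TP_PROTO(const struct ib_cq *cq, int nr_cqe, int comp_vector,
                     enum ib_poll_context poll_ctx),
            TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),
            TP_STRUCT__entry(
                    __field(u32, cq_id)
                    __field(int, nr_cqe)
                    __field(int, comp_vector)
                    __field(unsigned long, poll_ctx)
            ),
            TP_fast_assign(
                    __entry->cq_id = cq->res.id;   /* assumption: the restrack id names the CQ */
                    __entry->nr_cqe = nr_cqe;
                    __entry->comp_vector = comp_vector;
                    __entry->poll_ctx = poll_ctx;
            ),
            TP_printk("cq.id=%u nr_cqe=%d comp_vector=%d poll_ctx=%lu",
                      __entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
                      __entry->poll_ctx)
    );
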
/linux-6.1.9/drivers/infiniband/core/cq.c
  212  struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,  [in __ib_alloc_cq(), argument]
  217  .cqe = nr_cqe,  [in __ib_alloc_cq()]
  270  trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);  [in __ib_alloc_cq()]
  281  trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);  [in __ib_alloc_cq()]
  298  int nr_cqe, enum ib_poll_context poll_ctx,  [in __ib_alloc_cq_any(), argument]
  309  return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,  [in __ib_alloc_cq_any()]
  428  struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,  [in ib_cq_pool_get(), argument]
  466  if (cq->cqe_used + nr_cqe > cq->cqe)  [in ib_cq_pool_get()]
  473  found->cqe_used += nr_cqe;  [in ib_cq_pool_get()]
  484  ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);  [in ib_cq_pool_get()]
  [all …]
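
__ib_alloc_cq() is the common back end behind the ib_alloc_cq()/ib_alloc_cq_any() wrappers, and the ib_cq_pool_get() fragments show how nr_cqe acts as a capacity reservation against a shared CQ: a pooled CQ is reusable only while cqe_used + nr_cqe still fits in its total depth. A minimal sketch of that accounting idea, with the pool locking, per-vector lists and the ib_alloc_cqs() refill path stripped out (so not the kernel's implementation):

    #include <linux/list.h>
    #include <rdma/ib_verbs.h>

    /* Reduced model of the cqe_used accounting; cqe_used and pool_entry are
     * the ib_cq fields the real pool uses. */
    static struct ib_cq *pick_pooled_cq(struct list_head *pool, unsigned int nr_cqe)
    {
            struct ib_cq *cq, *found = NULL;

            list_for_each_entry(cq, pool, pool_entry) {
                    /* skip CQs that cannot host nr_cqe more completions */
                    if (cq->cqe_used + nr_cqe > cq->cqe)
                            continue;
                    found = cq;
                    break;
            }
            if (found)
                    found->cqe_used += nr_cqe;  /* reservation, undone by ib_cq_pool_put() */
            return found;
    }

ib_cq_pool_put() later subtracts the same nr_cqe, which is why the callers below (rtrs and nvmet-rdma) have to remember the exact value they reserved.
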
/linux-6.1.9/drivers/infiniband/ulp/rtrs/rtrs.c
  230  static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,  [in create_cq(), argument]
  237  cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,  [in create_cq()]
  240  cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);  [in create_cq()]
  248  con->nr_cqe = nr_cqe;  [in create_cq()]
  288  ib_cq_pool_put(con->cq, con->nr_cqe);  [in destroy_cq()]
  294  u32 max_send_sge, int cq_vector, int nr_cqe,  [in rtrs_cq_qp_create(), argument]
  300  err = create_cq(con, cq_vector, nr_cqe, poll_ctx);  [in rtrs_cq_qp_create()]
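
rtrs stores the chosen depth in con->nr_cqe precisely so the teardown path can hand the same number back to ib_cq_pool_put(). A hedged reconstruction of create_cq() based only on the fragments above; the condition that selects a private CQ over the shared pool is not visible in the hits, so use_private_cq() is a hypothetical placeholder:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>
    /* struct rtrs_con is declared in rtrs-pri.h (next file below) */

    static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
                         enum ib_poll_context poll_ctx)
    {
            struct rdma_cm_id *cm_id = con->cm_id;
            struct ib_cq *cq;

            if (use_private_cq(con))        /* hypothetical: the real test is not in the fragments */
                    cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
                                     poll_ctx);
            else
                    cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
            if (IS_ERR(cq))
                    return PTR_ERR(cq);

            con->cq = cq;
            con->nr_cqe = nr_cqe;           /* replayed by destroy_cq() -> ib_cq_pool_put() */
            return 0;
    }
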
/linux-6.1.9/drivers/infiniband/ulp/rtrs/rtrs-pri.h
   98  int nr_cqe;  [member]
  317  u32 max_send_sge, int cq_vector, int nr_cqe,
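
The two rtrs-pri.h hits are the struct member that create_cq() fills in and the nr_cqe parameter of the rtrs_cq_qp_create() prototype. A trimmed view of the member (surrounding fields elided, layout not implied):

    struct rtrs_con {
            /* ... */
            struct ib_cq       *cq;
            struct rdma_cm_id  *cm_id;
            int                 nr_cqe;   /* CQ depth, consumed by ib_cq_pool_put() on teardown */
            /* ... */
    };
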
/linux-6.1.9/include/rdma/ib_verbs.h
  3856  struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
  3860  int nr_cqe, int comp_vector,  [in ib_alloc_cq(), argument]
  3863  return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,  [in ib_alloc_cq()]
  3868  int nr_cqe, enum ib_poll_context poll_ctx,
  3879  void *private, int nr_cqe,  [in ib_alloc_cq_any(), argument]
  3882  return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,  [in ib_alloc_cq_any()]
  4000  struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
  4004  void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
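
In ib_verbs.h the inline ib_alloc_cq()/ib_alloc_cq_any() wrappers forward nr_cqe unchanged to __ib_alloc_cq()/__ib_alloc_cq_any(), and the pool API at the bottom takes nr_cqe both on get and on put. A hedged usage sketch; my_cq_example(), drv_ctx and depth are hypothetical, only the ib_* calls come from the header:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static int my_cq_example(struct ib_device *dev, void *drv_ctx, unsigned int depth)
    {
            struct ib_cq *cq;

            /* Private CQ: sized for exactly "depth" completions, released with ib_free_cq(). */
            cq = ib_alloc_cq(dev, drv_ctx, depth, 0 /* comp_vector */, IB_POLL_SOFTIRQ);
            if (IS_ERR(cq))
                    return PTR_ERR(cq);
            ib_free_cq(cq);

            /* Shared CQ: "depth" entries are reserved from a pooled CQ and must be
             * returned with the very same value. */
            cq = ib_cq_pool_get(dev, depth, 0 /* comp_vector hint */, IB_POLL_SOFTIRQ);
            if (IS_ERR(cq))
                    return PTR_ERR(cq);
            ib_cq_pool_put(cq, depth);

            return 0;
    }
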
/linux-6.1.9/drivers/nvme/target/rdma.c
  1263  int nr_cqe, ret, i, factor;  [in nvmet_rdma_create_queue_ib(), local]
  1268  nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;  [in nvmet_rdma_create_queue_ib()]
  1270  queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,  [in nvmet_rdma_create_queue_ib()]
  1275  nr_cqe + 1, ret);  [in nvmet_rdma_create_queue_ib()]
  1332  ib_cq_pool_put(queue->cq, nr_cqe + 1);  [in nvmet_rdma_create_queue_ib()]
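
nvmet-rdma derives nr_cqe from the queue geometry and reserves one extra entry on top of it, both on get and on put. A hedged sketch of that sizing; the rationale in the comments is my reading of the fragments, and my_nvmet_queue is a stand-in for struct nvmet_rdma_queue carrying only the fields the fragments use:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    struct my_nvmet_queue {
            struct ib_cq *cq;
            int recv_queue_size;
            int send_queue_size;
    };

    static int my_queue_cq_get(struct my_nvmet_queue *queue, struct ib_device *dev,
                               int comp_vector, enum ib_poll_context poll_ctx)
    {
            /* one CQE per posted receive, plus two per send slot (send + RDMA read/write) */
            int nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

            /* the extra "+ 1" leaves headroom for one more completion (e.g. a drain WR) */
            queue->cq = ib_cq_pool_get(dev, nr_cqe + 1, comp_vector, poll_ctx);
            if (IS_ERR(queue->cq))
                    return PTR_ERR(queue->cq);

            /* teardown mirrors this with ib_cq_pool_put(queue->cq, nr_cqe + 1) */
            return 0;
    }
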
/linux-6.1.9/drivers/nvme/host/tcp.c
   131  unsigned int nr_cqe;  [member]
   553  queue->nr_cqe++;  [in nvme_tcp_process_nvme_cqe()]
   817  queue->nr_cqe++;  [in nvme_tcp_recv_data()]
   864  queue->nr_cqe++;  [in nvme_tcp_recv_ddgst()]
  1201  queue->nr_cqe = 0;  [in nvme_tcp_try_recv()]
  2480  return queue->nr_cqe;  [in nvme_tcp_poll()]
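
Unlike the RDMA cases, nvme-tcp has no hardware completion queue; nr_cqe here is a per-queue software counter that plays the same role for the block layer's polling interface. A hedged sketch of the pattern the fragments show; my_tcp_queue and my_tcp_poll_pass are stand-ins for the driver-private struct nvme_tcp_queue and its receive pass:

    /* Stand-in for struct nvme_tcp_queue, which is private to tcp.c. */
    struct my_tcp_queue {
            unsigned int nr_cqe;    /* completions observed during the current receive pass */
    };

    static int my_tcp_poll_pass(struct my_tcp_queue *queue)
    {
            queue->nr_cqe = 0;          /* reset, as nvme_tcp_try_recv() does at line 1201 */

            /*
             * ...read from the socket; every completed command (a received CQE,
             * the last data PDU, or a verified data digest) does queue->nr_cqe++ ...
             */

            return queue->nr_cqe;       /* reported by nvme_tcp_poll() as the work found */
    }
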