
Searched refs:cq_size (Results 1 – 25 of 42) sorted by relevance


/linux-6.1.9/drivers/infiniband/ulp/iser/
iser_verbs.c
    242  unsigned int max_send_wr, cq_size;  in iser_create_ib_conn_res() local
    257  cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;  in iser_create_ib_conn_res()
    258  ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);  in iser_create_ib_conn_res()
    263  ib_conn->cq_size = cq_size;  in iser_create_ib_conn_res()
    292  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);  in iser_create_ib_conn_res()
    405  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);  in iser_free_ib_conn_res()
iscsi_iser.h
    374  u32 cq_size;  member
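
The iser_verbs.c hits trace the shared-CQ-pool idiom end to end: size the CQ for the worst-case completion count, take it from the device's pool with ib_cq_pool_get(), and remember cq_size so the matching ib_cq_pool_put() returns exactly the same share. A minimal sketch of that idiom, with a hypothetical my_conn structure and an invented receive budget standing in for ISER_QP_MAX_RECV_DTOS:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

#define MY_QP_MAX_RECV_DTOS 128        /* stand-in for ISER_QP_MAX_RECV_DTOS */

struct my_conn {
        struct ib_cq *cq;
        u32 cq_size;                   /* remembered for ib_cq_pool_put() */
};

static int my_conn_get_cq(struct my_conn *conn, struct ib_device *ib_dev,
                          unsigned int max_send_wr)
{
        /* Worst case: one CQE per send WR plus one per posted receive. */
        unsigned int cq_size = max_send_wr + MY_QP_MAX_RECV_DTOS;

        conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1 /* any vector */,
                                  IB_POLL_SOFTIRQ);
        if (IS_ERR(conn->cq))
                return PTR_ERR(conn->cq);

        conn->cq_size = cq_size;
        return 0;
}

static void my_conn_put_cq(struct my_conn *conn)
{
        /* The put must quote the same size the get was charged for. */
        ib_cq_pool_put(conn->cq, conn->cq_size);
}

The isert and nvme-rdma hits further down pair get and put the same way.
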
/linux-6.1.9/drivers/net/ethernet/mellanox/mlxbf_gige/
mlxbf_gige_rx.c
    84   size_t wq_size, cq_size;  in mlxbf_gige_rx_init() local
    118  cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;  in mlxbf_gige_rx_init()
    119  priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,  in mlxbf_gige_rx_init()
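
The mlxbf_gige hit is the plain DMA-ring variant: cq_size is just the completion-entry size times the queue depth, passed straight to dma_alloc_coherent(). A sketch under invented names (my_alloc_rx_cq and MY_RX_CQE_SZ stand in for the driver's):

#include <linux/dma-mapping.h>

#define MY_RX_CQE_SZ 8        /* illustrative; the driver uses MLXBF_GIGE_RX_CQE_SZ */

static void *my_alloc_rx_cq(struct device *dev, unsigned int rx_q_entries,
                            dma_addr_t *cq_dma)
{
        size_t cq_size = MY_RX_CQE_SZ * (size_t)rx_q_entries;

        /* Zero-initialized, coherent ring the NIC DMAs completions into. */
        return dma_alloc_coherent(dev, cq_size, cq_dma, GFP_KERNEL);
}
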
/linux-6.1.9/drivers/net/ethernet/microsoft/mana/
mana_en.c
    755   req.cq_size = cq_spec->queue_size;  in mana_create_wq_obj()
    1410  u32 cq_size;  in mana_create_txq() local
    1427  cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;  in mana_create_txq()
    1428  cq_size = PAGE_ALIGN(cq_size);  in mana_create_txq()
    1461  spec.queue_size = cq_size;  in mana_create_txq()
    1570  struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)  in mana_alloc_rx_wqe() argument
    1582  *cq_size = 0;  in mana_alloc_rx_wqe()
    1617  *cq_size += COMP_ENTRY_SIZE;  in mana_alloc_rx_wqe()
    1651  u32 cq_size, rq_size;  in mana_create_rxq() local
    1668  err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);  in mana_create_rxq()
    [all …]
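
mana_create_txq() sizes the TX completion queue from a fixed worst case and page-aligns it; the PAGE_ALIGN() call suggests the GDMA layer hands out queue memory in whole pages. A sketch of that step, with illustrative stand-ins for MAX_SEND_BUFFERS_PER_QUEUE and COMP_ENTRY_SIZE:

#include <linux/mm.h>        /* PAGE_ALIGN */
#include <linux/types.h>

#define MY_MAX_SEND_BUFFERS 256        /* illustrative queue depth */
#define MY_COMP_ENTRY_SIZE 64          /* illustrative; COMP_ENTRY_SIZE in mana */

static u32 my_txq_cq_bytes(void)
{
        u32 cq_size = MY_MAX_SEND_BUFFERS * MY_COMP_ENTRY_SIZE;

        /* Round the byte size up to a whole number of pages. */
        return PAGE_ALIGN(cq_size);
}
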
hw_channel.c
    341  u32 eq_size, cq_size;  in mana_hwc_create_cq() local
    348  cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);  in mana_hwc_create_cq()
    349  if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)  in mana_hwc_create_cq()
    350  cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;  in mana_hwc_create_cq()
    363  err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,  in mana_hwc_create_cq()
mana.h
    509  u32 cq_size;  member
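
mana_hwc_create_cq() layers two more constraints on the size: round up to a power of two, then floor at the minimum supported page size. A sketch with invented constants in place of GDMA_CQE_SIZE and MINIMUM_SUPPORTED_PAGE_SIZE:

#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/types.h>

#define MY_CQE_SIZE 64               /* illustrative; GDMA_CQE_SIZE */
#define MY_MIN_PAGE_SIZE 4096        /* illustrative; MINIMUM_SUPPORTED_PAGE_SIZE */

static u32 my_hwc_cq_bytes(u16 q_depth)
{
        /* The ring must be a power of two... */
        u32 cq_size = roundup_pow_of_two(MY_CQE_SIZE * q_depth);

        /* ...and no smaller than the minimum page the hardware supports. */
        return max_t(u32, cq_size, MY_MIN_PAGE_SIZE);
}
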
/linux-6.1.9/include/uapi/rdma/
irdma-abi.h
    85  __u32 cq_size;  member
/linux-6.1.9/drivers/infiniband/hw/irdma/
user.h
    356  u32 cq_size;  member
    388  u32 cq_size;  member
uk.c
    968   void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)  in irdma_uk_cq_resize() argument
    971   cq->cq_size = cq_size;  in irdma_uk_cq_resize()
    972   IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);  in irdma_uk_cq_resize()
    1494  cq->cq_size = info->cq_size;  in irdma_uk_cq_init()
    1499  IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);  in irdma_uk_cq_init()
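
irdma_uk_cq_resize() shows the resize handshake at the user-kernel shim: repoint the CQ at the new buffer, record the new cq_size, and reinitialize the ring indices from it. A simplified stand-in, with my_ring_init() mimicking what IRDMA_RING_INIT appears to do (record the size, zero head and tail):

#include <linux/types.h>

struct my_ring {
        u32 head;
        u32 tail;
        u32 size;
};

struct my_cq {
        void *cq_base;
        u32 cq_size;
        struct my_ring cq_ring;
};

/* Mimics IRDMA_RING_INIT: remember the depth, start the ring empty. */
static void my_ring_init(struct my_ring *ring, u32 size)
{
        ring->head = 0;
        ring->tail = 0;
        ring->size = size;
}

static void my_cq_resize(struct my_cq *cq, void *cq_base, u32 cq_size)
{
        cq->cq_base = cq_base;        /* new, larger CQE array */
        cq->cq_size = cq_size;
        my_ring_init(&cq->cq_ring, cq->cq_size);
}
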
puda.c
    730   set_64bit_val(wqe, 0, cq->cq_uk.cq_size);  in irdma_puda_cq_wqe()
    780   cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));  in irdma_puda_cq_create()
    792   info.shadow_read_threshold = rsrc->cq_size >> 2;  in irdma_puda_cq_create()
    797   init_info->cq_size = rsrc->cq_size;  in irdma_puda_cq_create()
    1056  rsrc->cq_size = info->rq_size + info->sq_size;  in irdma_puda_create_rsrc()
    1059  rsrc->cq_size += info->rq_size;  in irdma_puda_create_rsrc()
puda.h
    115  u32 cq_size;  member
verbs.h
    116  u16 cq_size;  member
verbs.c
    1841  info.cq_size = max(entries, 4);  in irdma_resize_cq()
    1843  if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)  in irdma_resize_cq()
    1880  rsize = info.cq_size * sizeof(struct irdma_cqe);  in irdma_resize_cq()
    1930  ibcq->cqe = info.cq_size - 1;  in irdma_resize_cq()
    1965  #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)  in irdma_create_cq()
    2000  ukinfo->cq_size = max(entries, 4);  in irdma_create_cq()
    2002  iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;  in irdma_create_cq()
    2079  ukinfo->cq_size = entries;  in irdma_create_cq()
    2081  rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);  in irdma_create_cq()
    2108  info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,  in irdma_create_cq()
    [all …]
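
The verbs.c hits encode a few conventions: the requested depth is clamped to at least 4, the value reported back through ibcq->cqe is cq_size - 1, and irdma_resize_cq() treats a request equal to the current cq_size - 1 as already satisfied, consistent with the CQ carrying one extra slot. A hedged sketch of that check (the struct is hypothetical and the reallocation is elided):

#include <linux/minmax.h>
#include <linux/types.h>

struct my_iwcq {
        u32 cq_size;        /* as stored in the low-level CQ */
};

static int my_resize_cq(struct my_iwcq *iwcq, u32 entries)
{
        u32 cq_size = max(entries, 4U);        /* never below 4 entries */

        /*
         * The stored cq_size appears to carry one extra slot, so a
         * request equal to cq_size - 1 is already satisfied: no-op.
         */
        if (cq_size == iwcq->cq_size - 1)
                return 0;

        /* ... allocate cq_size * sizeof(struct irdma_cqe) and reprogram ... */
        return 0;
}
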
/linux-6.1.9/drivers/infiniband/ulp/isert/
ib_isert.h
    184  u32 cq_size;  member
ib_isert.c
    106  u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;  in isert_create_qp() local
    112  isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);  in isert_create_qp()
    118  isert_conn->cq_size = cq_size;  in isert_create_qp()
    140  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);  in isert_create_qp()
    412  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);  in isert_destroy_qp()
/linux-6.1.9/drivers/infiniband/ulp/srpt/
ib_srpt.h
    304  u32 cq_size;  member
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c
    411  static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)  in mlx5_fpga_conn_create_cq() argument
    424  cq_size = roundup_pow_of_two(cq_size);  in mlx5_fpga_conn_create_cq()
    425  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
    455  MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
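
The mlx5 FPGA path is the log-encoded variant: the CQ context field is log_cq_size, so the size is rounded up to a power of two and ilog2() of it is programmed via MLX5_SET(). A sketch with a trivial stand-in for the MLX5_SET() bitfield macro:

#include <linux/log2.h>
#include <linux/types.h>

/* Stand-in for MLX5_SET(cqc, cqc, log_cq_size, ...). */
static void my_set_log_cq_size(u8 *cqc, u8 log_cq_size)
{
        cqc[0] = log_cq_size;        /* real code packs a firmware bitfield */
}

static void my_program_cq_size(u8 *cqc, unsigned int cq_size)
{
        /* The context field is log2 of the depth, so the depth must be 2^n. */
        cq_size = roundup_pow_of_two(cq_size);
        my_set_log_cq_size(cqc, ilog2(cq_size));
}
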
/linux-6.1.9/drivers/net/ethernet/pensando/ionic/
ionic_lif.c
    383   dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);  in ionic_qcq_free()
    567   int q_size, cq_size;  in ionic_qcq_alloc() local
    571   cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);  in ionic_qcq_alloc()
    573   new->q_size = PAGE_SIZE + q_size + cq_size;  in ionic_qcq_alloc()
    602   new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);  in ionic_qcq_alloc()
    603   new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,  in ionic_qcq_alloc()
    638   dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);  in ionic_qcq_alloc()
    721   memset(qcq->cq_base, 0, qcq->cq_size);  in ionic_qcq_sanitize()
    2616  swap(a->cq_size, b->cq_size);  in ionic_swap_queues()
ionic_lif.h
    69  u32 cq_size;  member
ionic_debugfs.c
    133  debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);  in ionic_debugfs_add_qcq()
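
ionic keeps cq_size in the qcq struct both so dma_free_coherent() can later be called with the original allocation size and so the value can be exposed read-only through debugfs. The debugfs hit is a genuine one-liner; a sketch around it with a hypothetical qcq type:

#include <linux/debugfs.h>
#include <linux/types.h>

struct my_qcq {
        u32 cq_size;
};

static void my_debugfs_add_qcq(struct dentry *parent, struct my_qcq *qcq)
{
        /* 0400: read-only hex view of the CQ allocation size. */
        debugfs_create_x32("cq_size", 0400, parent, &qcq->cq_size);
}
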
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c
    921  int cq_size;  in mlx5dr_send_ring_alloc() local
    929  cq_size = QUEUE_SIZE + 1;  in mlx5dr_send_ring_alloc()
    930  dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);  in mlx5dr_send_ring_alloc()
/linux-6.1.9/drivers/nvme/host/
rdma.c
    100  int cq_size;  member
    419  ib_cq_pool_put(queue->ib_cq, queue->cq_size);  in nvme_rdma_free_cq()
    478  queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,  in nvme_rdma_create_cq()
    482  queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,  in nvme_rdma_create_cq()
    510  queue->cq_size = cq_factor * queue->queue_size + 1;  in nvme_rdma_create_queue_ib()
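
nvme-rdma computes cq_size as cq_factor * queue_size plus one extra slot and then, per the create_cq hits, gives some queues a dedicated CQ while the rest draw from the per-device pool. A sketch of that split, assuming (as the polling-context arguments suggest) that the dedicated path is for polled queues; error handling is trimmed and the predicate is left to the caller:

#include <rdma/ib_verbs.h>

static struct ib_cq *my_queue_cq(struct ib_device *ibdev, void *queue,
                                 int cq_size, int comp_vector, bool polling)
{
        if (polling)
                /* Poll-mode queues get a dedicated, directly polled CQ. */
                return ib_alloc_cq(ibdev, queue, cq_size, comp_vector,
                                   IB_POLL_DIRECT);

        /* Interrupt-driven queues share CQs from the per-device pool. */
        return ib_cq_pool_get(ibdev, cq_size, comp_vector, IB_POLL_SOFTIRQ);
}
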
/linux-6.1.9/drivers/dma/
hisi_dma.c
    586  size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;  in hisi_dma_alloc_qps_mem() local
    598  chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,  in hisi_dma_alloc_qps_mem()
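
hisi_dma uses the device-managed variant, dmam_alloc_coherent(), so the CQ ring is freed automatically by devres when the device detaches, with no explicit free on teardown. A sketch with an invented completion-entry type:

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct my_cqe {
        __le64 data;        /* illustrative completion-entry layout */
};

static void *my_alloc_chan_cq(struct device *dev, unsigned int chan_depth,
                              dma_addr_t *cq_dma)
{
        size_t cq_size = sizeof(struct my_cqe) * chan_depth;

        /* devres-managed: freed automatically when the device unbinds. */
        return dmam_alloc_coherent(dev, cq_size, cq_dma, GFP_KERNEL);
}
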
/linux-6.1.9/include/linux/qed/
qed_rdma_if.h
    257  u32 cq_size;  member
/linux-6.1.9/drivers/misc/habanalabs/common/
habanalabs_ioctl.c
    635  info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;  in undefined_opcode_info()
