
Searched refs:cq_depth (Results 1 – 24 of 24) sorted by relevance

/linux-6.6.21/drivers/net/ethernet/fungible/funcore/
fun_queue.c
  126  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,  [in fun_cq_create(), argument]
  138  if (cq_depth > fdev->q_depth)  [in fun_cq_create()]
  146  cq_depth - 1, dma_addr, tailroom,  [in fun_cq_create()]
  292  max = funq->cq_depth - 1;  [in __fun_process_cq()]
  305  if (++funq->cq_head == funq->cq_depth) {  [in __fun_process_cq()]
  366  funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,  [in fun_alloc_cqes()]
  388  fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,  [in fun_free_queue()]
  441  funq->cq_depth = req->cq_depth;  [in fun_alloc_queue()]
  495  funq->cqe_size_log2, funq->cq_depth,  [in fun_create_cq()]
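
The __fun_process_cq() hits above (lines 292 and 305) show the classic completion-queue consumer shape: a fixed-depth ring whose head index wraps at cq_depth. The results do not show how new entries are detected, so the sketch below assumes the common phase-bit convention; the struct and function names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>

struct cqe {                    /* hypothetical completion entry */
    uint32_t data;
    uint8_t  phase;             /* written by the producer */
};

struct cq {
    struct cqe *cqes;
    uint32_t cq_depth;          /* number of entries in the ring */
    uint32_t cq_head;           /* next entry to consume */
    uint8_t  cq_phase;          /* phase value expected for new entries */
};

/* Consume at most 'budget' completions; return how many were processed. */
static unsigned int process_cq(struct cq *q, unsigned int budget,
                               void (*handle)(const struct cqe *))
{
    unsigned int done = 0;

    while (done < budget) {
        const struct cqe *e = &q->cqes[q->cq_head];

        if (e->phase != q->cq_phase)        /* nothing new yet */
            break;

        handle(e);
        done++;

        if (++q->cq_head == q->cq_depth) {  /* wrap, as at line 305 above */
            q->cq_head = 0;
            q->cq_phase ^= 1;               /* expect the flipped phase next lap */
        }
    }
    return done;
}
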
fun_queue.h
  45   u32 cq_depth;  [member]
  122  u32 cq_depth;  [member]
  138  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
fun_dev.c
  230  .cq_depth = areq->cq_depth,  [in fun_enable_admin_queue()]
  243  areq->cq_depth < AQA_MIN_QUEUE_SIZE ||  [in fun_enable_admin_queue()]
  244  areq->cq_depth > AQA_MAX_QUEUE_SIZE)  [in fun_enable_admin_queue()]
  270  (funq->cq_depth - 1) << AQA_ACQS_SHIFT,  [in fun_enable_admin_queue()]
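
The fun_enable_admin_queue() hits show the two steps usually taken with an admin CQ depth: range-check it against device limits, then program the hardware with the zero-based value ("depth - 1") shifted into a register field. The constant values and register layout in the sketch below are assumptions for illustration; only the names AQA_MIN_QUEUE_SIZE, AQA_MAX_QUEUE_SIZE and AQA_ACQS_SHIFT come from the results.

#include <errno.h>
#include <stdint.h>

#define AQA_MIN_QUEUE_SIZE  2u      /* assumed lower bound */
#define AQA_MAX_QUEUE_SIZE  4096u   /* assumed upper bound */
#define AQA_ACQS_SHIFT      16      /* assumed bit position of the admin CQ size field */

/* Validate the requested depth and fold it into the admin-queue-attributes
 * register value; returns -EINVAL if the depth is out of range. */
static int encode_admin_cq_size(uint32_t cq_depth, uint32_t *aqa)
{
    if (cq_depth < AQA_MIN_QUEUE_SIZE || cq_depth > AQA_MAX_QUEUE_SIZE)
        return -EINVAL;

    *aqa |= (cq_depth - 1) << AQA_ACQS_SHIFT;   /* hardware fields are zero-based */
    return 0;
}
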
fun_dev.h
  92   u16 cq_depth;  [member]
/linux-6.6.21/drivers/net/ethernet/fungible/funeth/
funeth.h
  75   unsigned int cq_depth;  [member]
  117  unsigned int cq_depth;  [member]
funeth_main.c
  508   err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,  [in fun_alloc_rings()]
  841   .cq_depth = fp->cq_depth,  [in funeth_open()]
  1642  .cq_depth = fp->cq_depth,  [in fun_change_num_queues()]
  1782  fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);  [in fun_create_netdev()]
  2001  .cq_depth = ADMIN_CQ_DEPTH,  [in funeth_probe()]
funeth_ethtool.c
  599  .cq_depth = 2 * ring->rx_pending,  [in fun_set_ringparam()]
  611  fp->cq_depth = 2 * fp->rq_depth;  [in fun_set_ringparam()]
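
Taken together, the funeth_main.c and funeth_ethtool.c hits sketch how the driver picks a CQ depth: a default clamped to what the device supports at probe time, and twice the RX queue depth when the ring is resized through ethtool. The sketch below restates that selection in plain C; the default value, struct layout and the rationale for the factor of two are assumptions, not taken from the driver.

#include <stdint.h>

#define CQ_DEPTH_DEFAULT 1024u              /* assumed default, stands in for CQ_DEPTH */

struct fun_priv {
    uint32_t rq_depth;                      /* RX descriptor ring depth */
    uint32_t cq_depth;                      /* RX completion queue depth */
};

static inline uint32_t min_u32(uint32_t a, uint32_t b)
{
    return a < b ? a : b;
}

/* Probe-time sizing: never ask for more than the device advertises. */
static void init_cq_depth(struct fun_priv *fp, uint32_t device_max_qdepth)
{
    fp->cq_depth = min_u32(CQ_DEPTH_DEFAULT, device_max_qdepth);
}

/* ethtool -G style resize: the CQ tracks the RX ring at twice its depth,
 * leaving headroom if one buffer can produce more than one completion. */
static void set_rx_ringparam(struct fun_priv *fp, uint32_t rx_pending)
{
    fp->rq_depth = rx_pending;
    fp->cq_depth = 2 * fp->rq_depth;
}
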
/linux-6.6.21/include/uapi/misc/uacce/
hisi_qm.h
  27   __u16 cq_depth;  [member]
/linux-6.6.21/drivers/crypto/hisilicon/
qm.c
  239   #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \  [argument]
  240   ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
  878   if (qp->qp_status.cq_head == qp->cq_depth - 1) {  [in qm_cq_head_update()]
  1913  memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);  [in qm_create_qp_nolock()]
  2036  cqc->w8 = cpu_to_le16(qp->cq_depth - 1);  [in qm_cq_ctx_cfg()]
  2038  cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));  [in qm_cq_ctx_cfg()]
  2495  qp_info.cq_depth = qp->cq_depth;  [in hisi_qm_uacce_ioctl()]
  2645  u16 sq_depth, cq_depth;  [in qm_alloc_uacce(), local]
  2687  qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);  [in qm_alloc_uacce()]
  2691  sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>  [in qm_alloc_uacce()]
  [all …]
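
The QM_MK_CQC_DW3_V2() hits (lines 239-240 and 2038) show the HiSilicon QM packing the CQ context doubleword: the zero-based depth in the low bits and the CQE size shifted in above it. The self-contained encode/decode pair below illustrates that layout; the 16-bit split is an assumption, since the value of QM_CQ_CQE_SIZE_SHIFT does not appear in the results.

#include <stdint.h>

#define CQE_SIZE_SHIFT 16   /* assumed position of the CQE-size field */

static inline uint32_t mk_cqc_dw3(uint32_t cqe_sz, uint32_t cq_depth)
{
    /* depth is stored zero-based; the CQE size occupies the upper field */
    return (cq_depth - 1) | (cqe_sz << CQE_SIZE_SHIFT);
}

static inline uint32_t cqc_dw3_depth(uint32_t dw3)
{
    return (dw3 & ((1u << CQE_SIZE_SHIFT) - 1)) + 1;    /* recover the depth */
}
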
debugfs.c
  298  u16 sq_depth = qm->qp_array->cq_depth;  [in qm_sq_dump()]
  332  ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);  [in qm_cq_dump()]
/linux-6.6.21/drivers/infiniband/hw/hns/
hns_roce_restrack.c
  21   if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))  [in hns_roce_fill_res_cq_entry()]
hns_roce_cq.c
  207  buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;  [in alloc_cq_buf()]
  322  hr_cq->cq_depth = cq_entries;  [in set_cq_param()]
hns_roce_device.h
  411  u32 cq_depth;  [member]
hns_roce_hw_v2.h
  1180  __le32 cq_depth;  [member]
hns_roce_hw_v2.c
  3465  return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :  [in get_sw_cqe_v2()]
  3548  hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));  [in hns_roce_v2_write_cqc()]
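
The get_sw_cqe_v2() and hns_roce_v2_write_cqc() hits point at the owner-bit convention used by power-of-two CQs: hardware is told ilog2(cq_depth) as the ring shift, the producer flips an owner bit in each CQE on every lap around the ring, and the consumer treats entry n as new when that bit differs from bit log2(depth) of its free-running index. The sketch below is a generic illustration with assumed structures, not the hns driver's code.

#include <stddef.h>
#include <stdint.h>

struct sw_cqe {
    uint32_t payload;
    uint8_t  owner;             /* toggled by the producer once per lap */
};

struct sw_cq {
    struct sw_cqe *ring;
    uint32_t cq_depth;          /* must be a power of two */
    uint32_t cons_index;        /* free-running consumer counter */
};

/* Return entry n if it was written for the current lap, otherwise NULL.
 * The ring starts zeroed, so first-lap writes set owner to 1. */
static struct sw_cqe *get_sw_cqe(struct sw_cq *cq, uint32_t n)
{
    struct sw_cqe *cqe = &cq->ring[n & (cq->cq_depth - 1)];
    uint8_t lap = !!(n & cq->cq_depth);     /* flips each time n wraps the ring */

    return (cqe->owner ^ lap) ? cqe : NULL;
}
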
/linux-6.6.21/drivers/infiniband/hw/efa/
efa_com_cmd.c
  163  create_cmd.cq_depth = params->cq_depth;  [in efa_com_create_cq()]
  191  result->actual_depth = params->cq_depth;  [in efa_com_create_cq()]
efa_com_cmd.h
  73   u16 cq_depth;  [member]
efa_admin_cmds_defs.h
  458  u16 cq_depth;  [member]
efa_verbs.c
  1153  params.cq_depth = entries;  [in efa_create_cq()]
/linux-6.6.21/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
  2141  u32 cq_depth;  [in bna_rx_res_req(), local]
  2147  cq_depth = roundup_pow_of_two(dq_depth + hq_depth);  [in bna_rx_res_req()]
  2149  cq_size = cq_depth * BFI_CQ_WI_SIZE;  [in bna_rx_res_req()]
  2277  u32 cq_depth, i;  [in bna_rx_create(), local]
  2437  cq_depth = rx_cfg->q0_depth +  [in bna_rx_create()]
  2443  cq_depth = roundup_pow_of_two(cq_depth);  [in bna_rx_create()]
  2444  rxp->cq.ccb->q_depth = cq_depth;  [in bna_rx_create()]
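
The bna_rx_res_req() and bna_rx_create() hits size one completion queue for several feeder queues: the data and header queue depths are summed, presumably so the CQ can hold a completion for every descriptor those queues can post, and the result is rounded up to a power of two. A plain C sketch of that sizing, with a simple stand-in for the kernel's roundup_pow_of_two():

#include <stdint.h>

/* Smallest power of two >= n (assumes n <= 1u << 31; returns 1 for n == 0). */
static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
    uint32_t p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

/* A shared CQ must be at least as deep as all of its feeder queues combined. */
static uint32_t size_shared_cq(uint32_t dq_depth, uint32_t hq_depth)
{
    return roundup_pow_of_two_u32(dq_depth + hq_depth);
}
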
/linux-6.6.21/drivers/net/ethernet/amazon/ena/
ena_admin_defs.h
  300  u16 cq_depth;  [member]
ena_com.c
  1407  create_cmd.cq_depth = io_cq->q_depth;  [in ena_com_create_io_cq()]
/linux-6.6.21/include/linux/
hisi_acc_qm.h
  399  u16 cq_depth;  [member]
/linux-6.6.21/kernel/locking/
lockdep.c
  1728  unsigned int cq_depth;  [in __bfs(), local]
  1816  cq_depth = __cq_get_elem_count(cq);  [in __bfs()]
  1817  if (max_bfs_queue_depth < cq_depth)  [in __bfs()]
  1818  max_bfs_queue_depth = cq_depth;  [in __bfs()]
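
Unlike the driver hits above, the lockdep matches use cq_depth for something different: the current element count of the breadth-first-search circular queue, recorded as a high-water mark in max_bfs_queue_depth for dependency-graph statistics. A minimal sketch of that bookkeeping, with an assumed ring layout:

static unsigned int max_bfs_queue_depth;    /* high-water mark across searches */

struct circular_queue {
    unsigned int head, tail, size;          /* assumed ring bookkeeping */
};

static unsigned int cq_elem_count(const struct circular_queue *cq)
{
    return (cq->tail + cq->size - cq->head) % cq->size;
}

/* Call after each enqueue during the breadth-first search. */
static void note_bfs_depth(const struct circular_queue *cq)
{
    unsigned int cq_depth = cq_elem_count(cq);

    if (max_bfs_queue_depth < cq_depth)
        max_bfs_queue_depth = cq_depth;
}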