/linux-6.6.21/drivers/net/ethernet/fungible/funcore/

D | fun_queue.c
      126  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,    in fun_cq_create() argument
      138  if (cq_depth > fdev->q_depth)    in fun_cq_create()
      146  cq_depth - 1, dma_addr, tailroom,    in fun_cq_create()
      292  max = funq->cq_depth - 1;    in __fun_process_cq()
      305  if (++funq->cq_head == funq->cq_depth) {    in __fun_process_cq()
      366  funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,    in fun_alloc_cqes()
      388  fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,    in fun_free_queue()
      441  funq->cq_depth = req->cq_depth;    in fun_alloc_queue()
      495  funq->cqe_size_log2, funq->cq_depth,    in fun_create_cq()

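The hits at lines 292 and 305 of fun_queue.c show the usual completion-queue consumer pattern: cq_head walks a ring of cq_depth entries and wraps back to zero. Many NVMe-style drivers also flip a phase flag on wrap so new completions can be told apart from stale ones; that flip is not visible in the hits above, so the sketch below is a hedged restatement of the pattern, with illustrative names (demo_cq, demo_cq_advance) rather than the driver's.

    /*
     * Minimal sketch of the consumer-side wrap pattern suggested by the
     * fun_queue.c hits at lines 292 and 305 above.  Names are illustrative;
     * the phase handling is an assumption, not shown in the hits.
     */
    #include <stdbool.h>

    struct demo_cq {
        unsigned int cq_head;   /* next entry to consume */
        unsigned int cq_depth;  /* number of entries in the ring */
        bool phase;             /* expected phase of valid entries */
    };

    static void demo_cq_advance(struct demo_cq *cq)
    {
        if (++cq->cq_head == cq->cq_depth) {
            cq->cq_head = 0;        /* wrap around the ring */
            cq->phase = !cq->phase; /* toggle the expected phase */
        }
    }
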
D | fun_queue.h
       45  u32 cq_depth;    member
      122  u32 cq_depth;    member
      138  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,

D | fun_dev.c
      230  .cq_depth = areq->cq_depth,    in fun_enable_admin_queue()
      243  areq->cq_depth < AQA_MIN_QUEUE_SIZE ||    in fun_enable_admin_queue()
      244  areq->cq_depth > AQA_MAX_QUEUE_SIZE)    in fun_enable_admin_queue()
      270  (funq->cq_depth - 1) << AQA_ACQS_SHIFT,    in fun_enable_admin_queue()

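The fun_dev.c hits at lines 243-244 and 270 show the admin-CQ setup shape: range-check the requested depth, then program it 0-based (depth - 1) into a register field. A small sketch under those assumptions; the DEMO_* limits and the shift value are placeholders, not the driver's real AQA_* layout.

    /* Hedged restatement of the range check and 0-based register encoding. */
    #include <stdint.h>

    #define DEMO_MIN_QUEUE_SIZE 2u
    #define DEMO_MAX_QUEUE_SIZE 4096u
    #define DEMO_ACQS_SHIFT     16u   /* placeholder field position */

    static int demo_encode_admin_cq(uint32_t cq_depth, uint32_t *reg)
    {
        if (cq_depth < DEMO_MIN_QUEUE_SIZE || cq_depth > DEMO_MAX_QUEUE_SIZE)
            return -1;                                 /* depth out of range */

        *reg |= (cq_depth - 1) << DEMO_ACQS_SHIFT;     /* field holds depth - 1 */
        return 0;
    }
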
D | fun_dev.h
       92  u16 cq_depth;    member

/linux-6.6.21/drivers/net/ethernet/fungible/funeth/

D | funeth.h
       75  unsigned int cq_depth;    member
      117  unsigned int cq_depth;    member

D | funeth_main.c
      508  err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,    in fun_alloc_rings()
      841  .cq_depth = fp->cq_depth,    in funeth_open()
     1642  .cq_depth = fp->cq_depth,    in fun_change_num_queues()
     1782  fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);    in fun_create_netdev()
     2001  .cq_depth = ADMIN_CQ_DEPTH,    in funeth_probe()

D | funeth_ethtool.c
      599  .cq_depth = 2 * ring->rx_pending,    in fun_set_ringparam()
      611  fp->cq_depth = 2 * fp->rq_depth;    in fun_set_ringparam()

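Both fun_set_ringparam() hits keep the RX completion queue at twice the RX ring size when rings are resized. The factor comes straight from the hits; the rationale is the driver's. Restated with an illustrative name:

    /* The 2x rule from the funeth_ethtool.c hits above, illustrative name. */
    static unsigned int demo_rx_cq_depth(unsigned int rq_depth)
    {
        return 2 * rq_depth;   /* completion ring sized at twice the RX ring */
    }
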
/linux-6.6.21/include/uapi/misc/uacce/

D | hisi_qm.h
       27  __u16 cq_depth;    member

/linux-6.6.21/drivers/crypto/hisilicon/

D | qm.c
      239  #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \    argument
      240  ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
      878  if (qp->qp_status.cq_head == qp->cq_depth - 1) {    in qm_cq_head_update()
     1913  memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);    in qm_create_qp_nolock()
     2036  cqc->w8 = cpu_to_le16(qp->cq_depth - 1);    in qm_cq_ctx_cfg()
     2038  cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));    in qm_cq_ctx_cfg()
     2495  qp_info.cq_depth = qp->cq_depth;    in hisi_qm_uacce_ioctl()
     2645  u16 sq_depth, cq_depth;    in qm_alloc_uacce() local
     2687  qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);    in qm_alloc_uacce()
     2691  sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>    in qm_alloc_uacce()
      [all …]

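Lines 239-240, 878 and 2036-2038 of qm.c show the same 0-based convention as above: the CQ context stores cq_depth - 1, and the consumer head wraps when it reaches the last slot. The dword-3 encoding, restated as a sketch; the shift below is a placeholder, not HiSilicon's actual QM_CQ_CQE_SIZE_SHIFT value.

    /* Hedged restatement of QM_MK_CQC_DW3_V2(): low bits carry (depth - 1),
     * the CQE size is packed into a higher field.  Placeholder shift. */
    #include <stdint.h>

    #define DEMO_CQE_SIZE_SHIFT 16u

    static inline uint32_t demo_mk_cqc_dw3(uint32_t cqe_sz, uint32_t cq_depth)
    {
        return (cq_depth - 1) | (cqe_sz << DEMO_CQE_SIZE_SHIFT);
    }
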
D | debugfs.c
      298  u16 sq_depth = qm->qp_array->cq_depth;    in qm_sq_dump()
      332  ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);    in qm_cq_dump()

/linux-6.6.21/drivers/infiniband/hw/hns/

D | hns_roce_restrack.c
       21  if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))    in hns_roce_fill_res_cq_entry()

D | hns_roce_cq.c
      207  buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;    in alloc_cq_buf()
      322  hr_cq->cq_depth = cq_entries;    in set_cq_param()

D | hns_roce_device.h
      411  u32 cq_depth;    member

D | hns_roce_hw_v2.h
     1180  __le32 cq_depth;    member

D | hns_roce_hw_v2.c
     3465  return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :    in get_sw_cqe_v2()
     3548  hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));    in hns_roce_v2_write_cqc()

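get_sw_cqe_v2() at line 3465 relies on cq_depth being a power of two: (n & cq_depth) flips each time the consumer index n completes a lap of the ring, and XOR-ing that lap parity with the CQE's owner bit separates software-owned entries from hardware-owned ones. Line 3548 writes ilog2(cq_depth) into the CQ context for the same reason. A sketch of the test with illustrative names:

    /* Hedged sketch of the owner-bit check at line 3465 above. */
    #include <stdbool.h>

    static bool demo_cqe_ready(unsigned int owner_bit, unsigned int n,
                               unsigned int cq_depth /* power of two */)
    {
        /* (n & cq_depth) toggles once per lap of the ring; a mismatch with
         * the owner bit means the CQE is ready for software. */
        return (owner_bit ^ !!(n & cq_depth)) != 0;
    }
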
/linux-6.6.21/drivers/infiniband/hw/efa/

D | efa_com_cmd.c
      163  create_cmd.cq_depth = params->cq_depth;    in efa_com_create_cq()
      191  result->actual_depth = params->cq_depth;    in efa_com_create_cq()

D | efa_com_cmd.h
       73  u16 cq_depth;    member

D | efa_admin_cmds_defs.h
      458  u16 cq_depth;    member

D | efa_verbs.c
     1153  params.cq_depth = entries;    in efa_create_cq()

/linux-6.6.21/drivers/net/ethernet/brocade/bna/

D | bna_tx_rx.c
     2141  u32 cq_depth;    in bna_rx_res_req() local
     2147  cq_depth = roundup_pow_of_two(dq_depth + hq_depth);    in bna_rx_res_req()
     2149  cq_size = cq_depth * BFI_CQ_WI_SIZE;    in bna_rx_res_req()
     2277  u32 cq_depth, i;    in bna_rx_create() local
     2437  cq_depth = rx_cfg->q0_depth +    in bna_rx_create()
     2443  cq_depth = roundup_pow_of_two(cq_depth);    in bna_rx_create()
     2444  rxp->cq.ccb->q_depth = cq_depth;    in bna_rx_create()

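bna_rx_res_req() sizes the completion queue to cover every data-queue and header-queue entry, rounds the sum up to a power of two, and converts to bytes with the work-item size; bna_rx_create() repeats the rounding when it fills in the CCB. The arithmetic restated, with an illustrative helper standing in for the kernel's roundup_pow_of_two() and wi_size playing the role of BFI_CQ_WI_SIZE:

    /* Restatement of the sizing arithmetic at lines 2141-2149 above. */
    static unsigned int demo_roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    static unsigned int demo_cq_bytes(unsigned int dq_depth,
                                      unsigned int hq_depth,
                                      unsigned int wi_size)
    {
        unsigned int cq_depth = demo_roundup_pow_of_two(dq_depth + hq_depth);

        return cq_depth * wi_size;   /* bytes needed for the CQ ring */
    }
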
/linux-6.6.21/drivers/net/ethernet/amazon/ena/

D | ena_admin_defs.h
      300  u16 cq_depth;    member

D | ena_com.c
     1407  create_cmd.cq_depth = io_cq->q_depth;    in ena_com_create_io_cq()

/linux-6.6.21/include/linux/

D | hisi_acc_qm.h
      399  u16 cq_depth;    member

/linux-6.6.21/kernel/locking/

D | lockdep.c
     1728  unsigned int cq_depth;    in __bfs() local
     1816  cq_depth = __cq_get_elem_count(cq);    in __bfs()
     1817  if (max_bfs_queue_depth < cq_depth)    in __bfs()
     1818  max_bfs_queue_depth = cq_depth;    in __bfs()

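In __bfs(), cq_depth is not a hardware ring at all: it samples the occupancy of the circular queue used for the breadth-first search over the lock dependency graph, and the running maximum is kept in max_bfs_queue_depth as a statistic. The high-water-mark update, restated with illustrative names standing in for the lockdep globals:

    /* Hedged restatement of the statistic kept at lines 1816-1818 above. */
    static unsigned int demo_max_bfs_queue_depth;

    static void demo_note_bfs_depth(unsigned int cq_depth)
    {
        if (demo_max_bfs_queue_depth < cq_depth)
            demo_max_bfs_queue_depth = cq_depth;   /* record the new maximum */
    }
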