/linux-6.1.9/drivers/infiniband/hw/irdma/

  user.h
    292: void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
    351: struct irdma_cqe *cq_base;    (member)
    386: struct irdma_cqe *cq_base;    (member)

  uk.c
    968:  void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)    (in irdma_uk_cq_resize(), argument)
    970:  cq->cq_base = cq_base;    (in irdma_uk_cq_resize())
    1083: ext_cqe = cq->cq_base[peek_head].buf;    (in irdma_uk_cq_poll_cmpl())
    1492: cq->cq_base = info->cq_base;    (in irdma_uk_cq_init())
    1519: cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;    (in irdma_uk_clean_cq())
    1521: cqe = cq->cq_base[cq_head].buf;    (in irdma_uk_clean_cq())

  defs.h
    942: (_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
    947: ((_cq)->cq_base))[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \

  type.h
    667: struct irdma_cqe *cq_base;    (member)
    813: struct irdma_cqe *cq_base;    (member)

  puda.c
    238: ext_cqe = cq_uk->cq_base[peek_head].buf;    (in irdma_puda_poll_info())
    795: init_info->cq_base = mem->va;    (in irdma_puda_cq_create())

  ctrl.c
    2626: irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);    (in irdma_sc_cq_resize())
    4160: cq->cq_uk.cq_base = info->cq_base;    (in irdma_sc_ccq_init())

  verbs.c
    1888: info.cq_base = kmem_buf.va;    (in irdma_resize_cq())
    2103: ukinfo->cq_base = iwcq->kmem.va;    (in irdma_create_cq())

  hw.c
    1027: info.cq_base = ccq->mem_cq.va;    (in irdma_create_ccq())

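A minimal sketch of the resize pattern the uk.c hits above point at: irdma_uk_cq_resize() simply repoints the CQ's CQE array at the caller's new buffer. Only the signature (user.h:292, uk.c:968) and the cq_base assignment (uk.c:970) come from the listing; the irdma_cqe layout and the cq_size bookkeeping below are illustrative assumptions.

#include <linux/types.h>

/* Stand-in definitions: only the cq_base member is confirmed by the
 * listing (type.h:667); the CQE layout and the cq_size field are
 * illustrative assumptions. */
struct irdma_cqe {
	__le64 buf[4];
};

struct irdma_cq_uk {
	struct irdma_cqe *cq_base;
	u32 cq_size;
};

/* Shape taken from uk.c:968-970: resize repoints the CQE array at the
 * caller's freshly allocated buffer. */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;	/* assumed bookkeeping, not shown above */
}

The caller side is visible at ctrl.c:2626, where irdma_sc_cq_resize() passes the newly allocated info->cq_base and info->cq_size straight through.
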
/linux-6.1.9/drivers/misc/habanalabs/common/

  irq.c
    132: struct hl_cq_entry *cq_entry, *cq_base;    (in hl_irq_handler_cq(), local)
    141: cq_base = cq->kernel_address;    (in hl_irq_handler_cq())
    144: cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];    (in hl_irq_handler_cq())

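The irq.c hits at 141 and 144 are the classic completion-queue walk: treat the CQ's kernel_address as an array of hl_cq_entry and index it with the consumer index. A hedged restatement with placeholder types (the struct bodies and hl_cq_current_entry() are not the real habanalabs definitions):

#include <linux/types.h>

/* Placeholder types; the real hl_cq / hl_cq_entry definitions live in the
 * habanalabs headers and are not reproduced by the listing. */
struct hl_cq_entry {
	__le32 data;
};

struct hl_cq {
	void *kernel_address;		/* CPU-visible base of the CQ ring */
	u32 ci;				/* consumer index */
};

/* Hypothetical helper restating irq.c:141-144. */
static struct hl_cq_entry *hl_cq_current_entry(struct hl_cq *cq)
{
	struct hl_cq_entry *cq_base = cq->kernel_address;

	return &cq_base[cq->ci];
}
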
/linux-6.1.9/Documentation/networking/device_drivers/qlogic/

  qlge.rst
    79: .cq_base = (void *)0x0,

/linux-6.1.9/drivers/net/ethernet/pensando/ionic/

  ionic_lif.c
    382: if (qcq->cq_base) {    (in ionic_qcq_free())
    383: dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);    (in ionic_qcq_free())
    384: qcq->cq_base = NULL;    (in ionic_qcq_free())
    513: void *q_base, *cq_base, *sg_base;    (in ionic_qcq_alloc(), local)
    585: cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);    (in ionic_qcq_alloc())
    587: ionic_cq_map(&new->cq, cq_base, cq_base_pa);    (in ionic_qcq_alloc())
    603: new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,    (in ionic_qcq_alloc())
    605: if (!new->cq_base) {    (in ionic_qcq_alloc())
    610: cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);    (in ionic_qcq_alloc())
    612: ionic_cq_map(&new->cq, cq_base, cq_base_pa);    (in ionic_qcq_alloc())
    [all …]    (further matches truncated)

  ionic_lif.h
    67: void *cq_base;    (member)

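The ionic hits split into an alloc path and a free path. Below is a sketch of that pattern on a simplified stand-in struct (my_qcq and the my_qcq_cq_* helpers are hypothetical names); dma_alloc_coherent()/dma_free_coherent() are the standard DMA-API calls, and the page-alignment plus ionic_cq_map() step is quoted from the listing in a comment rather than re-implemented.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>

/* Simplified stand-in for ionic_qcq: only the three fields named in the
 * listing are modelled. */
struct my_qcq {
	void *cq_base;
	dma_addr_t cq_base_pa;
	u32 cq_size;
};

/* Alloc side, after ionic_lif.c:603-612: one coherent DMA block backs the
 * CQ ring.  The real driver then page-aligns the addresses before mapping:
 *     cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
 *     ionic_cq_map(&new->cq, cq_base, cq_base_pa);
 */
static int my_qcq_cq_alloc(struct device *dev, struct my_qcq *qcq)
{
	qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
					  &qcq->cq_base_pa, GFP_KERNEL);
	if (!qcq->cq_base)
		return -ENOMEM;

	return 0;
}

/* Free side, after ionic_lif.c:382-384: release the block and clear the
 * pointer so a repeated teardown is a no-op. */
static void my_qcq_cq_free(struct device *dev, struct my_qcq *qcq)
{
	if (!qcq->cq_base)
		return;

	dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
	qcq->cq_base = NULL;
}
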
/linux-6.1.9/drivers/infiniband/hw/bnxt_re/

  qplib_fp.h
    343: #define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)

  qplib_fp.c
    1421: struct cq_base *hw_cqe;    (in __clean_cq())
    2048: hwq_attr.stride = sizeof(struct cq_base);    (in bnxt_qplib_create_cq())
    2236: struct cq_base *peek_hwcqe;    (in do_wa9060())
    2602: struct cq_base *hw_cqe;    (in bnxt_qplib_is_cq_empty())
    2854: struct cq_base *hw_cqe;    (in bnxt_qplib_poll_cq())

  ib_verbs.c
    2849: entries * sizeof(struct cq_base),    (in bnxt_re_create_cq())
    3882: resp.cqe_sz = sizeof(struct cq_base);    (in bnxt_re_alloc_ucontext())

  roce_hsi.h
    546: struct cq_base {    (struct definition)

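In bnxt_re, cq_base is not a pointer field but the hardware CQE structure itself (roce_hsi.h:546), so sizeof(struct cq_base) appears wherever CQ memory is sized. A sketch with an illustrative struct body (the real layout is in roce_hsi.h) and a hypothetical sizing helper:

#include <linux/types.h>

/* Illustrative body only; the real struct cq_base is defined at
 * roce_hsi.h:546.  What the listing does confirm is that its size is the
 * hardware-queue stride (qplib_fp.c:2048) and the per-CQE size reported
 * to userspace (ib_verbs.c:3882). */
struct cq_base {
	u8 cqe_type_toggle;
	u8 reserved[31];
};

/* Hypothetical helper: total CQ ring memory is entries * CQE size, as in
 * bnxt_re_create_cq() (ib_verbs.c:2849). */
static inline size_t bnxt_cq_ring_bytes(u32 entries)
{
	return entries * sizeof(struct cq_base);
}
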
/linux-6.1.9/drivers/staging/qlge/

  qlge_main.c
    987:  rx_ring->curr_entry = rx_ring->cq_base;    (in qlge_update_cq())
    2840: if (rx_ring->cq_base) {    (in qlge_free_rx_resources())
    2843: rx_ring->cq_base, rx_ring->cq_base_dma);    (in qlge_free_rx_resources())
    2844: rx_ring->cq_base = NULL;    (in qlge_free_rx_resources())
    2857: rx_ring->cq_base =    (in qlge_alloc_rx_resources())
    2861: if (!rx_ring->cq_base) {    (in qlge_alloc_rx_resources())
    2978: rx_ring->curr_entry = rx_ring->cq_base;    (in qlge_start_rx_ring())

  qlge.h
    1463: void *cq_base;    (member)

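qlge keeps a curr_entry cursor inside the DMA-coherent cq_base ring and rewinds it to cq_base when the ring is (re)started. The sketch below is generic: only the rewind is taken from the listing; the cq_end/entry_size fields and the wrap-around advance are assumptions about how such a cursor is usually driven.

#include <linux/types.h>

/* Generic cursor sketch over a cq_base ring. */
struct my_cq_ring {
	void *cq_base;			/* DMA-coherent ring (qlge.h:1463) */
	void *cq_end;			/* assumed: cq_base + ring length */
	void *curr_entry;		/* next completion to consume */
	size_t entry_size;		/* assumed per-entry size */
};

/* Rewind, as in qlge_update_cq() (line 987) and qlge_start_rx_ring()
 * (line 2978). */
static void my_cq_ring_restart(struct my_cq_ring *ring)
{
	ring->curr_entry = ring->cq_base;
}

/* Assumed advance-with-wrap; not shown in the listing. */
static void my_cq_ring_advance(struct my_cq_ring *ring)
{
	/* void pointer arithmetic is the GCC extension the kernel builds with */
	ring->curr_entry += ring->entry_size;
	if (ring->curr_entry == ring->cq_end)
		ring->curr_entry = ring->cq_base;	/* wrap back to the base */
}
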
/linux-6.1.9/drivers/misc/habanalabs/gaudi2/

  gaudi2.c
    6043: struct hl_cq_entry *cq_base;    (in gaudi2_send_job_to_kdma(), local)
    6077: cq_base = cq->kernel_address;    (in gaudi2_send_job_to_kdma())
    6078: polling_addr = (u32 *)&cq_base[cq->ci];    (in gaudi2_send_job_to_kdma())

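gaudi2_send_job_to_kdma() reuses the same indexing as the common irq handler, only it hands the entry's address to KDMA as a u32 polling target. A one-line restatement, reusing the placeholder hl_cq/hl_cq_entry types from the habanalabs/common sketch above (my_kdma_poll_addr is a hypothetical name):

static u32 *my_kdma_poll_addr(struct hl_cq *cq)
{
	struct hl_cq_entry *cq_base = cq->kernel_address;

	return (u32 *)&cq_base[cq->ci];	/* gaudi2.c:6078 does the same cast */
}
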