/linux-6.6.21/drivers/atm/

nicstar.c
    115  #define scq_virt_to_bus(scq, p) \  [argument]
    116      (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
    126  static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
    134  static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
    137  static void drain_scq(ns_dev * card, scq_info * scq, int pos);
    247  free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);  in nicstar_remove_one()
    863  scq_info *scq;  in get_scq()  [local]
    868  scq = kmalloc(sizeof(*scq), GFP_KERNEL);  in get_scq()
    869  if (!scq)  in get_scq()
    871  scq->org = dma_alloc_coherent(&card->pcidev->dev,  in get_scq()
    [all …]
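
The get_scq() excerpt shows the usual split between the SCQ bookkeeping structure and its DMA ring: the descriptor is kmalloc()ed, the ring itself comes from dma_alloc_coherent(), and scq_virt_to_bus() maps a CPU pointer inside the ring back to the bus address the adapter needs by adding its offset from ->org to the DMA handle. A minimal sketch of that pattern; the size constant and all demo_* names are invented for illustration, not the driver's API:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    #define DEMO_SCQ_SIZE 512               /* assumed ring size in bytes */

    struct demo_scq {
        void *org;                          /* CPU address of the ring */
        dma_addr_t dma;                     /* bus address of the same ring */
    };

    /* same offset trick as scq_virt_to_bus() in the excerpt above */
    #define demo_virt_to_bus(scq, p) \
        ((scq)->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))

    static struct demo_scq *demo_get_scq(struct device *dev)
    {
        struct demo_scq *scq;

        scq = kmalloc(sizeof(*scq), GFP_KERNEL);    /* bookkeeping only */
        if (!scq)
            return NULL;

        scq->org = dma_alloc_coherent(dev, DEMO_SCQ_SIZE, &scq->dma,
                                      GFP_KERNEL);
        if (!scq->org) {
            kfree(scq);                     /* unwind the partial allocation */
            return NULL;
        }
        return scq;
    }

    static void demo_free_scq(struct device *dev, struct demo_scq *scq)
    {
        dma_free_coherent(dev, DEMO_SCQ_SIZE, scq->org, scq->dma);
        kfree(scq);
    }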

idt77252.c
    639  struct scq_info *scq;  in alloc_scq()  [local]
    641  scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);  in alloc_scq()
    642  if (!scq)  in alloc_scq()
    644  scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,  in alloc_scq()
    645      &scq->paddr, GFP_KERNEL);  in alloc_scq()
    646  if (scq->base == NULL) {  in alloc_scq()
    647  kfree(scq);  in alloc_scq()
    651  scq->next = scq->base;  in alloc_scq()
    652  scq->last = scq->base + (SCQ_ENTRIES - 1);  in alloc_scq()
    653  atomic_set(&scq->used, 0);  in alloc_scq()
    [all …]
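
Beyond the allocation, alloc_scq() primes the ring cursors (->next, ->last) and an atomic count of in-flight entries. The sketch below isolates that bookkeeping; the ring depth, entry layout, the full check and the drain-side helper are assumptions, since the excerpt only shows the initialisation:

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define DEMO_SCQ_ENTRIES 64             /* assumed ring depth */

    struct demo_scqe {                      /* assumed 16-byte SCQ entry */
        __le32 word[4];
    };

    struct demo_scq_info {
        struct demo_scqe *base, *next, *last;
        atomic_t used;                      /* entries handed to the HW */
    };

    /* base points at the dma_alloc_coherent() buffer from the excerpt */
    static void demo_scq_init(struct demo_scq_info *scq, struct demo_scqe *base)
    {
        scq->base = base;
        scq->next = scq->base;              /* producer cursor */
        scq->last = scq->base + (DEMO_SCQ_ENTRIES - 1);
        atomic_set(&scq->used, 0);
    }

    /* Claim the next entry, wrapping after the last one; NULL when full. */
    static struct demo_scqe *demo_scq_get_entry(struct demo_scq_info *scq)
    {
        struct demo_scqe *e;

        if (atomic_read(&scq->used) >= DEMO_SCQ_ENTRIES)
            return NULL;

        e = scq->next;
        scq->next = (scq->next == scq->last) ? scq->base : scq->next + 1;
        atomic_inc(&scq->used);
        return e;
    }

    /* drain_scq()-style completion: release n consumed entries. */
    static void demo_scq_put_entries(struct demo_scq_info *scq, int n)
    {
        atomic_sub(n, &scq->used);
    }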

nicstar.h
    702  scq_info *scq;  /* To keep track of the SCQ */  [member]

idt77252.h
    217  struct scq_info *scq;  /* To keep track of the SCQ */  [member]

/linux-6.6.21/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_qp.c
    65  static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,  in pvrdma_lock_cqs()  [argument]
    68  __acquires(scq->cq_lock) __acquires(rcq->cq_lock)  in pvrdma_lock_cqs()
    70  if (scq == rcq) {  in pvrdma_lock_cqs()
    71  spin_lock_irqsave(&scq->cq_lock, *scq_flags);  in pvrdma_lock_cqs()
    73  } else if (scq->cq_handle < rcq->cq_handle) {  in pvrdma_lock_cqs()
    74  spin_lock_irqsave(&scq->cq_lock, *scq_flags);  in pvrdma_lock_cqs()
    79  spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,  in pvrdma_lock_cqs()
    84  static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,  in pvrdma_unlock_cqs()  [argument]
    87  __releases(scq->cq_lock) __releases(rcq->cq_lock)  in pvrdma_unlock_cqs()
    89  if (scq == rcq) {  in pvrdma_unlock_cqs()
    [all …]
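
pvrdma_lock_cqs() has to take two CQ locks that may be the same object: lock once when scq == rcq, otherwise always lock the CQ with the smaller cq_handle first and take the second with the _nested variant so lockdep accepts a second lock of the same class. A sketch of that ordering pattern with an illustrative CQ type; the unlock order shown is an assumption, since the excerpt truncates the unlock path:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_cq {                        /* stand-in for the driver's CQ */
        spinlock_t cq_lock;
        u32 cq_handle;
    };

    static void demo_lock_cqs(struct demo_cq *scq, struct demo_cq *rcq,
                              unsigned long *scq_flags, unsigned long *rcq_flags)
        __acquires(scq->cq_lock) __acquires(rcq->cq_lock)
    {
        if (scq == rcq) {
            spin_lock_irqsave(&scq->cq_lock, *scq_flags);
            __acquire(rcq->cq_lock);        /* sparse annotation only */
        } else if (scq->cq_handle < rcq->cq_handle) {
            spin_lock_irqsave(&scq->cq_lock, *scq_flags);
            spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
                                     SINGLE_DEPTH_NESTING);
        } else {
            spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
            spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
                                     SINGLE_DEPTH_NESTING);
        }
    }

    static void demo_unlock_cqs(struct demo_cq *scq, struct demo_cq *rcq,
                                unsigned long *scq_flags, unsigned long *rcq_flags)
        __releases(scq->cq_lock) __releases(rcq->cq_lock)
    {
        if (scq == rcq) {
            __release(rcq->cq_lock);
            spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
        } else if (scq->cq_handle < rcq->cq_handle) {
            /* drop the second-taken lock first */
            spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
            spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
        } else {
            spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
            spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
        }
    }

Locking by ascending handle guarantees every path acquires the two locks in the same global order, which is what rules out an ABBA deadlock between concurrent QP operations.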

/linux-6.6.21/drivers/infiniband/sw/rxe/

rxe_qp.c
    357  struct rxe_cq *scq = to_rcq(init->send_cq);  in rxe_qp_from_init()  [local]
    363  rxe_get(scq);  in rxe_qp_from_init()
    369  qp->scq = scq;  in rxe_qp_from_init()
    373  atomic_inc(&scq->num_wq);  in rxe_qp_from_init()
    397  atomic_dec(&scq->num_wq);  in rxe_qp_from_init()
    401  qp->scq = NULL;  in rxe_qp_from_init()
    406  rxe_put(scq);  in rxe_qp_from_init()
    847  if (qp->scq) {  in rxe_qp_do_cleanup()
    848  atomic_dec(&qp->scq->num_wq);  in rxe_qp_do_cleanup()
    849  rxe_put(qp->scq);  in rxe_qp_do_cleanup()
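
rxe_qp_from_init() pins the send CQ (rxe_get()), records it in qp->scq and bumps the CQ's num_wq counter; the error path and rxe_qp_do_cleanup() undo exactly those steps in reverse. A sketch of that take/rollback pattern, modelling the driver's rxe_get()/rxe_put() helpers with a plain refcount_t and an assumed later init step:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/refcount.h>

    struct demo_cq {
        refcount_t ref;                 /* lifetime of the CQ object */
        atomic_t num_wq;                /* work queues attached to this CQ */
    };

    struct demo_qp {
        struct demo_cq *scq;
    };

    /* assumed stand-in for a later init step that may fail */
    static int demo_init_sq(struct demo_qp *qp)
    {
        return 0;
    }

    static int demo_qp_init(struct demo_qp *qp, struct demo_cq *scq)
    {
        int err;

        refcount_inc(&scq->ref);        /* the QP now pins its send CQ */
        qp->scq = scq;
        atomic_inc(&scq->num_wq);

        err = demo_init_sq(qp);
        if (err) {                      /* undo in reverse order */
            atomic_dec(&scq->num_wq);
            qp->scq = NULL;
            refcount_dec(&scq->ref);
        }
        return err;
    }

    static void demo_qp_cleanup(struct demo_qp *qp)
    {
        if (qp->scq) {
            atomic_dec(&qp->scq->num_wq);
            refcount_dec(&qp->scq->ref);
            qp->scq = NULL;
        }
    }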

rxe_comp.c
    466  rxe_cq_post(qp->scq, &cqe, 0);  in do_complete()
    583  err = rxe_cq_post(qp->scq, &cqe, 0);  in flush_send_wqe()
    585  rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);  in flush_send_wqe()

rxe_verbs.h
    212  struct rxe_cq *scq;  [member]

/linux-6.6.21/drivers/infiniband/hw/bnxt_re/

qplib_fp.c
    70  struct bnxt_qplib_cq *scq, *rcq;  in __bnxt_qplib_add_flush_qp()  [local]
    72  scq = qp->scq;  in __bnxt_qplib_add_flush_qp()
    76  dev_dbg(&scq->hwq.pdev->dev,  in __bnxt_qplib_add_flush_qp()
    79  list_add_tail(&qp->sq_flush, &scq->sqf_head);  in __bnxt_qplib_add_flush_qp()
    94  __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)  in bnxt_qplib_acquire_cq_flush_locks()
    96  spin_lock_irqsave(&qp->scq->flush_lock, *flags);  in bnxt_qplib_acquire_cq_flush_locks()
    97  if (qp->scq == qp->rcq)  in bnxt_qplib_acquire_cq_flush_locks()
    105  __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)  in bnxt_qplib_release_cq_flush_locks()
    107  if (qp->scq == qp->rcq)  in bnxt_qplib_release_cq_flush_locks()
    111  spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);  in bnxt_qplib_release_cq_flush_locks()
    [all …]
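
__bnxt_qplib_add_flush_qp() links the QP onto the flush lists of its send and receive CQs while bnxt_qplib_acquire_cq_flush_locks() holds both flush locks, taking only one lock when the QP shares a single CQ for both directions. A sketch with stand-in types (the list heads are assumed to be initialised when the CQ is created):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_cq {
        spinlock_t flush_lock;
        struct list_head sqf_head;      /* QPs whose SQ must be flushed */
        struct list_head rqf_head;      /* QPs whose RQ must be flushed */
    };

    struct demo_qp {
        struct demo_cq *scq, *rcq;
        struct list_head sq_flush;
        struct list_head rq_flush;
    };

    static void demo_acquire_flush_locks(struct demo_qp *qp, unsigned long *flags)
        __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
    {
        spin_lock_irqsave(&qp->scq->flush_lock, *flags);
        if (qp->scq == qp->rcq)
            __acquire(&qp->rcq->flush_lock);    /* sparse annotation only */
        else
            spin_lock(&qp->rcq->flush_lock);
    }

    static void demo_release_flush_locks(struct demo_qp *qp, unsigned long *flags)
        __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
    {
        if (qp->scq == qp->rcq)
            __release(&qp->rcq->flush_lock);
        else
            spin_unlock(&qp->rcq->flush_lock);
        spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
    }

    static void demo_add_flush_qp(struct demo_qp *qp)
    {
        unsigned long flags;

        demo_acquire_flush_locks(qp, &flags);
        list_add_tail(&qp->sq_flush, &qp->scq->sqf_head);
        list_add_tail(&qp->rq_flush, &qp->rcq->rqf_head);
        demo_release_flush_locks(qp, &flags);
    }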

ib_verbs.c
    808  __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)  in bnxt_re_lock_cqs()
    812  spin_lock_irqsave(&qp->scq->cq_lock, flags);  in bnxt_re_lock_cqs()
    813  if (qp->rcq != qp->scq)  in bnxt_re_lock_cqs()
    823  __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)  in bnxt_re_unlock_cqs()
    825  if (qp->rcq != qp->scq)  in bnxt_re_unlock_cqs()
    829  spin_unlock_irqrestore(&qp->scq->cq_lock, flags);  in bnxt_re_unlock_cqs()
    924  scq_nq = qplib_qp->scq->nq;  in bnxt_re_destroy_qp()
    1153  qp->qplib_qp.scq = qp1_qp->scq;  in bnxt_re_create_shadow_qp()
    1380  qplqp->scq = &cq->qplib_cq;  in bnxt_re_init_qp_attr()
    1381  qp->scq = cq;  in bnxt_re_init_qp_attr()
    [all …]

ib_verbs.h
    94  struct bnxt_re_cq *scq;  [member]

qplib_fp.h
    311  struct bnxt_qplib_cq *scq;  [member]

/linux-6.6.21/drivers/infiniband/hw/cxgb4/

ev.c
    139  cqid = qhp->attr.scq;  in c4iw_ev_dispatch()

qp.c
    200  struct t4_cq *rcq, struct t4_cq *scq,  in create_qp()  [argument]
    338  FW_RI_RES_WR_IQID_V(scq->cqid));  in create_qp()
    1812  wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);  in rdma_init()
    2188  qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;  in c4iw_create_qp()

iw_cxgb4.h
    450  u32 scq;  [member]

/linux-6.6.21/drivers/infiniband/hw/hns/

hns_roce_qp.c
    182  struct ib_cq *scq = init_attr->send_cq;  in get_least_load_bankid_for_qp()  [local]
    189  if (scq)  in get_least_load_bankid_for_qp()
    190  cqn = to_hr_cq(scq)->cqn;  in get_least_load_bankid_for_qp()
    193  if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))  in get_least_load_bankid_for_qp()
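
get_least_load_bankid_for_qp() spreads QPs across hardware banks but, when a send CQ is attached, restricts the choice to banks whose affinity matches the CQ's bank (cqn & CQ_BANKID_MASK) and then picks the least-loaded candidate. A sketch under an assumed bank count, mask and affinity mapping:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_QP_BANK_NUM    8
    #define DEMO_CQ_BANKID_MASK GENMASK(1, 0)   /* assumed CQ bank mask */

    struct demo_bank {
        u32 inuse;                              /* QPs currently in this bank */
    };

    /* assumed mapping from a QP bank id to its affine CQ bank id */
    static u32 demo_affinity_cq_bank(u8 qp_bank)
    {
        return qp_bank >> 1;
    }

    static u8 demo_least_load_bankid(struct demo_bank *bank, bool have_scq,
                                     unsigned long cqn)
    {
        u32 least_load = (u32)-1;
        u8 bankid = 0;
        u8 i;

        for (i = 0; i < DEMO_QP_BANK_NUM; i++) {
            /* skip banks that are not affine to the send CQ's bank */
            if (have_scq &&
                demo_affinity_cq_bank(i) != (cqn & DEMO_CQ_BANKID_MASK))
                continue;

            if (bank[i].inuse < least_load) {
                least_load = bank[i].inuse;
                bankid = i;
            }
        }
        return bankid;
    }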

/linux-6.6.21/drivers/infiniband/core/

uverbs_cmd.c
    1286  struct ib_cq *scq = NULL, *rcq = NULL;  in create_qp()  [local]
    1383  scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,  in create_qp()
    1386  rcq = rcq ?: scq;  in create_qp()
    1389  if (!pd || (!scq && has_sq)) {  in create_qp()
    1398  attr.send_cq = scq;  in create_qp()
    1459  if (scq)  in create_qp()
    1460  rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,  in create_qp()
    1462  if (rcq && rcq != scq)  in create_qp()
    1487  if (scq)  in create_qp()
    1488  rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,  in create_qp()
    [all …]
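
Three details stand out in this create_qp() excerpt: the send CQ is only required when the QP has an SQ, a missing receive CQ falls back to the send CQ (rcq = rcq ?: scq), and the put path skips rcq when it aliases scq so the shared object is not released twice. A sketch of that flow with illustrative get/put helpers standing in for the uverbs uobject lookup API:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_cq {
        atomic_t usecnt;
    };

    /* stand-ins for uobj_get_obj_read()/rdma_lookup_put_uobject() */
    static struct demo_cq *demo_cq_get(struct demo_cq *cq)
    {
        if (cq)
            atomic_inc(&cq->usecnt);
        return cq;
    }

    static void demo_cq_put(struct demo_cq *cq)
    {
        atomic_dec(&cq->usecnt);
    }

    static int demo_create_qp(struct demo_cq *send_cq, struct demo_cq *recv_cq,
                              bool has_sq)
    {
        struct demo_cq *scq = NULL, *rcq = NULL;

        if (has_sq) {
            scq = demo_cq_get(send_cq);
            if (!scq)
                return -EINVAL;         /* an SQ needs a send CQ */
        }
        rcq = demo_cq_get(recv_cq);
        rcq = rcq ?: scq;               /* single-CQ case: share the send CQ */

        /* ... attr.send_cq = scq; attr.recv_cq = rcq; build the QP ... */

        if (scq)
            demo_cq_put(scq);
        if (rcq && rcq != scq)          /* never drop the shared CQ twice */
            demo_cq_put(rcq);
        return 0;
    }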

/linux-6.6.21/drivers/infiniband/hw/erdma/

erdma_verbs.h
    233  struct erdma_cq *scq;  [member]

erdma_verbs.c
    64  FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);  in create_qp_cmd()
    89  FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);  in create_qp_cmd()
    938  qp->scq = to_ecq(attrs->send_cq);  in erdma_create_qp()
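
create_qp_cmd() encodes the send CQ number into a bitfield of the firmware request with FIELD_PREP() against ERDMA_CMD_CREATE_QP_CQN_MASK. A sketch of the same packing idiom; the field layout and request structure below are assumptions, not the erdma command format:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_CREATE_QP_CQN_MASK GENMASK(19, 0)   /* assumed CQN field */
    #define DEMO_CREATE_QP_QPN_MASK GENMASK(31, 20)  /* assumed QPN field */

    struct demo_create_qp_req {
        u32 cfg;                        /* QPN and send-CQ CQN packed together */
    };

    static void demo_fill_create_qp(struct demo_create_qp_req *req,
                                    u32 qpn, u32 scq_cqn)
    {
        req->cfg = FIELD_PREP(DEMO_CREATE_QP_QPN_MASK, qpn) |
                   FIELD_PREP(DEMO_CREATE_QP_CQN_MASK, scq_cqn);
    }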

/linux-6.6.21/drivers/net/ethernet/mellanox/mlx4/

resource_tracker.c
    112  struct res_cq *scq;  [member]
    2966  struct res_cq *scq;  in mlx4_RST2INIT_QP_wrapper()  [local]
    3004  err = get_res(dev, slave, scqn, RES_CQ, &scq);  in mlx4_RST2INIT_QP_wrapper()
    3008  scq = rcq;  in mlx4_RST2INIT_QP_wrapper()
    3025  atomic_inc(&scq->ref_count);  in mlx4_RST2INIT_QP_wrapper()
    3026  qp->scq = scq;  in mlx4_RST2INIT_QP_wrapper()
    4004  atomic_dec(&qp->scq->ref_count);  in mlx4_2RST_QP_wrapper()
    4717  atomic_dec(&qp->scq->ref_count);  in rem_slave_qps()

/linux-6.6.21/drivers/infiniband/sw/siw/

siw.h
    431  struct siw_cq *scq;  [member]

siw_verbs.c
    399  qp->scq = to_siw_cq(attrs->send_cq);  in siw_create_qp()
    626  qp->scq = qp->rcq = NULL;  in siw_destroy_qp()

siw_qp.c
    1066  struct siw_cq *cq = qp->scq;  in siw_sqe_complete()