/linux-6.6.21/drivers/infiniband/hw/irdma/
D | ctrl.c
   182  static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,  [in irdma_sc_add_arp_cache_entry() argument]
   189  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  [in irdma_sc_add_arp_cache_entry()]
   199  FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);  [in irdma_sc_add_arp_cache_entry()]
   207  irdma_sc_cqp_post_sq(cqp);  [in irdma_sc_add_arp_cache_entry()]
   219  static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,  [in irdma_sc_del_arp_cache_entry() argument]
   225  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  [in irdma_sc_del_arp_cache_entry()]
   231  FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);  [in irdma_sc_del_arp_cache_entry()]
   240  irdma_sc_cqp_post_sq(cqp);  [in irdma_sc_del_arp_cache_entry()]
   252  static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,  [in irdma_sc_manage_apbvt_entry() argument]
   259  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  [in irdma_sc_manage_apbvt_entry()]
   [all …]
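Every ctrl.c hit above is one instance of the same three-step command pattern: reserve the next send WQE with irdma_sc_cqp_get_next_send_wqe(), pack the descriptor with FIELD_PREP() (including IRDMA_CQPSQ_WQEVALID taken from cqp->polarity), and ring the doorbell with irdma_sc_cqp_post_sq(). The polarity bit is a phase flag that flips each time the ring wraps, so hardware can tell freshly written WQEs from stale ones without a shared consumer index. A minimal user-space model of that valid-bit scheme follows; the ring size, bit position, initial phase, and all toy_* names are illustrative, not the driver's real layout:

```c
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8                  /* illustrative; real CQP rings are larger */
#define WQEVALID  (1ULL << 63)       /* stand-in for IRDMA_CQPSQ_WQEVALID */

struct toy_cqp {
	uint64_t ring[RING_SIZE];
	unsigned int head;           /* producer index */
	unsigned int polarity;       /* flips on every ring wrap */
};

/* Reserve the next WQE slot; report the phase this WQE must carry. */
static uint64_t *get_next_send_wqe(struct toy_cqp *cqp, unsigned int *pol)
{
	uint64_t *wqe = &cqp->ring[cqp->head];

	*pol = cqp->polarity;
	if (++cqp->head == RING_SIZE) {  /* wrap: flip the phase */
		cqp->head = 0;
		cqp->polarity ^= 1;
	}
	return wqe;
}

static void post_cmd(struct toy_cqp *cqp, uint64_t payload)
{
	unsigned int pol;
	uint64_t *wqe = get_next_send_wqe(cqp, &pol);

	/* The real driver issues a write barrier before setting the valid
	 * bit so the device never parses a half-written descriptor. */
	*wqe = payload | (pol ? WQEVALID : 0);
}

int main(void)
{
	struct toy_cqp cqp = { .polarity = 1 }; /* initial phase: toy choice */

	for (uint64_t i = 0; i < 10; i++)       /* 10 posts force one wrap */
		post_cmd(&cqp, i);
	for (int i = 0; i < RING_SIZE; i++)     /* slots 0-1 were relapped */
		printf("slot %d valid=%d\n", i, !!(cqp.ring[i] & WQEVALID));
	return 0;
}
```

A consumer that tracks its own expected phase treats a slot as new only while the slot's valid bit matches that phase, which is why overwritten slots 0 and 1 above flip to the opposite value after the wrap.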
D | uda.h
    39  int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
    41  int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
    50  static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp,  [in irdma_sc_create_ah() argument]
    53  return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,  [in irdma_sc_create_ah()]
    57  static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,  [in irdma_sc_destroy_ah() argument]
    60  return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,  [in irdma_sc_destroy_ah()]
    64  static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,  [in irdma_sc_create_mcast_grp() argument]
    68  return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,  [in irdma_sc_create_mcast_grp()]
    72  static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,  [in irdma_sc_modify_mcast_grp() argument]
    76  return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,  [in irdma_sc_modify_mcast_grp()]
    [all …]
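These uda.h hits show a funnel idiom: create and destroy (and the multicast create/modify pair) differ only in the CQP opcode, so each is a static inline shim around a single worker, irdma_sc_access_ah() or irdma_access_mcast_grp(). A hedged sketch of the same shape, with hypothetical toy_* names in place of the driver's:

```c
#include <stdio.h>

/* Hypothetical opcodes standing in for IRDMA_CQP_OP_CREATE_ADDR_HANDLE
 * and IRDMA_CQP_OP_DESTROY_ADDR_HANDLE. */
enum toy_op { TOY_OP_CREATE_AH, TOY_OP_DESTROY_AH };

struct toy_ah_info { int ah_idx; };

/* One worker builds and posts the WQE; only the opcode differs. */
static int toy_access_ah(struct toy_ah_info *info, enum toy_op op)
{
	printf("post CQP WQE: op=%d ah_idx=%d\n", op, info->ah_idx);
	return 0;
}

/* The shims cost nothing at runtime but keep call sites readable. */
static inline int toy_create_ah(struct toy_ah_info *info)
{
	return toy_access_ah(info, TOY_OP_CREATE_AH);
}

static inline int toy_destroy_ah(struct toy_ah_info *info)
{
	return toy_access_ah(info, TOY_OP_DESTROY_AH);
}

int main(void)
{
	struct toy_ah_info info = { .ah_idx = 7 };

	toy_create_ah(&info);
	toy_destroy_ah(&info);
	return 0;
}
```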
D | utils.c
   429  struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,  [in irdma_alloc_and_get_cqp_request() argument]
   435  spin_lock_irqsave(&cqp->req_lock, flags);  [in irdma_alloc_and_get_cqp_request()]
   436  if (!list_empty(&cqp->cqp_avail_reqs)) {  [in irdma_alloc_and_get_cqp_request()]
   437  cqp_request = list_first_entry(&cqp->cqp_avail_reqs,  [in irdma_alloc_and_get_cqp_request()]
   441  spin_unlock_irqrestore(&cqp->req_lock, flags);  [in irdma_alloc_and_get_cqp_request()]
   451  ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");  [in irdma_alloc_and_get_cqp_request()]
   476  void irdma_free_cqp_request(struct irdma_cqp *cqp,  [in irdma_free_cqp_request() argument]
   488  spin_lock_irqsave(&cqp->req_lock, flags);  [in irdma_free_cqp_request()]
   489  list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);  [in irdma_free_cqp_request()]
   490  spin_unlock_irqrestore(&cqp->req_lock, flags);  [in irdma_free_cqp_request()]
   [all …]
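The utils.c hits sketch the request pool: cqp_request objects sit on the cqp->cqp_avail_reqs free list, are handed out under the cqp->req_lock irqsave spinlock, and go back with list_add_tail() when freed; when the list is empty the driver logs "CQP Request Fail: No Memory". A user-space model of that pool, with a pthread mutex standing in for spin_lock_irqsave() and a head-push where the driver appends at the tail:

```c
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define POOL_SIZE 16                     /* illustrative pool depth */

struct toy_request {
	struct toy_request *next;
};

struct toy_cqp {
	pthread_mutex_t req_lock;        /* stands in for cqp->req_lock */
	struct toy_request *avail;       /* stands in for cqp->cqp_avail_reqs */
	struct toy_request slab[POOL_SIZE];
};

static void toy_cqp_init(struct toy_cqp *cqp)
{
	pthread_mutex_init(&cqp->req_lock, NULL);
	cqp->avail = NULL;
	for (size_t i = 0; i < POOL_SIZE; i++) {
		cqp->slab[i].next = cqp->avail;
		cqp->avail = &cqp->slab[i];
	}
}

/* Mirrors irdma_alloc_and_get_cqp_request(): take from the free list if
 * one is available, otherwise fail loudly (the driver logs the failure
 * via ibdev_dbg, as hit 451 shows). */
static struct toy_request *toy_alloc_request(struct toy_cqp *cqp)
{
	struct toy_request *req = NULL;

	pthread_mutex_lock(&cqp->req_lock);
	if (cqp->avail) {                /* if (!list_empty(...)) */
		req = cqp->avail;        /* list_first_entry(...) */
		cqp->avail = req->next;
	}
	pthread_mutex_unlock(&cqp->req_lock);
	if (!req)
		fprintf(stderr, "CQP Request Fail: No Memory\n");
	return req;
}

/* Mirrors irdma_free_cqp_request(): return the node to the pool.
 * (LIFO push here; the driver's list_add_tail() recycles FIFO.) */
static void toy_free_request(struct toy_cqp *cqp, struct toy_request *req)
{
	pthread_mutex_lock(&cqp->req_lock);
	req->next = cqp->avail;
	cqp->avail = req;
	pthread_mutex_unlock(&cqp->req_lock);
}

int main(void)
{
	struct toy_cqp cqp;

	toy_cqp_init(&cqp);
	struct toy_request *r = toy_alloc_request(&cqp);
	toy_free_request(&cqp, r);
	return 0;
}
```

The irqsave variant matters in the driver because completions can recycle requests from interrupt context; the mutex here is only a stand-in for that discipline.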
D | hw.c
   593  struct irdma_cqp *cqp = &rf->cqp;  [in irdma_destroy_cqp() local]
   596  status = irdma_sc_cqp_destroy(dev->cqp);  [in irdma_destroy_cqp()]
   601  dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,  [in irdma_destroy_cqp()]
   602  cqp->sq.pa);  [in irdma_destroy_cqp()]
   603  cqp->sq.va = NULL;  [in irdma_destroy_cqp()]
   604  kfree(cqp->scratch_array);  [in irdma_destroy_cqp()]
   605  cqp->scratch_array = NULL;  [in irdma_destroy_cqp()]
   606  kfree(cqp->cqp_requests);  [in irdma_destroy_cqp()]
   607  cqp->cqp_requests = NULL;  [in irdma_destroy_cqp()]
   870  return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,  [in irdma_create_hmc_objs()]
   [all …]
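irdma_destroy_cqp() tears down in reverse order of creation: destroy the firmware CQP object first, then dma_free_coherent() the send-queue ring, then kfree() the scratch and request arrays, NULLing each pointer as it goes so a repeated teardown is harmless. A simplified user-space sketch of that back-to-front, idempotent teardown; plain free() stands in for dma_free_coherent() and the struct layout is illustrative:

```c
#include <stdlib.h>

/* Hypothetical control-QP wrapper; the fields mirror what the hw.c
 * hits free, but the types are simplified. */
struct toy_cqp {
	void *sq_va;            /* ring memory (DMA-coherent in the driver) */
	void **scratch_array;
	void *cqp_requests;
};

/* Free back-to-front and NULL each pointer, the same idempotence the
 * hw.c hits show (cqp->sq.va = NULL, cqp->scratch_array = NULL, ...). */
static void toy_destroy_cqp(struct toy_cqp *cqp)
{
	free(cqp->sq_va);
	cqp->sq_va = NULL;
	free(cqp->scratch_array);
	cqp->scratch_array = NULL;
	free(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

int main(void)
{
	struct toy_cqp cqp = {
		.sq_va = malloc(4096),
		.scratch_array = malloc(64 * sizeof(void *)),
		.cqp_requests = malloc(1024),
	};

	toy_destroy_cqp(&cqp);
	toy_destroy_cqp(&cqp);  /* safe twice: free(NULL) is a no-op */
	return 0;
}
```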
D | uda.c
    20  int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,  [in irdma_sc_access_ah() argument]
    26  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  [in irdma_sc_access_ah()]
    69  FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |  [in irdma_sc_access_ah()]
    78  irdma_sc_cqp_post_sq(cqp);  [in irdma_sc_access_ah()]
   115  int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,  [in irdma_access_mcast_grp() argument]
   122  ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");  [in irdma_access_mcast_grp()]
   126  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  [in irdma_access_mcast_grp()]
   128  ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");  [in irdma_access_mcast_grp()]
   157  FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |  [in irdma_access_mcast_grp()]
   168  irdma_sc_cqp_post_sq(cqp);  [in irdma_access_mcast_grp()]
D | type.h
   604  struct irdma_sc_cqp *cqp;  [member]
   668  struct irdma_sc_cqp *cqp;  [member]
  1192  int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
  1193  int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
  1194  int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
  1196  void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
  1197  int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
  1224  int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
  1292  struct irdma_sc_cqp *cqp;  [member]
  1298  struct irdma_sc_cqp *cqp;  [member]
  [all …]
D | protos.h
    18  void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
    19  __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
    77  int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
D | puda.c
   607  struct irdma_sc_cqp *cqp;  [in irdma_puda_qp_wqe() local]
   613  cqp = dev->cqp;  [in irdma_puda_qp_wqe()]
   614  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);  [in irdma_puda_qp_wqe()]
   626  FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);  [in irdma_puda_qp_wqe()]
   633  irdma_sc_cqp_post_sq(cqp);  [in irdma_puda_qp_wqe()]
   634  status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,  [in irdma_puda_qp_wqe()]
   726  struct irdma_sc_cqp *cqp;  [in irdma_puda_cq_wqe() local]
   731  cqp = dev->cqp;  [in irdma_puda_cq_wqe()]
   732  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);  [in irdma_puda_cq_wqe()]
   751  FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);  [in irdma_puda_cq_wqe()]
   [all …]
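puda.c uses the synchronous flavor of the same posting pattern: after irdma_sc_cqp_post_sq() it waits in irdma_sc_poll_for_cqp_op_done() for a completion carrying the expected opcode (IRDMA_CQP_OP_CREATE_QP above), instead of going through the interrupt-driven cqp_request path from utils.c. A sketch of that post-then-poll shape; the helpers and the bounded retry loop are hypothetical stand-ins for the driver's real timeout handling:

```c
#include <stdbool.h>
#include <stdio.h>

enum toy_op { TOY_OP_CREATE_QP = 1 };

/* Hypothetical stand-ins: post a command, then check the completion
 * queue for an entry carrying the expected opcode. */
static void toy_post_sq(enum toy_op op)
{
	printf("posted op %d\n", op);
}

static bool toy_cq_has_completion(enum toy_op op)
{
	static int calls;

	(void)op;
	return ++calls >= 3;    /* pretend HW finishes on the 3rd poll */
}

/* Bounded polling in place of irdma_sc_poll_for_cqp_op_done()'s real
 * timeout/backoff logic. */
static int toy_poll_for_op_done(enum toy_op op)
{
	for (int tries = 0; tries < 1000; tries++)
		if (toy_cq_has_completion(op))
			return 0;
	return -1;              /* timed out */
}

int main(void)
{
	toy_post_sq(TOY_OP_CREATE_QP);
	if (toy_poll_for_op_done(TOY_OP_CREATE_QP))
		fprintf(stderr, "CQP op timed out\n");
	else
		printf("CQP op done\n");
	return 0;
}
```

Polling fits here because these privileged-UDA setup commands run at init time, where blocking briefly is cheaper than wiring up an asynchronous completion path.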
D | main.h
   303  struct irdma_cqp cqp;  [member]
   482  struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
   484  void irdma_free_cqp_request(struct irdma_cqp *cqp,
   486  void irdma_put_cqp_request(struct irdma_cqp *cqp,
D | verbs.c
   237  cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);  [in irdma_alloc_push_page()]
   249  cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;  [in irdma_alloc_push_page()]
   259  irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);  [in irdma_alloc_push_page()]
   740  cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);  [in irdma_cqp_create_qp_cmd()]
   756  irdma_put_cqp_request(&rf->cqp, cqp_request);  [in irdma_cqp_create_qp_cmd()]
  1978  cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);  [in irdma_resize_cq()]
  1996  irdma_put_cqp_request(&rf->cqp, cqp_request);  [in irdma_resize_cq()]
  2198  cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);  [in irdma_create_cq()]
  2211  irdma_put_cqp_request(&rf->cqp, cqp_request);  [in irdma_create_cq()]
  2546  cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);  [in irdma_hw_alloc_mw()]
  [all …]
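All of these verbs.c call sites follow one lifecycle around the pool from utils.c: irdma_alloc_and_get_cqp_request(..., true) returns a waitable request, the caller fills in the command arguments (the manage_push_page line above shows the cqp_info->in.u.<op> pattern), hands the command off, and finally drops its reference with irdma_put_cqp_request(). The "_and_get" in the name suggests the request starts with an extra reference so it survives until the completion side also puts it. A toy refcount model of that handoff, with illustrative names and the asynchronous completion simulated inline:

```c
#include <stdio.h>

struct toy_request {
	int refcnt;
	int done;
};

/* Stands in for irdma_free_cqp_request(): back to the pool. */
static void toy_request_release(struct toy_request *req)
{
	(void)req;
	printf("request back on free list\n");
}

static void toy_put_request(struct toy_request *req)
{
	if (--req->refcnt == 0)
		toy_request_release(req);
}

static struct toy_request *toy_alloc_and_get_request(void)
{
	static struct toy_request req;  /* toy: one static slot, no pool */

	req.refcnt = 2;  /* one ref for the caller, one for the completion */
	req.done = 0;
	return &req;
}

/* Completion side (IRQ context in the driver) drops its reference. */
static void toy_complete_request(struct toy_request *req)
{
	req->done = 1;
	toy_put_request(req);
}

int main(void)
{
	struct toy_request *req = toy_alloc_and_get_request();

	/* ... fill cqp_info and hand the command to hardware ... */
	toy_complete_request(req);      /* normally asynchronous */
	toy_put_request(req);           /* caller's irdma_put_cqp_request() */
	return 0;
}
```

With two references, it does not matter whether the completion or the caller's put runs first; the request is recycled only when both are done.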
D | hmc.c
   134  return dev->cqp->process_cqp_sds(dev, &sdinfo);  [in irdma_hmc_sd_one()]
   173  ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);  [in irdma_hmc_sd_grp()]
   185  ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);  [in irdma_hmc_sd_grp()]
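In hmc.c the interesting part is the indirection: segment-descriptor updates go through dev->cqp->process_cqp_sds, a function pointer, so the HMC logic does not hard-code how the command reaches the firmware and the device can install whichever implementation fits its setup stage. A minimal sketch of that hook pattern with hypothetical toy_* types:

```c
#include <stdio.h>

struct toy_sd_info { int sd_cnt; };
struct toy_dev;

/* The hook: HMC code calls through this pointer instead of naming a
 * concrete transport for the SD-update command. */
struct toy_cqp_ops {
	int (*process_cqp_sds)(struct toy_dev *dev, struct toy_sd_info *info);
};

struct toy_dev {
	struct toy_cqp_ops *cqp;
};

static int toy_sds_direct(struct toy_dev *dev, struct toy_sd_info *info)
{
	(void)dev;
	printf("posting %d SD update(s) via direct CQP\n", info->sd_cnt);
	return 0;
}

static int toy_hmc_sd_one(struct toy_dev *dev)
{
	struct toy_sd_info sdinfo = { .sd_cnt = 1 };

	/* same shape as: return dev->cqp->process_cqp_sds(dev, &sdinfo); */
	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}

int main(void)
{
	struct toy_cqp_ops ops = { .process_cqp_sds = toy_sds_direct };
	struct toy_dev dev = { .cqp = &ops };

	return toy_hmc_sd_one(&dev);
}
```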
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en/
D | params.h
    23  struct mlx5e_cq_param cqp;  [member]
    31  struct mlx5e_cq_param cqp;  [member]
D | params.c
  1013  mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);  [in mlx5e_build_rq_param()]
  1072  mlx5e_build_tx_cq_param(mdev, params, &param->cqp);  [in mlx5e_build_sq_param()]
  1256  mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);  [in mlx5e_build_icosq_param()]
  1273  mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);  [in mlx5e_build_async_icosq_param()]
  1288  mlx5e_build_tx_cq_param(mdev, params, &param->cqp);  [in mlx5e_build_xdpsq_param()]
D | ptp.c
   563  cq_param = &cparams->txq_sq_param.cqp;  [in mlx5e_ptp_open_tx_cqs()]
   610  cq_param = &cparams->rq_param.cqp;  [in mlx5e_ptp_open_rx_cq()]
   638  mlx5e_build_tx_cq_param(mdev, params, &param->cqp);  [in mlx5e_ptp_build_sq_param()]
D | trap.c
    75  err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);  [in mlx5e_open_trap_rq()]
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
D | setup.c
   130  err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,  [in mlx5e_open_xsk()]
   139  err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,  [in mlx5e_open_xsk()]
/linux-6.6.21/drivers/scsi/lpfc/
D | lpfc_sli4.h
  1082  int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
  1092  struct lpfc_queue **drqp, struct lpfc_queue **cqp,
D | lpfc_sli.c
  16410  lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,  [in lpfc_cq_create_set() argument]
  16427  if (!cqp || !hdwq || !numcq)  [in lpfc_cq_create_set()]
  16435  length += ((numcq * cqp[0]->page_count) *  [in lpfc_cq_create_set()]
  16453  cq = cqp[idx];  [in lpfc_cq_create_set()]
  16627  cq = cqp[idx];  [in lpfc_cq_create_set()]
  17429  struct lpfc_queue **drqp, struct lpfc_queue **cqp,  [in lpfc_mrq_create() argument]
  17444  if (!hrqp || !drqp || !cqp || !numrq)  [in lpfc_mrq_create()]
  17480  cq = cqp[idx];  [in lpfc_mrq_create()]
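In lpfc, cqp is simply a parameter name: an array of struct lpfc_queue pointers for creating a whole set of CQs (or, in lpfc_mrq_create(), the CQs backing a set of receive queues) with one mailbox command. The hits show the shape: validate the pointer arguments up front, size the command from numcq * cqp[0]->page_count, then walk the set with cq = cqp[idx]. A minimal sketch of that batch-create shape with hypothetical types:

```c
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct lpfc_queue. */
struct toy_queue {
	int id;
	int page_count;
};

/* Batch-create: validate the array, size one command from the first
 * entry (queues in a set share a geometry), then walk the set. */
static int toy_cq_create_set(struct toy_queue **cqp, int numcq)
{
	if (!cqp || !numcq)
		return -1;      /* mirrors the !cqp || ... || !numcq check */

	size_t length = (size_t)numcq * cqp[0]->page_count;
	printf("one mailbox command covering %zu pages\n", length);

	for (int idx = 0; idx < numcq; idx++) {
		struct toy_queue *cq = cqp[idx];   /* cq = cqp[idx]; */
		printf("adding cq %d to the set\n", cq->id);
	}
	return 0;
}

int main(void)
{
	struct toy_queue a = { .id = 0, .page_count = 4 };
	struct toy_queue b = { .id = 1, .page_count = 4 };
	struct toy_queue *set[] = { &a, &b };

	return toy_cq_create_set(set, 2);
}
```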
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/
D | en_main.c
  2075  err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,  [in mlx5e_open_tx_cqs()]
  2276  err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,  [in mlx5e_open_queues()]
  2281  err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,  [in mlx5e_open_queues()]
  2290  err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,  [in mlx5e_open_queues()]
  2295  err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,  [in mlx5e_open_queues()]
  2300  err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,  [in mlx5e_open_queues()]
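In the mlx5 driver, cqp is not a control QP at all: as the params.h hits show, it is the embedded struct mlx5e_cq_param inside each queue's parameter block, built alongside the owning RQ/SQ (params.c) and later handed to mlx5e_open_cq() (&cparam->rq.cqp, &cparam->txq_sq.cqp, and so on). A sketch of that embed-then-open flow with hypothetical toy_* types:

```c
#include <stdio.h>

/* Hypothetical parameter blocks mirroring the params.h layout: each
 * queue's param struct embeds the param block for its companion CQ. */
struct toy_cq_param { int log_size; };

struct toy_rq_param {
	int log_wq_size;
	struct toy_cq_param cqp;  /* the member the search matched */
};

/* Built together with the RQ so the CQ is sized to match. */
static void toy_build_rx_cq_param(struct toy_cq_param *cqp)
{
	cqp->log_size = 10;
}

static int toy_open_cq(const struct toy_cq_param *cqp)
{
	printf("opening CQ, log_size=%d\n", cqp->log_size);
	return 0;
}

int main(void)
{
	struct toy_rq_param rq_param = { .log_wq_size = 12 };

	toy_build_rx_cq_param(&rq_param.cqp);  /* mlx5e_build_rq_param() step */
	return toy_open_cq(&rq_param.cqp);     /* mlx5e_open_queues() step */
}
```

Embedding the CQ parameters in the queue's own parameter block keeps the two halves of an RQ/CQ or SQ/CQ pair configured from one place, which is why every open site above just reaches into the cparam it already has.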