Lines Matching refs:gsi
49 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in generate_completions() local
54 for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; in generate_completions()
56 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr]; in generate_completions()
65 gsi->outstanding_ci = index; in generate_completions()
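
The generate_completions() hits above walk a producer/consumer ring of outstanding send work requests: outstanding_ci chases outstanding_pi, with slots indexed modulo cap.max_send_wr. A minimal sketch of that drain pattern, assuming a struct mlx5_ib_gsi_wr that carries a completed flag and a saved ib_wc (both assumptions), with a hypothetical handle_gsi_wc() standing in for the driver's completion reporting:

/* Sketch only: report completed WRs, in order, starting at the consumer
 * index and stopping at the first slot still in flight. Field names
 * follow the listing; handle_gsi_wc() is hypothetical. */
static void drain_outstanding_wrs(struct mlx5_ib_gsi_qp *gsi)
{
	u32 index;

	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
	     index++) {
		struct mlx5_ib_gsi_wr *wr =
			&gsi->outstanding_wrs[index % gsi->cap.max_send_wr];

		if (!wr->completed)
			break;			/* this WR is still on the wire */

		handle_gsi_wc(gsi, &wr->wc);	/* hypothetical: surface the completion */
		wr->completed = false;
	}

	gsi->outstanding_ci = index;
}
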
70 struct mlx5_ib_gsi_qp *gsi = cq->cq_context; in handle_single_completion() local
73 struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi); in handle_single_completion()
77 spin_lock_irqsave(&gsi->lock, flags); in handle_single_completion()
85 spin_unlock_irqrestore(&gsi->lock, flags); in handle_single_completion()
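
handle_single_completion() takes the CQ whose cq_context is the GSI state and recovers the owning QP with container_of(), which matches the per-CQE done-callback model used with ib_alloc_cq(). A hedged sketch of such a callback, reusing the hypothetical drain helper and the assumed mlx5_ib_gsi_wr layout from the previous sketch:

/* Sketch only: per-CQE done callback. The ring entry is recovered from
 * wc->wr_cqe, marked completed, and the ring is drained under gsi->lock
 * so completions are reported in posting order. */
static void gsi_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
	struct mlx5_ib_gsi_wr *wr =
		container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
	unsigned long flags;

	spin_lock_irqsave(&gsi->lock, flags);
	wr->wc = *wc;			/* keep the hardware completion for reporting */
	wr->completed = true;
	drain_outstanding_wrs(gsi);	/* hypothetical helper from the sketch above */
	spin_unlock_irqrestore(&gsi->lock, flags);
}
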
92 struct mlx5_ib_gsi_qp *gsi; in mlx5_ib_create_gsi() local
106 gsi = &mqp->gsi; in mlx5_ib_create_gsi()
107 gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL); in mlx5_ib_create_gsi()
108 if (!gsi->tx_qps) in mlx5_ib_create_gsi()
111 gsi->outstanding_wrs = in mlx5_ib_create_gsi()
112 kcalloc(attr->cap.max_send_wr, sizeof(*gsi->outstanding_wrs), in mlx5_ib_create_gsi()
114 if (!gsi->outstanding_wrs) { in mlx5_ib_create_gsi()
119 if (dev->devr.ports[port_num - 1].gsi) { in mlx5_ib_create_gsi()
125 gsi->num_qps = num_qps; in mlx5_ib_create_gsi()
126 spin_lock_init(&gsi->lock); in mlx5_ib_create_gsi()
128 gsi->cap = attr->cap; in mlx5_ib_create_gsi()
129 gsi->port_num = port_num; in mlx5_ib_create_gsi()
131 gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0, in mlx5_ib_create_gsi()
133 if (IS_ERR(gsi->cq)) { in mlx5_ib_create_gsi()
135 PTR_ERR(gsi->cq)); in mlx5_ib_create_gsi()
136 ret = PTR_ERR(gsi->cq); in mlx5_ib_create_gsi()
141 hw_init_attr.send_cq = gsi->cq; in mlx5_ib_create_gsi()
148 gsi->rx_qp = ib_create_qp(pd, &hw_init_attr); in mlx5_ib_create_gsi()
149 if (IS_ERR(gsi->rx_qp)) { in mlx5_ib_create_gsi()
151 PTR_ERR(gsi->rx_qp)); in mlx5_ib_create_gsi()
152 ret = PTR_ERR(gsi->rx_qp); in mlx5_ib_create_gsi()
156 dev->devr.ports[attr->port_num - 1].gsi = gsi; in mlx5_ib_create_gsi()
160 ib_free_cq(gsi->cq); in mlx5_ib_create_gsi()
162 kfree(gsi->outstanding_wrs); in mlx5_ib_create_gsi()
164 kfree(gsi->tx_qps); in mlx5_ib_create_gsi()
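
The mlx5_ib_create_gsi() hits show the construction order: a per-port duplicate check against dev->devr.ports[], the tx_qps and outstanding_wrs arrays, a send CQ allocated with the GSI state as cq_context, and finally the hardware GSI receive QP, with each failure unwinding the earlier steps. A condensed sketch of that sequence; the real function also builds an mlx5-specific hw_init_attr and registers the GSI in the per-port table, both omitted here, and the IB_POLL_SOFTIRQ polling context is an assumption:

/* Sketch only: allocation order and error unwinding implied by the
 * listing. The caller owns the per-port duplicate check and the final
 * registration in dev->devr.ports[]. */
static int gsi_alloc_resources(struct mlx5_ib_gsi_qp *gsi, struct ib_pd *pd,
			       struct ib_qp_init_attr *attr, u32 num_qps)
{
	int ret;

	gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
	if (!gsi->tx_qps)
		return -ENOMEM;

	gsi->outstanding_wrs = kcalloc(attr->cap.max_send_wr,
				       sizeof(*gsi->outstanding_wrs),
				       GFP_KERNEL);
	if (!gsi->outstanding_wrs) {
		ret = -ENOMEM;
		goto err_free_tx;
	}

	gsi->num_qps = num_qps;
	spin_lock_init(&gsi->lock);
	gsi->cap = attr->cap;

	/* One CQE per possible outstanding send WR; polling context assumed. */
	gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
			      IB_POLL_SOFTIRQ);
	if (IS_ERR(gsi->cq)) {
		ret = PTR_ERR(gsi->cq);
		goto err_free_wrs;
	}

	attr->send_cq = gsi->cq;	/* tx completions funnel into the GSI CQ */
	gsi->rx_qp = ib_create_qp(pd, attr);
	if (IS_ERR(gsi->rx_qp)) {
		ret = PTR_ERR(gsi->rx_qp);
		goto err_free_cq;
	}

	return 0;

err_free_cq:
	ib_free_cq(gsi->cq);
err_free_wrs:
	kfree(gsi->outstanding_wrs);
err_free_tx:
	kfree(gsi->tx_qps);
	return ret;
}
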
171 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in mlx5_ib_destroy_gsi() local
172 const int port_num = gsi->port_num; in mlx5_ib_destroy_gsi()
176 ret = ib_destroy_qp(gsi->rx_qp); in mlx5_ib_destroy_gsi()
182 dev->devr.ports[port_num - 1].gsi = NULL; in mlx5_ib_destroy_gsi()
183 gsi->rx_qp = NULL; in mlx5_ib_destroy_gsi()
185 for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) { in mlx5_ib_destroy_gsi()
186 if (!gsi->tx_qps[qp_index]) in mlx5_ib_destroy_gsi()
188 WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index])); in mlx5_ib_destroy_gsi()
189 gsi->tx_qps[qp_index] = NULL; in mlx5_ib_destroy_gsi()
192 ib_free_cq(gsi->cq); in mlx5_ib_destroy_gsi()
194 kfree(gsi->outstanding_wrs); in mlx5_ib_destroy_gsi()
195 kfree(gsi->tx_qps); in mlx5_ib_destroy_gsi()
199 static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi) in create_gsi_ud_qp() argument
201 struct ib_pd *pd = gsi->rx_qp->pd; in create_gsi_ud_qp()
203 .event_handler = gsi->rx_qp->event_handler, in create_gsi_ud_qp()
204 .qp_context = gsi->rx_qp->qp_context, in create_gsi_ud_qp()
205 .send_cq = gsi->cq, in create_gsi_ud_qp()
206 .recv_cq = gsi->rx_qp->recv_cq, in create_gsi_ud_qp()
208 .max_send_wr = gsi->cap.max_send_wr, in create_gsi_ud_qp()
209 .max_send_sge = gsi->cap.max_send_sge, in create_gsi_ud_qp()
210 .max_inline_data = gsi->cap.max_inline_data, in create_gsi_ud_qp()
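
create_gsi_ud_qp() builds one transmit QP from the receive QP's plumbing: same event handler, context and receive CQ, the shared GSI send CQ, and the GSI QP's send caps. A hedged sketch of that init-attr setup; the mlx5-specific create flag that marks these QPs as QP1 senders is deliberately left out:

/* Sketch only: a UD transmit QP that mirrors the GSI QP's send caps and
 * reuses its completion and event plumbing. */
static struct ib_qp *gsi_create_ud_qp(struct mlx5_ib_gsi_qp *gsi)
{
	struct ib_pd *pd = gsi->rx_qp->pd;
	struct ib_qp_init_attr init_attr = {
		.event_handler = gsi->rx_qp->event_handler,
		.qp_context = gsi->rx_qp->qp_context,
		.send_cq = gsi->cq,
		.recv_cq = gsi->rx_qp->recv_cq,
		.cap = {
			.max_send_wr = gsi->cap.max_send_wr,
			.max_send_sge = gsi->cap.max_send_sge,
			.max_inline_data = gsi->cap.max_inline_data,
		},
		.qp_type = IB_QPT_UD,
	};

	return ib_create_qp(pd, &init_attr);
}
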
219 static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp, in modify_to_rts() argument
231 attr.port_num = gsi->port_num; in modify_to_rts()
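
modify_to_rts() drives a freshly created tx QP through the usual UD bring-up; the listing only shows that the port number comes from the GSI state. A sketch of the standard RESET→INIT→RTR→RTS sequence, under the assumption that the well-known QP1 Q_Key is used:

/* Sketch only: standard verbs state transitions for a UD send QP.
 * The pkey_index comes from the caller; the Q_Key is assumed to be
 * IB_QP1_QKEY since these QPs carry GSI traffic. */
static int gsi_modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
			     u16 pkey_index)
{
	struct ib_qp_attr attr = {};
	int mask, ret;

	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = pkey_index;
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = gsi->port_num;
	mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
	ret = ib_modify_qp(qp, &attr, mask);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}
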
259 static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index) in setup_qp() argument
261 struct ib_device *device = gsi->rx_qp->device; in setup_qp()
273 ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey); in setup_qp()
276 gsi->port_num, qp_index); in setup_qp()
282 gsi->port_num, qp_index); in setup_qp()
286 spin_lock_irqsave(&gsi->lock, flags); in setup_qp()
287 qp = gsi->tx_qps[qp_index]; in setup_qp()
288 spin_unlock_irqrestore(&gsi->lock, flags); in setup_qp()
291 gsi->port_num, qp_index); in setup_qp()
295 qp = create_gsi_ud_qp(gsi); in setup_qp()
305 ret = modify_to_rts(gsi, qp, pkey_index); in setup_qp()
309 spin_lock_irqsave(&gsi->lock, flags); in setup_qp()
310 WARN_ON_ONCE(gsi->tx_qps[qp_index]); in setup_qp()
311 gsi->tx_qps[qp_index] = qp; in setup_qp()
312 spin_unlock_irqrestore(&gsi->lock, flags); in setup_qp()
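
setup_qp() glues the previous two steps together for one slot of tx_qps[]: query the P_Key for the slot, bail out if the slot is unused or already populated, create the UD QP, bring it to RTS, and publish it under gsi->lock. A condensed sketch reusing the hypothetical helpers above; note that the listing distinguishes a pkey_index from the qp_index, which this sketch conflates for brevity:

/* Sketch only: bring up the tx QP for one slot. Reads and writes of
 * gsi->tx_qps[] are serialized by gsi->lock. */
static void gsi_setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
	struct ib_device *device = gsi->rx_qp->device;
	unsigned long flags;
	struct ib_qp *qp;
	u16 pkey;
	int ret;

	ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
	if (ret || !pkey)
		return;				/* unused P_Key table slot */

	spin_lock_irqsave(&gsi->lock, flags);
	qp = gsi->tx_qps[qp_index];
	spin_unlock_irqrestore(&gsi->lock, flags);
	if (qp)
		return;				/* already set up */

	qp = gsi_create_ud_qp(gsi);
	if (IS_ERR(qp))
		return;

	if (gsi_modify_to_rts(gsi, qp, qp_index)) {
		ib_destroy_qp(qp);
		return;
	}

	spin_lock_irqsave(&gsi->lock, flags);
	gsi->tx_qps[qp_index] = qp;
	spin_unlock_irqrestore(&gsi->lock, flags);
}
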
325 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in mlx5_ib_gsi_modify_qp() local
331 ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask); in mlx5_ib_gsi_modify_qp()
337 if (to_mqp(gsi->rx_qp)->state != IB_QPS_RTS) in mlx5_ib_gsi_modify_qp()
340 for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) in mlx5_ib_gsi_modify_qp()
341 setup_qp(gsi, qp_index); in mlx5_ib_gsi_modify_qp()
350 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in mlx5_ib_gsi_query_qp() local
353 ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr); in mlx5_ib_gsi_query_qp()
354 qp_init_attr->cap = gsi->cap; in mlx5_ib_gsi_query_qp()
362 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in mlx5_ib_add_outstanding_wr() local
363 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); in mlx5_ib_add_outstanding_wr()
366 if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) { in mlx5_ib_add_outstanding_wr()
371 gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi % in mlx5_ib_add_outstanding_wr()
372 gsi->cap.max_send_wr]; in mlx5_ib_add_outstanding_wr()
373 gsi->outstanding_pi++; in mlx5_ib_add_outstanding_wr()
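
mlx5_ib_add_outstanding_wr() is the producer side of the ring drained by generate_completions(): the ring is full when outstanding_pi has advanced a whole ring-length past outstanding_ci, otherwise the next slot is claimed and wired to the WR. A hedged sketch using the same assumed mlx5_ib_gsi_wr layout and hypothetical callback as earlier:

/* Sketch only: reserve a ring slot for a WR that is about to be posted
 * on a tx QP. Called with gsi->lock held. gsi_send_done() is the
 * hypothetical per-CQE done callback sketched earlier. */
static int gsi_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
				  struct ib_ud_wr *wr, struct ib_wc *wc)
{
	struct mlx5_ib_gsi_wr *gsi_wr;

	if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr)
		return -ENOMEM;		/* ring full: too many WRs in flight */

	gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
				       gsi->cap.max_send_wr];
	gsi->outstanding_pi++;

	memset(gsi_wr, 0, sizeof(*gsi_wr));
	if (wc) {
		gsi_wr->wc = *wc;	/* pre-cooked completion for a dropped WR */
		gsi_wr->completed = true;
	}
	gsi_wr->cqe.done = &gsi_send_done;
	wr->wr.wr_cqe = &gsi_wr->cqe;	/* route the HW completion back to this slot */

	return 0;
}
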
411 static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr) in get_tx_qp() argument
413 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); in get_tx_qp()
417 if (!gsi->num_qps) in get_tx_qp()
418 return gsi->rx_qp; in get_tx_qp()
423 if (qp_index >= gsi->num_qps) in get_tx_qp()
426 return gsi->tx_qps[qp_index]; in get_tx_qp()
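
get_tx_qp() chooses the QP that actually transmits: with no tx QPs the hardware GSI QP is used directly, and an out-of-range index returns NULL so the caller can complete the WR in software. A hedged sketch; deriving qp_index purely from the WR's P_Key index is a simplification, since the listing shows the driver also consults device state:

/* Sketch only: pick the QP that should carry this UD WR. Called with
 * gsi->lock held so gsi->tx_qps[] is stable. */
static struct ib_qp *gsi_get_tx_qp(struct mlx5_ib_gsi_qp *gsi,
				   struct ib_ud_wr *wr)
{
	int qp_index = wr->pkey_index;

	if (!gsi->num_qps)
		return gsi->rx_qp;	/* no demux needed: send on the GSI QP */

	if (qp_index >= gsi->num_qps)
		return NULL;		/* no QP for this slot */

	return gsi->tx_qps[qp_index];	/* may still be NULL if setup failed */
}
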
433 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in mlx5_ib_gsi_post_send() local
443 spin_lock_irqsave(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
444 tx_qp = get_tx_qp(gsi, &cur_wr); in mlx5_ib_gsi_post_send()
449 spin_unlock_irqrestore(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
460 gsi->outstanding_pi--; in mlx5_ib_gsi_post_send()
463 spin_unlock_irqrestore(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
469 spin_unlock_irqrestore(&gsi->lock, flags); in mlx5_ib_gsi_post_send()
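
mlx5_ib_gsi_post_send() ties everything together: each WR in the chain is copied, a ring slot is reserved under gsi->lock, the target QP is chosen, and the copy is posted with its completion routed back to the slot; a missing tx QP is completed in software, and a failed post rolls outstanding_pi back. A condensed sketch in the same hedged style as the helpers above:

/* Sketch only: post a chain of UD WRs through the per-slot tx QPs,
 * tracking each one in the outstanding ring so completions can be
 * reported in posting order. */
static int gsi_post_send(struct mlx5_ib_gsi_qp *gsi,
			 const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ib_ud_wr cur_wr = *ud_wr(wr);	/* private copy we may retarget */
		struct ib_qp *tx_qp;

		cur_wr.wr.next = NULL;

		spin_lock_irqsave(&gsi->lock, flags);
		tx_qp = gsi_get_tx_qp(gsi, &cur_wr);
		if (!tx_qp) {
			/* No tx QP for this slot: complete the WR in software. */
			struct ib_wc wc = {
				.wr_id = wr->wr_id,
				.status = IB_WC_SUCCESS,
				.opcode = IB_WC_SEND,
				.qp = gsi->rx_qp,	/* simplified reporting QP */
			};

			ret = gsi_add_outstanding_wr(gsi, &cur_wr, &wc);
			spin_unlock_irqrestore(&gsi->lock, flags);
			if (ret)
				goto err;
			continue;
		}

		ret = gsi_add_outstanding_wr(gsi, &cur_wr, NULL);
		if (ret) {
			spin_unlock_irqrestore(&gsi->lock, flags);
			goto err;
		}

		ret = ib_post_send(tx_qp, &cur_wr.wr, NULL);
		if (ret) {
			gsi->outstanding_pi--;	/* roll back the reserved slot */
			spin_unlock_irqrestore(&gsi->lock, flags);
			goto err;
		}
		spin_unlock_irqrestore(&gsi->lock, flags);
	}

	return 0;

err:
	*bad_wr = wr;
	return ret;
}
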
478 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in mlx5_ib_gsi_post_recv() local
480 return ib_post_recv(gsi->rx_qp, wr, bad_wr); in mlx5_ib_gsi_post_recv()
483 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi) in mlx5_ib_gsi_pkey_change() argument
487 for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) in mlx5_ib_gsi_pkey_change()
488 setup_qp(gsi, qp_index); in mlx5_ib_gsi_pkey_change()