1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4
5 static struct irdma_rsrc_limits rsrc_limits_table[] = {
6 [0] = {
7 .qplimit = SZ_128,
8 },
9 [1] = {
10 .qplimit = SZ_1K,
11 },
12 [2] = {
13 .qplimit = SZ_2K,
14 },
15 [3] = {
16 .qplimit = SZ_4K,
17 },
18 [4] = {
19 .qplimit = SZ_16K,
20 },
21 [5] = {
22 .qplimit = SZ_64K,
23 },
24 [6] = {
25 .qplimit = SZ_128K,
26 },
27 [7] = {
28 .qplimit = SZ_256K,
29 },
30 };
31
32 /* types of hmc objects */
33 static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
34 IRDMA_HMC_IW_QP,
35 IRDMA_HMC_IW_CQ,
36 IRDMA_HMC_IW_HTE,
37 IRDMA_HMC_IW_ARP,
38 IRDMA_HMC_IW_APBVT_ENTRY,
39 IRDMA_HMC_IW_MR,
40 IRDMA_HMC_IW_XF,
41 IRDMA_HMC_IW_XFFL,
42 IRDMA_HMC_IW_Q1,
43 IRDMA_HMC_IW_Q1FL,
44 IRDMA_HMC_IW_TIMER,
45 IRDMA_HMC_IW_FSIMC,
46 IRDMA_HMC_IW_FSIAV,
47 IRDMA_HMC_IW_RRF,
48 IRDMA_HMC_IW_RRFFL,
49 IRDMA_HMC_IW_HDR,
50 IRDMA_HMC_IW_MD,
51 IRDMA_HMC_IW_OOISC,
52 IRDMA_HMC_IW_OOISCFFL,
53 };
54
55 /**
56 * irdma_iwarp_ce_handler - handle iwarp completions
57 * @iwcq: iwarp cq receiving event
58 */
59 static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
60 {
61 struct irdma_cq *cq = iwcq->back_cq;
62
63 if (!cq->user_mode)
64 atomic_set(&cq->armed, 0);
65 if (cq->ibcq.comp_handler)
66 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
67 }
68
69 /**
70 * irdma_puda_ce_handler - handle puda completion events
71 * @rf: RDMA PCI function
72 * @cq: puda completion q for event
73 */
74 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
75 struct irdma_sc_cq *cq)
76 {
77 struct irdma_sc_dev *dev = &rf->sc_dev;
78 u32 compl_error;
79 int status;
80
81 do {
82 status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
83 if (status == -ENOENT)
84 break;
85 if (status) {
86 ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
87 break;
88 }
89 if (compl_error) {
90 ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n",
91 compl_error);
92 break;
93 }
94 } while (1);
95
96 irdma_sc_ccq_arm(cq);
97 }
98
99 /**
100 * irdma_process_ceq - handle ceq for completions
101 * @rf: RDMA PCI function
102 * @ceq: ceq having cq for completion
103 */
104 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
105 {
106 struct irdma_sc_dev *dev = &rf->sc_dev;
107 struct irdma_sc_ceq *sc_ceq;
108 struct irdma_sc_cq *cq;
109 unsigned long flags;
110
111 sc_ceq = &ceq->sc_ceq;
112 do {
113 spin_lock_irqsave(&ceq->ce_lock, flags);
114 cq = irdma_sc_process_ceq(dev, sc_ceq);
115 if (!cq) {
116 spin_unlock_irqrestore(&ceq->ce_lock, flags);
117 break;
118 }
119
120 if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
121 irdma_iwarp_ce_handler(cq);
122
123 spin_unlock_irqrestore(&ceq->ce_lock, flags);
124
125 if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
126 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
127 else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
128 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
129 irdma_puda_ce_handler(rf, cq);
130 } while (1);
131 }
132
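/**
 * irdma_set_flush_fields - set QP flush code and event type from an AE
 * @qp: hardware control qp
 * @info: asynchronous event information
 *
 * Map the received ae_id to the flush code and the event type
 * reported when the QP is flushed
 */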
133 static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
134 struct irdma_aeqe_info *info)
135 {
136 qp->sq_flush_code = info->sq;
137 qp->rq_flush_code = info->rq;
138 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
139
140 switch (info->ae_id) {
141 case IRDMA_AE_AMP_BOUNDS_VIOLATION:
142 case IRDMA_AE_AMP_INVALID_STAG:
143 case IRDMA_AE_AMP_RIGHTS_VIOLATION:
144 case IRDMA_AE_AMP_UNALLOCATED_STAG:
145 case IRDMA_AE_AMP_BAD_PD:
146 case IRDMA_AE_AMP_BAD_QP:
147 case IRDMA_AE_AMP_BAD_STAG_KEY:
148 case IRDMA_AE_AMP_BAD_STAG_INDEX:
149 case IRDMA_AE_AMP_TO_WRAP:
150 case IRDMA_AE_PRIV_OPERATION_DENIED:
151 qp->flush_code = FLUSH_PROT_ERR;
152 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
153 break;
154 case IRDMA_AE_UDA_XMIT_BAD_PD:
155 case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
156 qp->flush_code = FLUSH_LOC_QP_OP_ERR;
157 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
158 break;
159 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
160 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
161 case IRDMA_AE_UDA_L4LEN_INVALID:
162 case IRDMA_AE_DDP_UBE_INVALID_MO:
163 case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
164 qp->flush_code = FLUSH_LOC_LEN_ERR;
165 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
166 break;
167 case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
168 case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
169 qp->flush_code = FLUSH_REM_ACCESS_ERR;
170 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
171 break;
172 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
173 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
174 case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
175 case IRDMA_AE_IB_REMOTE_OP_ERROR:
176 qp->flush_code = FLUSH_REM_OP_ERR;
177 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
178 break;
179 case IRDMA_AE_LCE_QP_CATASTROPHIC:
180 qp->flush_code = FLUSH_FATAL_ERR;
181 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
182 break;
183 case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
184 qp->flush_code = FLUSH_GENERAL_ERR;
185 break;
186 case IRDMA_AE_LLP_TOO_MANY_RETRIES:
187 qp->flush_code = FLUSH_RETRY_EXC_ERR;
188 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
189 break;
190 case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
191 case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
192 case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
193 qp->flush_code = FLUSH_MW_BIND_ERR;
194 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
195 break;
196 case IRDMA_AE_IB_INVALID_REQUEST:
197 qp->flush_code = FLUSH_REM_INV_REQ_ERR;
198 qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
199 break;
200 default:
201 qp->flush_code = FLUSH_GENERAL_ERR;
202 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
203 break;
204 }
205 }
206
207 /**
208 * irdma_process_aeq - handle aeq events
209 * @rf: RDMA PCI function
210 */
211 static void irdma_process_aeq(struct irdma_pci_f *rf)
212 {
213 struct irdma_sc_dev *dev = &rf->sc_dev;
214 struct irdma_aeq *aeq = &rf->aeq;
215 struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
216 struct irdma_aeqe_info aeinfo;
217 struct irdma_aeqe_info *info = &aeinfo;
218 int ret;
219 struct irdma_qp *iwqp = NULL;
220 struct irdma_sc_cq *cq = NULL;
221 struct irdma_cq *iwcq = NULL;
222 struct irdma_sc_qp *qp = NULL;
223 struct irdma_qp_host_ctx_info *ctx_info = NULL;
224 struct irdma_device *iwdev = rf->iwdev;
225 unsigned long flags;
226
227 u32 aeqcnt = 0;
228
229 if (!sc_aeq->size)
230 return;
231
232 do {
233 memset(info, 0, sizeof(*info));
234 ret = irdma_sc_get_next_aeqe(sc_aeq, info);
235 if (ret)
236 break;
237
238 aeqcnt++;
239 ibdev_dbg(&iwdev->ibdev,
240 "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
241 info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
242 info->iwarp_state, info->ae_src);
243
244 if (info->qp) {
245 spin_lock_irqsave(&rf->qptable_lock, flags);
246 iwqp = rf->qp_table[info->qp_cq_id];
247 if (!iwqp) {
248 spin_unlock_irqrestore(&rf->qptable_lock,
249 flags);
250 if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
251 atomic_dec(&iwdev->vsi.qp_suspend_reqs);
252 wake_up(&iwdev->suspend_wq);
253 continue;
254 }
255 ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
256 info->qp_cq_id);
257 continue;
258 }
259 irdma_qp_add_ref(&iwqp->ibqp);
260 spin_unlock_irqrestore(&rf->qptable_lock, flags);
261 qp = &iwqp->sc_qp;
262 spin_lock_irqsave(&iwqp->lock, flags);
263 iwqp->hw_tcp_state = info->tcp_state;
264 iwqp->hw_iwarp_state = info->iwarp_state;
265 if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
266 iwqp->last_aeq = info->ae_id;
267 spin_unlock_irqrestore(&iwqp->lock, flags);
268 ctx_info = &iwqp->ctx_info;
269 } else {
270 if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
271 continue;
272 }
273
274 switch (info->ae_id) {
275 struct irdma_cm_node *cm_node;
276 case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
277 cm_node = iwqp->cm_node;
278 if (cm_node->accept_pend) {
279 atomic_dec(&cm_node->listener->pend_accepts_cnt);
280 cm_node->accept_pend = 0;
281 }
282 iwqp->rts_ae_rcvd = 1;
283 wake_up_interruptible(&iwqp->waitq);
284 break;
285 case IRDMA_AE_LLP_FIN_RECEIVED:
286 case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
287 if (qp->term_flags)
288 break;
289 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
290 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
291 if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
292 iwqp->ibqp_state == IB_QPS_RTS) {
293 irdma_next_iw_state(iwqp,
294 IRDMA_QP_STATE_CLOSING,
295 0, 0, 0);
296 irdma_cm_disconn(iwqp);
297 }
298 irdma_schedule_cm_timer(iwqp->cm_node,
299 (struct irdma_puda_buf *)iwqp,
300 IRDMA_TIMER_TYPE_CLOSE,
301 1, 0);
302 }
303 break;
304 case IRDMA_AE_LLP_CLOSE_COMPLETE:
305 if (qp->term_flags)
306 irdma_terminate_done(qp, 0);
307 else
308 irdma_cm_disconn(iwqp);
309 break;
310 case IRDMA_AE_BAD_CLOSE:
311 case IRDMA_AE_RESET_SENT:
312 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
313 0);
314 irdma_cm_disconn(iwqp);
315 break;
316 case IRDMA_AE_LLP_CONNECTION_RESET:
317 if (atomic_read(&iwqp->close_timer_started))
318 break;
319 irdma_cm_disconn(iwqp);
320 break;
321 case IRDMA_AE_QP_SUSPEND_COMPLETE:
322 if (iwqp->iwdev->vsi.tc_change_pending) {
323 atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
324 wake_up(&iwqp->iwdev->suspend_wq);
325 }
326 break;
327 case IRDMA_AE_TERMINATE_SENT:
328 irdma_terminate_send_fin(qp);
329 break;
330 case IRDMA_AE_LLP_TERMINATE_RECEIVED:
331 irdma_terminate_received(qp, info);
332 break;
333 case IRDMA_AE_CQ_OPERATION_ERROR:
334 ibdev_err(&iwdev->ibdev,
335 "Processing an iWARP related AE for CQ misc = 0x%04X\n",
336 info->ae_id);
337 cq = (struct irdma_sc_cq *)(unsigned long)
338 info->compl_ctx;
339
340 iwcq = cq->back_cq;
341
342 if (iwcq->ibcq.event_handler) {
343 struct ib_event ibevent;
344
345 ibevent.device = iwcq->ibcq.device;
346 ibevent.event = IB_EVENT_CQ_ERR;
347 ibevent.element.cq = &iwcq->ibcq;
348 iwcq->ibcq.event_handler(&ibevent,
349 iwcq->ibcq.cq_context);
350 }
351 break;
352 case IRDMA_AE_RESET_NOT_SENT:
353 case IRDMA_AE_LLP_DOUBT_REACHABILITY:
354 case IRDMA_AE_RESOURCE_EXHAUSTION:
355 break;
356 case IRDMA_AE_PRIV_OPERATION_DENIED:
357 case IRDMA_AE_STAG_ZERO_INVALID:
358 case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
359 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
360 case IRDMA_AE_DDP_UBE_INVALID_MO:
361 case IRDMA_AE_DDP_UBE_INVALID_QN:
362 case IRDMA_AE_DDP_NO_L_BIT:
363 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
364 case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
365 case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
366 case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
367 case IRDMA_AE_INVALID_ARP_ENTRY:
368 case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
369 case IRDMA_AE_STALE_ARP_ENTRY:
370 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
371 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
372 case IRDMA_AE_LLP_SYN_RECEIVED:
373 case IRDMA_AE_LLP_TOO_MANY_RETRIES:
374 case IRDMA_AE_LCE_QP_CATASTROPHIC:
375 case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
376 case IRDMA_AE_LCE_CQ_CATASTROPHIC:
377 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
378 default:
379 ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
380 info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
381 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
382 ctx_info->roce_info->err_rq_idx_valid = info->rq;
383 if (info->rq) {
384 ctx_info->roce_info->err_rq_idx = info->wqe_idx;
385 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
386 ctx_info);
387 }
388 irdma_set_flush_fields(qp, info);
389 irdma_cm_disconn(iwqp);
390 break;
391 }
392 ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
393 if (info->rq) {
394 ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
395 ctx_info->tcp_info_valid = false;
396 ctx_info->iwarp_info_valid = true;
397 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
398 ctx_info);
399 }
400 if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
401 iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
402 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
403 irdma_cm_disconn(iwqp);
404 } else {
405 irdma_terminate_connection(qp, info);
406 }
407 break;
408 }
409 if (info->qp)
410 irdma_qp_rem_ref(&iwqp->ibqp);
411 } while (1);
412
413 if (aeqcnt)
414 irdma_sc_repost_aeq_entries(dev, aeqcnt);
415 }
416
417 /**
418 * irdma_ena_intr - set up device interrupts
419 * @dev: hardware control device structure
420 * @msix_id: id of the interrupt to be enabled
421 */
422 static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
423 {
424 dev->irq_ops->irdma_en_irq(dev, msix_id);
425 }
426
427 /**
428 * irdma_dpc - tasklet for aeq and ceq 0
429 * @t: tasklet_struct ptr
430 */
431 static void irdma_dpc(struct tasklet_struct *t)
432 {
433 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
434
435 if (rf->msix_shared)
436 irdma_process_ceq(rf, rf->ceqlist);
437 irdma_process_aeq(rf);
438 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
439 }
440
441 /**
442 * irdma_ceq_dpc - dpc handler for CEQ
443 * @t: tasklet_struct ptr
444 */
445 static void irdma_ceq_dpc(struct tasklet_struct *t)
446 {
447 struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
448 struct irdma_pci_f *rf = iwceq->rf;
449
450 irdma_process_ceq(rf, iwceq);
451 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
452 }
453
454 /**
455 * irdma_save_msix_info - copy msix vector information to iwarp device
456 * @rf: RDMA PCI function
457 *
458 * Allocate iwdev msix table and copy the msix info to the table
459 * Return 0 if successful, otherwise return error
460 */
461 static int irdma_save_msix_info(struct irdma_pci_f *rf)
462 {
463 struct irdma_qvlist_info *iw_qvlist;
464 struct irdma_qv_info *iw_qvinfo;
465 struct msix_entry *pmsix;
466 u32 ceq_idx;
467 u32 i;
468 size_t size;
469
470 if (!rf->msix_count)
471 return -EINVAL;
472
473 size = sizeof(struct irdma_msix_vector) * rf->msix_count;
474 size += struct_size(iw_qvlist, qv_info, rf->msix_count);
475 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
476 if (!rf->iw_msixtbl)
477 return -ENOMEM;
478
479 rf->iw_qvlist = (struct irdma_qvlist_info *)
480 (&rf->iw_msixtbl[rf->msix_count]);
481 iw_qvlist = rf->iw_qvlist;
482 iw_qvinfo = iw_qvlist->qv_info;
483 iw_qvlist->num_vectors = rf->msix_count;
484 if (rf->msix_count <= num_online_cpus())
485 rf->msix_shared = true;
486
487 pmsix = rf->msix_entries;
488 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
489 rf->iw_msixtbl[i].idx = pmsix->entry;
490 rf->iw_msixtbl[i].irq = pmsix->vector;
491 rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
492 if (!i) {
493 iw_qvinfo->aeq_idx = 0;
494 if (rf->msix_shared)
495 iw_qvinfo->ceq_idx = ceq_idx++;
496 else
497 iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
498 } else {
499 iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
500 iw_qvinfo->ceq_idx = ceq_idx++;
501 }
502 iw_qvinfo->itr_idx = 3;
503 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
504 pmsix++;
505 }
506
507 return 0;
508 }
509
510 /**
511 * irdma_irq_handler - interrupt handler for aeq and ceq0
512 * @irq: Interrupt request number
513 * @data: RDMA PCI function
514 */
515 static irqreturn_t irdma_irq_handler(int irq, void *data)
516 {
517 struct irdma_pci_f *rf = data;
518
519 tasklet_schedule(&rf->dpc_tasklet);
520
521 return IRQ_HANDLED;
522 }
523
524 /**
525 * irdma_ceq_handler - interrupt handler for ceq
526 * @irq: interrupt request number
527 * @data: ceq pointer
528 */
529 static irqreturn_t irdma_ceq_handler(int irq, void *data)
530 {
531 struct irdma_ceq *iwceq = data;
532
533 if (iwceq->irq != irq)
534 ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
535 iwceq->irq, irq);
536 tasklet_schedule(&iwceq->dpc_tasklet);
537
538 return IRQ_HANDLED;
539 }
540
541 /**
542 * irdma_destroy_irq - destroy device interrupts
543 * @rf: RDMA PCI function
544 * @msix_vec: msix vector to disable irq
545 * @dev_id: parameter to pass to free_irq (used during irq setup)
546 *
547 * The function is called when destroying aeq/ceq
548 */
549 static void irdma_destroy_irq(struct irdma_pci_f *rf,
550 struct irdma_msix_vector *msix_vec, void *dev_id)
551 {
552 struct irdma_sc_dev *dev = &rf->sc_dev;
553
554 dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
555 irq_update_affinity_hint(msix_vec->irq, NULL);
556 free_irq(msix_vec->irq, dev_id);
557 }
558
559 /**
560 * irdma_destroy_cqp - destroy control qp
561 * @rf: RDMA PCI function
562 * @free_hwcqp: 1 if hw cqp should be freed
563 *
564 * Issue destroy cqp request and
565 * free the resources associated with the cqp
566 */
567 static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
568 {
569 struct irdma_sc_dev *dev = &rf->sc_dev;
570 struct irdma_cqp *cqp = &rf->cqp;
571 int status = 0;
572
573 if (rf->cqp_cmpl_wq)
574 destroy_workqueue(rf->cqp_cmpl_wq);
575 if (free_hwcqp)
576 status = irdma_sc_cqp_destroy(dev->cqp);
577 if (status)
578 ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
579
580 irdma_cleanup_pending_cqp_op(rf);
581 dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
582 cqp->sq.pa);
583 cqp->sq.va = NULL;
584 kfree(cqp->scratch_array);
585 cqp->scratch_array = NULL;
586 kfree(cqp->cqp_requests);
587 cqp->cqp_requests = NULL;
588 }
589
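/**
 * irdma_destroy_virt_aeq - destroy the virtually mapped aeq
 * @rf: RDMA PCI function
 *
 * Unmap the aeq pages, free the pble resource and free the
 * virtually allocated aeq memory
 */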
590 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
591 {
592 struct irdma_aeq *aeq = &rf->aeq;
593 u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
594 dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
595
596 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
597 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
598 vfree(aeq->mem.va);
599 }
600
601 /**
602 * irdma_destroy_aeq - destroy aeq
603 * @rf: RDMA PCI function
604 *
605 * Issue a destroy aeq request and
606 * free the resources associated with the aeq
607 * The function is called during driver unload
608 */
609 static void irdma_destroy_aeq(struct irdma_pci_f *rf)
610 {
611 struct irdma_sc_dev *dev = &rf->sc_dev;
612 struct irdma_aeq *aeq = &rf->aeq;
613 int status = -EBUSY;
614
615 if (!rf->msix_shared) {
616 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
617 irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
618 }
619 if (rf->reset)
620 goto exit;
621
622 aeq->sc_aeq.size = 0;
623 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
624 if (status)
625 ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
626
627 exit:
628 if (aeq->virtual_map) {
629 irdma_destroy_virt_aeq(rf);
630 } else {
631 dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
632 aeq->mem.pa);
633 aeq->mem.va = NULL;
634 }
635 }
636
637 /**
638 * irdma_destroy_ceq - destroy ceq
639 * @rf: RDMA PCI function
640 * @iwceq: ceq to be destroyed
641 *
642 * Issue a destroy ceq request and
643 * free the resources associated with the ceq
644 */
645 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
646 {
647 struct irdma_sc_dev *dev = &rf->sc_dev;
648 int status;
649
650 if (rf->reset)
651 goto exit;
652
653 status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
654 if (status) {
655 ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
656 goto exit;
657 }
658
659 status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
660 if (status)
661 ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
662 status);
663 exit:
664 dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
665 iwceq->mem.pa);
666 iwceq->mem.va = NULL;
667 }
668
669 /**
670 * irdma_del_ceq_0 - destroy ceq 0
671 * @rf: RDMA PCI function
672 *
673 * Disable the ceq 0 interrupt and destroy the ceq 0
674 */
675 static void irdma_del_ceq_0(struct irdma_pci_f *rf)
676 {
677 struct irdma_ceq *iwceq = rf->ceqlist;
678 struct irdma_msix_vector *msix_vec;
679
680 if (rf->msix_shared) {
681 msix_vec = &rf->iw_msixtbl[0];
682 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
683 msix_vec->ceq_id,
684 msix_vec->idx, false);
685 irdma_destroy_irq(rf, msix_vec, rf);
686 } else {
687 msix_vec = &rf->iw_msixtbl[1];
688 irdma_destroy_irq(rf, msix_vec, iwceq);
689 }
690
691 irdma_destroy_ceq(rf, iwceq);
692 rf->sc_dev.ceq_valid = false;
693 rf->ceqs_count = 0;
694 }
695
696 /**
697  * irdma_del_ceqs - destroy all ceqs except CEQ 0
698 * @rf: RDMA PCI function
699 *
700  * Go through all of the device ceqs, except 0, and for each
701 * ceq disable the ceq interrupt and destroy the ceq
702 */
703 static void irdma_del_ceqs(struct irdma_pci_f *rf)
704 {
705 struct irdma_ceq *iwceq = &rf->ceqlist[1];
706 struct irdma_msix_vector *msix_vec;
707 u32 i = 0;
708
709 if (rf->msix_shared)
710 msix_vec = &rf->iw_msixtbl[1];
711 else
712 msix_vec = &rf->iw_msixtbl[2];
713
714 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
715 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
716 msix_vec->idx, false);
717 irdma_destroy_irq(rf, msix_vec, iwceq);
718 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
719 IRDMA_OP_CEQ_DESTROY);
720 dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
721 iwceq->mem.va, iwceq->mem.pa);
722 iwceq->mem.va = NULL;
723 }
724 rf->ceqs_count = 1;
725 }
726
727 /**
728 * irdma_destroy_ccq - destroy control cq
729 * @rf: RDMA PCI function
730 *
731 * Issue destroy ccq request and
732 * free the resources associated with the ccq
733 */
734 static void irdma_destroy_ccq(struct irdma_pci_f *rf)
735 {
736 struct irdma_sc_dev *dev = &rf->sc_dev;
737 struct irdma_ccq *ccq = &rf->ccq;
738 int status = 0;
739
740 if (!rf->reset)
741 status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
742 if (status)
743 ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
744 dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
745 ccq->mem_cq.pa);
746 ccq->mem_cq.va = NULL;
747 }
748
749 /**
750 * irdma_close_hmc_objects_type - delete hmc objects of a given type
751 * @dev: iwarp device
752 * @obj_type: the hmc object type to be deleted
753 * @hmc_info: host memory info struct
754 * @privileged: permission to close HMC objects
755 * @reset: true if called before reset
756 */
757 static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
758 enum irdma_hmc_rsrc_type obj_type,
759 struct irdma_hmc_info *hmc_info,
760 bool privileged, bool reset)
761 {
762 struct irdma_hmc_del_obj_info info = {};
763
764 info.hmc_info = hmc_info;
765 info.rsrc_type = obj_type;
766 info.count = hmc_info->hmc_obj[obj_type].cnt;
767 info.privileged = privileged;
768 if (irdma_sc_del_hmc_obj(dev, &info, reset))
769 ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
770 obj_type);
771 }
772
773 /**
774 * irdma_del_hmc_objects - remove all device hmc objects
775 * @dev: iwarp device
776 * @hmc_info: hmc_info to free
777 * @privileged: permission to delete HMC objects
778 * @reset: true if called before reset
779 * @vers: hardware version
780 */
781 static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
782 struct irdma_hmc_info *hmc_info, bool privileged,
783 bool reset, enum irdma_vers vers)
784 {
785 unsigned int i;
786
787 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
788 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
789 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
790 hmc_info, privileged, reset);
791 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
792 break;
793 }
794 }
795
796 /**
797 * irdma_create_hmc_obj_type - create hmc object of a given type
798 * @dev: hardware control device structure
799 * @info: information for the hmc object to create
800 */
801 static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
802 struct irdma_hmc_create_obj_info *info)
803 {
804 return irdma_sc_create_hmc_obj(dev, info);
805 }
806
807 /**
808 * irdma_create_hmc_objs - create all hmc objects for the device
809 * @rf: RDMA PCI function
810 * @privileged: permission to create HMC objects
811 * @vers: HW version
812 *
813 * Create the device hmc objects and allocate hmc pages
814 * Return 0 if successful, otherwise clean up and return error
815 */
816 static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
817 enum irdma_vers vers)
818 {
819 struct irdma_sc_dev *dev = &rf->sc_dev;
820 struct irdma_hmc_create_obj_info info = {};
821 int i, status = 0;
822
823 info.hmc_info = dev->hmc_info;
824 info.privileged = privileged;
825 info.entry_type = rf->sd_type;
826
827 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
828 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
829 info.rsrc_type = iw_hmc_obj_types[i];
830 info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
831 info.add_sd_cnt = 0;
832 status = irdma_create_hmc_obj_type(dev, &info);
833 if (status) {
834 ibdev_dbg(to_ibdev(dev),
835 "ERR: create obj type %d status = %d\n",
836 iw_hmc_obj_types[i], status);
837 break;
838 }
839 }
840 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
841 break;
842 }
843
844 if (!status)
845 return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
846 true, true);
847
848 while (i) {
849 i--;
850 /* destroy the hmc objects of a given type */
851 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
852 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
853 dev->hmc_info, privileged,
854 false);
855 }
856
857 return status;
858 }
859
860 /**
861 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
862 * @rf: RDMA PCI function
863 * @memptr: points to the memory addresses
864 * @size: size of memory needed
865 * @mask: mask for the aligned memory
866 *
867 * Get aligned memory of the requested size and
868 * update the memptr to point to the new aligned memory
869 * Return 0 if successful, otherwise return no memory error
870 */
871 static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
872 struct irdma_dma_mem *memptr, u32 size,
873 u32 mask)
874 {
875 unsigned long va, newva;
876 unsigned long extra;
877
878 va = (unsigned long)rf->obj_next.va;
879 newva = va;
880 if (mask)
881 newva = ALIGN(va, (unsigned long)mask + 1ULL);
882 extra = newva - va;
883 memptr->va = (u8 *)va + extra;
884 memptr->pa = rf->obj_next.pa + extra;
885 memptr->size = size;
886 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
887 return -ENOMEM;
888
889 rf->obj_next.va = (u8 *)memptr->va + size;
890 rf->obj_next.pa = memptr->pa + size;
891
892 return 0;
893 }
894
895 /**
896 * irdma_create_cqp - create control qp
897 * @rf: RDMA PCI function
898 *
899 * Return 0, if the cqp and all the resources associated with it
900 * are successfully created, otherwise return error
901 */
902 static int irdma_create_cqp(struct irdma_pci_f *rf)
903 {
904 u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
905 struct irdma_dma_mem mem;
906 struct irdma_sc_dev *dev = &rf->sc_dev;
907 struct irdma_cqp_init_info cqp_init_info = {};
908 struct irdma_cqp *cqp = &rf->cqp;
909 u16 maj_err, min_err;
910 int i, status;
911
912 cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
913 if (!cqp->cqp_requests)
914 return -ENOMEM;
915
916 cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
917 if (!cqp->scratch_array) {
918 kfree(cqp->cqp_requests);
919 return -ENOMEM;
920 }
921
922 dev->cqp = &cqp->sc_cqp;
923 dev->cqp->dev = dev;
924 cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
925 IRDMA_CQP_ALIGNMENT);
926 cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
927 &cqp->sq.pa, GFP_KERNEL);
928 if (!cqp->sq.va) {
929 kfree(cqp->scratch_array);
930 kfree(cqp->cqp_requests);
931 return -ENOMEM;
932 }
933
934 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
935 IRDMA_HOST_CTX_ALIGNMENT_M);
936 if (status)
937 goto exit;
938
939 dev->cqp->host_ctx_pa = mem.pa;
940 dev->cqp->host_ctx = mem.va;
941 /* populate the cqp init info */
942 cqp_init_info.dev = dev;
943 cqp_init_info.sq_size = sqsize;
944 cqp_init_info.sq = cqp->sq.va;
945 cqp_init_info.sq_pa = cqp->sq.pa;
946 cqp_init_info.host_ctx_pa = mem.pa;
947 cqp_init_info.host_ctx = mem.va;
948 cqp_init_info.hmc_profile = rf->rsrc_profile;
949 cqp_init_info.scratch_array = cqp->scratch_array;
950 cqp_init_info.protocol_used = rf->protocol_used;
951
952 switch (rf->rdma_ver) {
953 case IRDMA_GEN_1:
954 cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
955 break;
956 case IRDMA_GEN_2:
957 cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
958 break;
959 }
960 status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
961 if (status) {
962 ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
963 goto exit;
964 }
965
966 spin_lock_init(&cqp->req_lock);
967 spin_lock_init(&cqp->compl_lock);
968
969 status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
970 if (status) {
971 ibdev_dbg(to_ibdev(dev),
972 "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
973 status, maj_err, min_err);
974 goto exit;
975 }
976
977 INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
978 INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
979
980 /* init the waitqueue of the cqp_requests and add them to the list */
981 for (i = 0; i < sqsize; i++) {
982 init_waitqueue_head(&cqp->cqp_requests[i].waitq);
983 list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
984 }
985 init_waitqueue_head(&cqp->remove_wq);
986 return 0;
987
988 exit:
989 irdma_destroy_cqp(rf, false);
990
991 return status;
992 }
993
994 /**
995 * irdma_create_ccq - create control cq
996 * @rf: RDMA PCI function
997 *
998 * Return 0, if the ccq and the resources associated with it
999 * are successfully created, otherwise return error
1000 */
1001 static int irdma_create_ccq(struct irdma_pci_f *rf)
1002 {
1003 struct irdma_sc_dev *dev = &rf->sc_dev;
1004 struct irdma_ccq_init_info info = {};
1005 struct irdma_ccq *ccq = &rf->ccq;
1006 int status;
1007
1008 dev->ccq = &ccq->sc_cq;
1009 dev->ccq->dev = dev;
1010 info.dev = dev;
1011 ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
1012 ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
1013 IRDMA_CQ0_ALIGNMENT);
1014 ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
1015 &ccq->mem_cq.pa, GFP_KERNEL);
1016 if (!ccq->mem_cq.va)
1017 return -ENOMEM;
1018
1019 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
1020 ccq->shadow_area.size,
1021 IRDMA_SHADOWAREA_M);
1022 if (status)
1023 goto exit;
1024
1025 ccq->sc_cq.back_cq = ccq;
1026 /* populate the ccq init info */
1027 info.cq_base = ccq->mem_cq.va;
1028 info.cq_pa = ccq->mem_cq.pa;
1029 info.num_elem = IW_CCQ_SIZE;
1030 info.shadow_area = ccq->shadow_area.va;
1031 info.shadow_area_pa = ccq->shadow_area.pa;
1032 info.ceqe_mask = false;
1033 info.ceq_id_valid = true;
1034 info.shadow_read_threshold = 16;
1035 info.vsi = &rf->default_vsi;
1036 status = irdma_sc_ccq_init(dev->ccq, &info);
1037 if (!status)
1038 status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
1039 exit:
1040 if (status) {
1041 dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
1042 ccq->mem_cq.va, ccq->mem_cq.pa);
1043 ccq->mem_cq.va = NULL;
1044 }
1045
1046 return status;
1047 }
1048
1049 /**
1050 * irdma_alloc_set_mac - set up a mac address table entry
1051 * @iwdev: irdma device
1052 *
1053 * Allocate a mac ip entry and add it to the hw table Return 0
1054 * if successful, otherwise return error
1055 */
1056 static int irdma_alloc_set_mac(struct irdma_device *iwdev)
1057 {
1058 int status;
1059
1060 status = irdma_alloc_local_mac_entry(iwdev->rf,
1061 &iwdev->mac_ip_table_idx);
1062 if (!status) {
1063 status = irdma_add_local_mac_entry(iwdev->rf,
1064 (const u8 *)iwdev->netdev->dev_addr,
1065 (u8)iwdev->mac_ip_table_idx);
1066 if (status)
1067 irdma_del_local_mac_entry(iwdev->rf,
1068 (u8)iwdev->mac_ip_table_idx);
1069 }
1070 return status;
1071 }
1072
1073 /**
1074 * irdma_cfg_ceq_vector - set up the msix interrupt vector for
1075 * ceq
1076 * @rf: RDMA PCI function
1077 * @iwceq: ceq associated with the vector
1078 * @ceq_id: the id number of the iwceq
1079 * @msix_vec: interrupt vector information
1080 *
1081 * Allocate interrupt resources and enable irq handling
1082 * Return 0 if successful, otherwise return error
1083 */
1084 static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1085 u32 ceq_id, struct irdma_msix_vector *msix_vec)
1086 {
1087 int status;
1088
1089 if (rf->msix_shared && !ceq_id) {
1090 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1091 status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1092 "AEQCEQ", rf);
1093 } else {
1094 tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
1095
1096 status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
1097 "CEQ", iwceq);
1098 }
1099 cpumask_clear(&msix_vec->mask);
1100 cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
1101 irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
1102 if (status) {
1103 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
1104 return status;
1105 }
1106
1107 msix_vec->ceq_id = ceq_id;
1108 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
1109
1110 return 0;
1111 }
1112
1113 /**
1114 * irdma_cfg_aeq_vector - set up the msix vector for aeq
1115 * @rf: RDMA PCI function
1116 *
1117 * Allocate interrupt resources and enable irq handling
1118 * Return 0 if successful, otherwise return error
1119 */
1120 static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
1121 {
1122 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
1123 u32 ret = 0;
1124
1125 if (!rf->msix_shared) {
1126 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1127 ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1128 "irdma", rf);
1129 }
1130 if (ret) {
1131 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
1132 return -EINVAL;
1133 }
1134
1135 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
1136
1137 return 0;
1138 }
1139
1140 /**
1141 * irdma_create_ceq - create completion event queue
1142 * @rf: RDMA PCI function
1143 * @iwceq: pointer to the ceq resources to be created
1144 * @ceq_id: the id number of the iwceq
1145 * @vsi: SC vsi struct
1146 *
1147 * Return 0, if the ceq and the resources associated with it
1148 * are successfully created, otherwise return error
1149 */
1150 static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1151 u32 ceq_id, struct irdma_sc_vsi *vsi)
1152 {
1153 int status;
1154 struct irdma_ceq_init_info info = {};
1155 struct irdma_sc_dev *dev = &rf->sc_dev;
1156 u64 scratch;
1157 u32 ceq_size;
1158
1159 info.ceq_id = ceq_id;
1160 iwceq->rf = rf;
1161 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
1162 dev->hw_attrs.max_hw_ceq_size);
1163 iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
1164 IRDMA_CEQ_ALIGNMENT);
1165 iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
1166 &iwceq->mem.pa, GFP_KERNEL);
1167 if (!iwceq->mem.va)
1168 return -ENOMEM;
1169
1170 info.ceq_id = ceq_id;
1171 info.ceqe_base = iwceq->mem.va;
1172 info.ceqe_pa = iwceq->mem.pa;
1173 info.elem_cnt = ceq_size;
1174 iwceq->sc_ceq.ceq_id = ceq_id;
1175 info.dev = dev;
1176 info.vsi = vsi;
1177 scratch = (uintptr_t)&rf->cqp.sc_cqp;
1178 status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
1179 if (!status) {
1180 if (dev->ceq_valid)
1181 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
1182 IRDMA_OP_CEQ_CREATE);
1183 else
1184 status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
1185 }
1186
1187 if (status) {
1188 dma_free_coherent(dev->hw->device, iwceq->mem.size,
1189 iwceq->mem.va, iwceq->mem.pa);
1190 iwceq->mem.va = NULL;
1191 }
1192
1193 return status;
1194 }
1195
1196 /**
1197  * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
1198 * @rf: RDMA PCI function
1199 *
1200 * Allocate a list for all device completion event queues
1201  * Create the ceq 0 and configure its msix interrupt vector
1202 * Return 0, if successfully set up, otherwise return error
1203 */
1204 static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
1205 {
1206 struct irdma_ceq *iwceq;
1207 struct irdma_msix_vector *msix_vec;
1208 u32 i;
1209 int status = 0;
1210 u32 num_ceqs;
1211
1212 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1213 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
1214 if (!rf->ceqlist) {
1215 status = -ENOMEM;
1216 goto exit;
1217 }
1218
1219 iwceq = &rf->ceqlist[0];
1220 status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
1221 if (status) {
1222 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
1223 status);
1224 goto exit;
1225 }
1226
1227 spin_lock_init(&iwceq->ce_lock);
1228 i = rf->msix_shared ? 0 : 1;
1229 msix_vec = &rf->iw_msixtbl[i];
1230 iwceq->irq = msix_vec->irq;
1231 iwceq->msix_idx = msix_vec->idx;
1232 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
1233 if (status) {
1234 irdma_destroy_ceq(rf, iwceq);
1235 goto exit;
1236 }
1237
1238 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1239 rf->ceqs_count++;
1240
1241 exit:
1242 if (status && !rf->ceqs_count) {
1243 kfree(rf->ceqlist);
1244 rf->ceqlist = NULL;
1245 return status;
1246 }
1247 rf->sc_dev.ceq_valid = true;
1248
1249 return 0;
1250 }
1251
1252 /**
1253  * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
1254 * @rf: RDMA PCI function
1255 * @vsi: VSI structure for this CEQ
1256 *
1257 * Allocate a list for all device completion event queues
1258  * Create the ceqs and configure their msix interrupt vectors
1259 * Return 0, if ceqs are successfully set up, otherwise return error
1260 */
1261 static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
1262 {
1263 u32 i;
1264 u32 ceq_id;
1265 struct irdma_ceq *iwceq;
1266 struct irdma_msix_vector *msix_vec;
1267 int status;
1268 u32 num_ceqs;
1269
1270 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1271 i = (rf->msix_shared) ? 1 : 2;
1272 for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
1273 iwceq = &rf->ceqlist[ceq_id];
1274 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
1275 if (status) {
1276 ibdev_dbg(&rf->iwdev->ibdev,
1277 "ERR: create ceq status = %d\n", status);
1278 goto del_ceqs;
1279 }
1280 spin_lock_init(&iwceq->ce_lock);
1281 msix_vec = &rf->iw_msixtbl[i];
1282 iwceq->irq = msix_vec->irq;
1283 iwceq->msix_idx = msix_vec->idx;
1284 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
1285 if (status) {
1286 irdma_destroy_ceq(rf, iwceq);
1287 goto del_ceqs;
1288 }
1289 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1290 rf->ceqs_count++;
1291 }
1292
1293 return 0;
1294
1295 del_ceqs:
1296 irdma_del_ceqs(rf);
1297
1298 return status;
1299 }
1300
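/**
 * irdma_create_virt_aeq - create a virtually mapped aeq
 * @rf: RDMA PCI function
 * @size: number of aeq entries
 *
 * Allocate the aeq memory with vzalloc and map its pages through pbles,
 * used as a fallback when a contiguous DMA allocation is not available
 * Return 0 if successful, otherwise return error
 */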
1301 static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
1302 {
1303 struct irdma_aeq *aeq = &rf->aeq;
1304 dma_addr_t *pg_arr;
1305 u32 pg_cnt;
1306 int status;
1307
1308 if (rf->rdma_ver < IRDMA_GEN_2)
1309 return -EOPNOTSUPP;
1310
1311 aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
1312 aeq->mem.va = vzalloc(aeq->mem.size);
1313
1314 if (!aeq->mem.va)
1315 return -ENOMEM;
1316
1317 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
1318 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
1319 if (status) {
1320 vfree(aeq->mem.va);
1321 return status;
1322 }
1323
1324 pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
1325 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
1326 if (status) {
1327 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
1328 vfree(aeq->mem.va);
1329 return status;
1330 }
1331
1332 return 0;
1333 }
1334
1335 /**
1336 * irdma_create_aeq - create async event queue
1337 * @rf: RDMA PCI function
1338 *
1339 * Return 0, if the aeq and the resources associated with it
1340 * are successfully created, otherwise return error
1341 */
1342 static int irdma_create_aeq(struct irdma_pci_f *rf)
1343 {
1344 struct irdma_aeq_init_info info = {};
1345 struct irdma_sc_dev *dev = &rf->sc_dev;
1346 struct irdma_aeq *aeq = &rf->aeq;
1347 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
1348 u32 aeq_size;
1349 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
1350 int status;
1351
1352 aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
1353 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1354 aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
1355
1356 aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
1357 IRDMA_AEQ_ALIGNMENT);
1358 aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
1359 &aeq->mem.pa,
1360 GFP_KERNEL | __GFP_NOWARN);
1361 if (aeq->mem.va)
1362 goto skip_virt_aeq;
1363
1364 /* physically mapped aeq failed. setup virtual aeq */
1365 status = irdma_create_virt_aeq(rf, aeq_size);
1366 if (status)
1367 return status;
1368
1369 info.virtual_map = true;
1370 aeq->virtual_map = info.virtual_map;
1371 info.pbl_chunk_size = 1;
1372 info.first_pm_pbl_idx = aeq->palloc.level1.idx;
1373
1374 skip_virt_aeq:
1375 info.aeqe_base = aeq->mem.va;
1376 info.aeq_elem_pa = aeq->mem.pa;
1377 info.elem_cnt = aeq_size;
1378 info.dev = dev;
1379 info.msix_idx = rf->iw_msixtbl->idx;
1380 status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
1381 if (status)
1382 goto err;
1383
1384 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
1385 if (status)
1386 goto err;
1387
1388 return 0;
1389
1390 err:
1391 if (aeq->virtual_map) {
1392 irdma_destroy_virt_aeq(rf);
1393 } else {
1394 dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
1395 aeq->mem.pa);
1396 aeq->mem.va = NULL;
1397 }
1398
1399 return status;
1400 }
1401
1402 /**
1403 * irdma_setup_aeq - set up the device aeq
1404 * @rf: RDMA PCI function
1405 *
1406 * Create the aeq and configure its msix interrupt vector
1407 * Return 0 if successful, otherwise return error
1408 */
1409 static int irdma_setup_aeq(struct irdma_pci_f *rf)
1410 {
1411 struct irdma_sc_dev *dev = &rf->sc_dev;
1412 int status;
1413
1414 status = irdma_create_aeq(rf);
1415 if (status)
1416 return status;
1417
1418 status = irdma_cfg_aeq_vector(rf);
1419 if (status) {
1420 irdma_destroy_aeq(rf);
1421 return status;
1422 }
1423
1424 if (!rf->msix_shared)
1425 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
1426
1427 return 0;
1428 }
1429
1430 /**
1431 * irdma_initialize_ilq - create iwarp local queue for cm
1432 * @iwdev: irdma device
1433 *
1434 * Return 0 if successful, otherwise return error
1435 */
1436 static int irdma_initialize_ilq(struct irdma_device *iwdev)
1437 {
1438 struct irdma_puda_rsrc_info info = {};
1439 int status;
1440
1441 info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
1442 info.cq_id = 1;
1443 info.qp_id = 1;
1444 info.count = 1;
1445 info.pd_id = 1;
1446 info.abi_ver = IRDMA_ABI_VER;
1447 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1448 info.rq_size = info.sq_size;
1449 info.buf_size = 1024;
1450 info.tx_buf_cnt = 2 * info.sq_size;
1451 info.receive = irdma_receive_ilq;
1452 info.xmit_complete = irdma_free_sqbuf;
1453 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1454 if (status)
1455 ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
1456
1457 return status;
1458 }
1459
1460 /**
1461 * irdma_initialize_ieq - create iwarp exception queue
1462 * @iwdev: irdma device
1463 *
1464 * Return 0 if successful, otherwise return error
1465 */
1466 static int irdma_initialize_ieq(struct irdma_device *iwdev)
1467 {
1468 struct irdma_puda_rsrc_info info = {};
1469 int status;
1470
1471 info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
1472 info.cq_id = 2;
1473 info.qp_id = iwdev->vsi.exception_lan_q;
1474 info.count = 1;
1475 info.pd_id = 2;
1476 info.abi_ver = IRDMA_ABI_VER;
1477 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1478 info.rq_size = info.sq_size;
1479 info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
1480 info.tx_buf_cnt = 4096;
1481 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1482 if (status)
1483 ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
1484
1485 return status;
1486 }
1487
1488 /**
1489 * irdma_reinitialize_ieq - destroy and re-create ieq
1490 * @vsi: VSI structure
1491 */
1492 void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
1493 {
1494 struct irdma_device *iwdev = vsi->back_vsi;
1495 struct irdma_pci_f *rf = iwdev->rf;
1496
1497 irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
1498 if (irdma_initialize_ieq(iwdev)) {
1499 iwdev->rf->reset = true;
1500 rf->gen_ops.request_reset(rf);
1501 }
1502 }
1503
1504 /**
1505 * irdma_hmc_setup - create hmc objects for the device
1506 * @rf: RDMA PCI function
1507 *
1508 * Set up the device private memory space for the number and size of
1509 * the hmc objects and create the objects
1510 * Return 0 if successful, otherwise return error
1511 */
1512 static int irdma_hmc_setup(struct irdma_pci_f *rf)
1513 {
1514 int status;
1515 u32 qpcnt;
1516
1517 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
1518
1519 rf->sd_type = IRDMA_SD_TYPE_DIRECT;
1520 status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
1521 if (status)
1522 return status;
1523
1524 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
1525
1526 return status;
1527 }
1528
1529 /**
1530 * irdma_del_init_mem - deallocate memory resources
1531 * @rf: RDMA PCI function
1532 */
1533 static void irdma_del_init_mem(struct irdma_pci_f *rf)
1534 {
1535 struct irdma_sc_dev *dev = &rf->sc_dev;
1536
1537 kfree(dev->hmc_info->sd_table.sd_entry);
1538 dev->hmc_info->sd_table.sd_entry = NULL;
1539 kfree(rf->mem_rsrc);
1540 rf->mem_rsrc = NULL;
1541 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1542 rf->obj_mem.pa);
1543 rf->obj_mem.va = NULL;
1544 if (rf->rdma_ver != IRDMA_GEN_1) {
1545 bitmap_free(rf->allocated_ws_nodes);
1546 rf->allocated_ws_nodes = NULL;
1547 }
1548 kfree(rf->ceqlist);
1549 rf->ceqlist = NULL;
1550 kfree(rf->iw_msixtbl);
1551 rf->iw_msixtbl = NULL;
1552 kfree(rf->hmc_info_mem);
1553 rf->hmc_info_mem = NULL;
1554 }
1555
1556 /**
1557 * irdma_initialize_dev - initialize device
1558 * @rf: RDMA PCI function
1559 *
1560 * Allocate memory for the hmc objects and initialize iwdev
1561 * Return 0 if successful, otherwise clean up the resources
1562 * and return error
1563 */
1564 static int irdma_initialize_dev(struct irdma_pci_f *rf)
1565 {
1566 int status;
1567 struct irdma_sc_dev *dev = &rf->sc_dev;
1568 struct irdma_device_init_info info = {};
1569 struct irdma_dma_mem mem;
1570 u32 size;
1571
1572 size = sizeof(struct irdma_hmc_pble_rsrc) +
1573 sizeof(struct irdma_hmc_info) +
1574 (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
1575
1576 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1577 if (!rf->hmc_info_mem)
1578 return -ENOMEM;
1579
1580 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
1581 dev->hmc_info = &rf->hw.hmc;
1582 dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
1583 (rf->pble_rsrc + 1);
1584
1585 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
1586 IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
1587 if (status)
1588 goto error;
1589
1590 info.fpm_query_buf_pa = mem.pa;
1591 info.fpm_query_buf = mem.va;
1592
1593 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
1594 IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
1595 if (status)
1596 goto error;
1597
1598 info.fpm_commit_buf_pa = mem.pa;
1599 info.fpm_commit_buf = mem.va;
1600
1601 info.bar0 = rf->hw.hw_addr;
1602 info.hmc_fn_id = rf->pf_id;
1603 info.hw = &rf->hw;
1604 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
1605 if (status)
1606 goto error;
1607
1608 return status;
1609 error:
1610 kfree(rf->hmc_info_mem);
1611 rf->hmc_info_mem = NULL;
1612
1613 return status;
1614 }
1615
1616 /**
1617 * irdma_rt_deinit_hw - clean up the irdma device resources
1618 * @iwdev: irdma device
1619 *
1620 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
1621 * device queues and free the pble and the hmc objects
1622 */
1623 void irdma_rt_deinit_hw(struct irdma_device *iwdev)
1624 {
1625 ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
1626
1627 switch (iwdev->init_state) {
1628 case IP_ADDR_REGISTERED:
1629 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1630 irdma_del_local_mac_entry(iwdev->rf,
1631 (u8)iwdev->mac_ip_table_idx);
1632 fallthrough;
1633 case AEQ_CREATED:
1634 case PBLE_CHUNK_MEM:
1635 case CEQS_CREATED:
1636 case IEQ_CREATED:
1637 if (!iwdev->roce_mode)
1638 irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
1639 iwdev->rf->reset);
1640 fallthrough;
1641 case ILQ_CREATED:
1642 if (!iwdev->roce_mode)
1643 irdma_puda_dele_rsrc(&iwdev->vsi,
1644 IRDMA_PUDA_RSRC_TYPE_ILQ,
1645 iwdev->rf->reset);
1646 break;
1647 default:
1648 ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1649 break;
1650 }
1651
1652 irdma_cleanup_cm_core(&iwdev->cm_core);
1653 if (iwdev->vsi.pestat) {
1654 irdma_vsi_stats_free(&iwdev->vsi);
1655 kfree(iwdev->vsi.pestat);
1656 }
1657 if (iwdev->cleanup_wq)
1658 destroy_workqueue(iwdev->cleanup_wq);
1659 }
1660
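/**
 * irdma_setup_init_state - save msix info and create device object memory
 * @rf: RDMA PCI function
 *
 * Copy the msix vector information, allocate the DMA object memory
 * used for aligned allocations and initialize the device
 * Return 0 if successful, otherwise return error
 */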
1661 static int irdma_setup_init_state(struct irdma_pci_f *rf)
1662 {
1663 int status;
1664
1665 status = irdma_save_msix_info(rf);
1666 if (status)
1667 return status;
1668
1669 rf->hw.device = &rf->pcidev->dev;
1670 rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
1671 rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
1672 &rf->obj_mem.pa, GFP_KERNEL);
1673 if (!rf->obj_mem.va) {
1674 status = -ENOMEM;
1675 goto clean_msixtbl;
1676 }
1677
1678 rf->obj_next = rf->obj_mem;
1679 status = irdma_initialize_dev(rf);
1680 if (status)
1681 goto clean_obj_mem;
1682
1683 return 0;
1684
1685 clean_obj_mem:
1686 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1687 rf->obj_mem.pa);
1688 rf->obj_mem.va = NULL;
1689 clean_msixtbl:
1690 kfree(rf->iw_msixtbl);
1691 rf->iw_msixtbl = NULL;
1692 return status;
1693 }
1694
1695 /**
1696 * irdma_get_used_rsrc - determine resources used internally
1697 * @iwdev: irdma device
1698 *
1699 * Called at the end of open to get all internal allocations
1700 */
1701 static void irdma_get_used_rsrc(struct irdma_device *iwdev)
1702 {
1703 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1704 iwdev->rf->max_pd);
1705 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1706 iwdev->rf->max_qp);
1707 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1708 iwdev->rf->max_cq);
1709 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1710 iwdev->rf->max_mr);
1711 }
1712
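/**
 * irdma_ctrl_deinit_hw - destroy the control portion of HW
 * @rf: RDMA PCI function
 *
 * Tear down the control queues, HMC objects and resources created
 * by irdma_ctrl_init_hw, in reverse order based on init_state
 */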
1713 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1714 {
1715 enum init_completion_state state = rf->init_state;
1716
1717 rf->init_state = INVALID_STATE;
1718 if (rf->rsrc_created) {
1719 irdma_destroy_aeq(rf);
1720 irdma_destroy_pble_prm(rf->pble_rsrc);
1721 irdma_del_ceqs(rf);
1722 rf->rsrc_created = false;
1723 }
1724 switch (state) {
1725 case CEQ0_CREATED:
1726 irdma_del_ceq_0(rf);
1727 fallthrough;
1728 case CCQ_CREATED:
1729 irdma_destroy_ccq(rf);
1730 fallthrough;
1731 case HW_RSRC_INITIALIZED:
1732 case HMC_OBJS_CREATED:
1733 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1734 rf->reset, rf->rdma_ver);
1735 fallthrough;
1736 case CQP_CREATED:
1737 irdma_destroy_cqp(rf, true);
1738 fallthrough;
1739 case INITIAL_STATE:
1740 irdma_del_init_mem(rf);
1741 break;
1742 case INVALID_STATE:
1743 default:
1744 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
1745 break;
1746 }
1747 }
1748
1749 /**
1750 * irdma_rt_init_hw - Initializes runtime portion of HW
1751 * @iwdev: irdma device
1752 * @l2params: qos, tc, mtu info from netdev driver
1753 *
1754 * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
1755 * device resource objects.
1756 */
1757 int irdma_rt_init_hw(struct irdma_device *iwdev,
1758 struct irdma_l2params *l2params)
1759 {
1760 struct irdma_pci_f *rf = iwdev->rf;
1761 struct irdma_sc_dev *dev = &rf->sc_dev;
1762 struct irdma_vsi_init_info vsi_info = {};
1763 struct irdma_vsi_stats_info stats_info = {};
1764 int status;
1765
1766 vsi_info.dev = dev;
1767 vsi_info.back_vsi = iwdev;
1768 vsi_info.params = l2params;
1769 vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1770 vsi_info.register_qset = rf->gen_ops.register_qset;
1771 vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1772 vsi_info.exception_lan_q = 2;
1773 irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1774
1775 status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1776 if (status)
1777 return status;
1778
1779 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1780 if (!stats_info.pestat) {
1781 irdma_cleanup_cm_core(&iwdev->cm_core);
1782 return -ENOMEM;
1783 }
1784 stats_info.fcn_id = dev->hmc_fn_id;
1785 status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1786 if (status) {
1787 irdma_cleanup_cm_core(&iwdev->cm_core);
1788 kfree(stats_info.pestat);
1789 return status;
1790 }
1791
1792 do {
1793 if (!iwdev->roce_mode) {
1794 status = irdma_initialize_ilq(iwdev);
1795 if (status)
1796 break;
1797 iwdev->init_state = ILQ_CREATED;
1798 status = irdma_initialize_ieq(iwdev);
1799 if (status)
1800 break;
1801 iwdev->init_state = IEQ_CREATED;
1802 }
1803 if (!rf->rsrc_created) {
1804 status = irdma_setup_ceqs(rf, &iwdev->vsi);
1805 if (status)
1806 break;
1807
1808 iwdev->init_state = CEQS_CREATED;
1809
1810 status = irdma_hmc_init_pble(&rf->sc_dev,
1811 rf->pble_rsrc);
1812 if (status) {
1813 irdma_del_ceqs(rf);
1814 break;
1815 }
1816
1817 iwdev->init_state = PBLE_CHUNK_MEM;
1818
1819 status = irdma_setup_aeq(rf);
1820 if (status) {
1821 irdma_destroy_pble_prm(rf->pble_rsrc);
1822 irdma_del_ceqs(rf);
1823 break;
1824 }
1825 iwdev->init_state = AEQ_CREATED;
1826 rf->rsrc_created = true;
1827 }
1828
1829 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1830 irdma_alloc_set_mac(iwdev);
1831 irdma_add_ip(iwdev);
1832 iwdev->init_state = IP_ADDR_REGISTERED;
1833
1834 /* handles asynchronous cleanup tasks - disconnect CM, free qp,
1835 * free cq bufs
1836 */
1837 iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
1838 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1839 if (!iwdev->cleanup_wq)
1840 return -ENOMEM;
1841 irdma_get_used_rsrc(iwdev);
1842 init_waitqueue_head(&iwdev->suspend_wq);
1843
1844 return 0;
1845 } while (0);
1846
1847 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
1848 status, iwdev->init_state);
1849 irdma_rt_deinit_hw(iwdev);
1850
1851 return status;
1852 }
1853
1854 /**
1855 * irdma_ctrl_init_hw - Initializes control portion of HW
1856 * @rf: RDMA PCI function
1857 *
1858  * Create admin queues, HMC objects and RF resource objects
1859 */
1860 int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
1861 {
1862 struct irdma_sc_dev *dev = &rf->sc_dev;
1863 int status;
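/* Each step below advances rf->init_state so that a failure can be
 * unwound from exactly the point reached (see irdma_ctrl_deinit_hw()).
 */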
1864 do {
1865 status = irdma_setup_init_state(rf);
1866 if (status)
1867 break;
1868 rf->init_state = INITIAL_STATE;
1869
1870 status = irdma_create_cqp(rf);
1871 if (status)
1872 break;
1873 rf->init_state = CQP_CREATED;
1874
1875 status = irdma_hmc_setup(rf);
1876 if (status)
1877 break;
1878 rf->init_state = HMC_OBJS_CREATED;
1879
1880 status = irdma_initialize_hw_rsrc(rf);
1881 if (status)
1882 break;
1883 rf->init_state = HW_RSRC_INITIALIZED;
1884
1885 status = irdma_create_ccq(rf);
1886 if (status)
1887 break;
1888 rf->init_state = CCQ_CREATED;
1889
1890 dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
1891 if (rf->rdma_ver != IRDMA_GEN_1) {
1892 status = irdma_get_rdma_features(dev);
1893 if (status)
1894 break;
1895 }
1896
1897 status = irdma_setup_ceq_0(rf);
1898 if (status)
1899 break;
1900 rf->init_state = CEQ0_CREATED;
1901 /* Handles processing of CQP completions */
1902 rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
1903 WQ_HIGHPRI | WQ_UNBOUND);
1904 if (!rf->cqp_cmpl_wq) {
1905 status = -ENOMEM;
1906 break;
1907 }
1908 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
1909 irdma_sc_ccq_arm(dev->ccq);
1910 return 0;
1911 } while (0);
1912
1913 dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
1914 rf->init_state, status);
1915 irdma_ctrl_deinit_hw(rf);
1916 return status;
1917 }
1918
1919 /**
1920 * irdma_set_hw_rsrc - set hw memory resources.
1921 * @rf: RDMA PCI function
1922 */
1923 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
1924 {
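/* Carve up the single rf->mem_rsrc allocation: the ARP table comes
 * first, then one allocation bitmap per resource type, then the
 * qp_table pointer array. The layout must match
 * irdma_calc_mem_rsrc_size().
 */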
1925 rf->allocated_qps = (void *)(rf->mem_rsrc +
1926 (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
1927 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
1928 rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
1929 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
1930 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
1931 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
1932 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
1933 rf->qp_table = (struct irdma_qp **)
1934 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
1935
1936 spin_lock_init(&rf->rsrc_lock);
1937 spin_lock_init(&rf->arp_lock);
1938 spin_lock_init(&rf->qptable_lock);
1939 spin_lock_init(&rf->qh_list_lock);
1940 }
1941
1942 /**
1943 * irdma_calc_mem_rsrc_size - calculate memory resources size.
1944 * @rf: RDMA PCI function
1945 */
1946 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
1947 {
1948 u32 rsrc_size;
1949
1950 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
1951 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
1952 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
1953 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
1954 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
1955 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
1956 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
1957 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
1958 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
1959
1960 return rsrc_size;
1961 }
1962
1963 /**
1964 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
1965 * @rf: RDMA PCI function
1966 */
1967 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
1968 {
1969 u32 rsrc_size;
1970 u32 mrdrvbits;
1971 u32 ret;
1972
1973 if (rf->rdma_ver != IRDMA_GEN_1) {
1974 rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
1975 GFP_KERNEL);
1976 if (!rf->allocated_ws_nodes)
1977 return -ENOMEM;
1978
1979 set_bit(0, rf->allocated_ws_nodes);
1980 rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
1981 }
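/* Derive the driver resource limits from the committed HMC object
 * counts and the device attributes.
 */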
1982 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
1983 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
1984 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
1985 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1986 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
1987 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
1988 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
1989 rf->max_mcg = rf->max_qp;
1990
1991 rsrc_size = irdma_calc_mem_rsrc_size(rf);
1992 rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);
1993 if (!rf->mem_rsrc) {
1994 ret = -ENOMEM;
1995 goto mem_rsrc_kzalloc_fail;
1996 }
1997
1998 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
1999
2000 irdma_set_hw_rsrc(rf);
2001
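/* Reserve index 0 in every resource bitmap so it is never handed out
 * as a valid QP, CQ, MR, PD, AH, MCG or ARP index.
 */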
2002 set_bit(0, rf->allocated_mrs);
2003 set_bit(0, rf->allocated_qps);
2004 set_bit(0, rf->allocated_cqs);
2005 set_bit(0, rf->allocated_pds);
2006 set_bit(0, rf->allocated_arps);
2007 set_bit(0, rf->allocated_ahs);
2008 set_bit(0, rf->allocated_mcgs);
2009 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
2010 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
2011 set_bit(1, rf->allocated_cqs);
2012 set_bit(1, rf->allocated_pds);
2013 set_bit(2, rf->allocated_cqs);
2014 set_bit(2, rf->allocated_pds);
2015
2016 INIT_LIST_HEAD(&rf->mc_qht_list.list);
2017 /* stag index mask has a minimum of 14 bits */
2018 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2019 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
2020
2021 return 0;
2022
2023 mem_rsrc_kzalloc_fail:
2024 bitmap_free(rf->allocated_ws_nodes);
2025 rf->allocated_ws_nodes = NULL;
2026
2027 return ret;
2028 }
2029
2030 /**
2031 * irdma_cqp_ce_handler - handle cqp completions
2032 * @rf: RDMA PCI function
2033 * @cq: cq for cqp completions
2034 */
2035 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2036 {
2037 struct irdma_cqp_request *cqp_request;
2038 struct irdma_sc_dev *dev = &rf->sc_dev;
2039 u32 cqe_count = 0;
2040 struct irdma_ccq_cqe_info info;
2041 unsigned long flags;
2042 int ret;
2043
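/* Drain all available CQP completions from the CCQ, completing each
 * cqp_request (wake a waiter or run its callback), then re-arm the CCQ
 * once the queue is empty.
 */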
2044 do {
2045 memset(&info, 0, sizeof(info));
2046 spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2047 ret = irdma_sc_ccq_get_cqe_info(cq, &info);
2048 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2049 if (ret)
2050 break;
2051
2052 cqp_request = (struct irdma_cqp_request *)
2053 (unsigned long)info.scratch;
2054 if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
2055 info.maj_err_code,
2056 info.min_err_code))
2057 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2058 info.op_code, info.maj_err_code, info.min_err_code);
2059 if (cqp_request) {
2060 cqp_request->compl_info.maj_err_code = info.maj_err_code;
2061 cqp_request->compl_info.min_err_code = info.min_err_code;
2062 cqp_request->compl_info.op_ret_val = info.op_ret_val;
2063 cqp_request->compl_info.error = info.error;
2064
2065 if (cqp_request->waiting) {
2066 cqp_request->request_done = true;
2067 wake_up(&cqp_request->waitq);
2068 irdma_put_cqp_request(&rf->cqp, cqp_request);
2069 } else {
2070 if (cqp_request->callback_fcn)
2071 cqp_request->callback_fcn(cqp_request);
2072 irdma_put_cqp_request(&rf->cqp, cqp_request);
2073 }
2074 }
2075
2076 cqe_count++;
2077 } while (1);
2078
2079 if (cqe_count) {
2080 irdma_process_bh(dev);
2081 irdma_sc_ccq_arm(cq);
2082 }
2083 }
2084
2085 /**
2086 * cqp_compl_worker - Handle cqp completions
2087 * @work: Pointer to work structure
2088 */
2089 void cqp_compl_worker(struct work_struct *work)
2090 {
2091 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2092 cqp_cmpl_work);
2093 struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2094
2095 irdma_cqp_ce_handler(rf, cq);
2096 }
2097
2098 /**
2099 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
2100 * @cm_core: cm's core
2101 * @port: port to identify apbvt entry
2102 */
2103 static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
2104 u16 port)
2105 {
2106 struct irdma_apbvt_entry *entry;
2107
2108 hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
2109 if (entry->port == port) {
2110 entry->use_cnt++;
2111 return entry;
2112 }
2113 }
2114
2115 return NULL;
2116 }
2117
2118 /**
2119 * irdma_next_iw_state - modify qp state
2120 * @iwqp: iwarp qp to modify
2121 * @state: next state for qp
2122 * @del_hash: del hash
2123 * @term: term message
2124 * @termlen: length of term message
2125 */
2126 void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2127 u8 termlen)
2128 {
2129 struct irdma_modify_qp_info info = {};
2130
2131 info.next_iwarp_state = state;
2132 info.remove_hash_idx = del_hash;
2133 info.cq_num_valid = true;
2134 info.arp_cache_idx_valid = true;
2135 info.dont_send_term = true;
2136 info.dont_send_fin = true;
2137 info.termlen = termlen;
2138
2139 if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
2140 info.dont_send_term = false;
2141 if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
2142 info.dont_send_fin = false;
2143 if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2144 info.reset_tcp_conn = true;
2145 iwqp->hw_iwarp_state = state;
2146 irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2147 iwqp->iwarp_state = info.next_iwarp_state;
2148 }
2149
2150 /**
2151 * irdma_del_local_mac_entry - remove a mac entry from the hw
2152 * table
2153 * @rf: RDMA PCI function
2154 * @idx: the index of the mac ip address to delete
2155 */
2156 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2157 {
2158 struct irdma_cqp *iwcqp = &rf->cqp;
2159 struct irdma_cqp_request *cqp_request;
2160 struct cqp_cmds_info *cqp_info;
2161
2162 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2163 if (!cqp_request)
2164 return;
2165
2166 cqp_info = &cqp_request->info;
2167 cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
2168 cqp_info->post_sq = 1;
2169 cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
2170 cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
2171 cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
2172 cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
2173
2174 irdma_handle_cqp_op(rf, cqp_request);
2175 irdma_put_cqp_request(iwcqp, cqp_request);
2176 }
2177
2178 /**
2179 * irdma_add_local_mac_entry - add a mac ip address entry to the
2180 * hw table
2181 * @rf: RDMA PCI function
2182 * @mac_addr: pointer to mac address
2183 * @idx: the index of the mac ip address to add
2184 */
2185 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2186 {
2187 struct irdma_local_mac_entry_info *info;
2188 struct irdma_cqp *iwcqp = &rf->cqp;
2189 struct irdma_cqp_request *cqp_request;
2190 struct cqp_cmds_info *cqp_info;
2191 int status;
2192
2193 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2194 if (!cqp_request)
2195 return -ENOMEM;
2196
2197 cqp_info = &cqp_request->info;
2198 cqp_info->post_sq = 1;
2199 info = &cqp_info->in.u.add_local_mac_entry.info;
2200 ether_addr_copy(info->mac_addr, mac_addr);
2201 info->entry_idx = idx;
2202 cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2203 cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
2204 cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
2205 cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2206
2207 status = irdma_handle_cqp_op(rf, cqp_request);
2208 irdma_put_cqp_request(iwcqp, cqp_request);
2209
2210 return status;
2211 }
2212
2213 /**
2214 * irdma_alloc_local_mac_entry - allocate a mac entry
2215 * @rf: RDMA PCI function
2216 * @mac_tbl_idx: the index of the new mac address
2217 *
2218 * Allocate a mac address entry and update the mac_tbl_idx
2219 * to hold the index of the newly created mac address.
2220 * Return 0 if successful, otherwise return error
2221 */
2222 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2223 {
2224 struct irdma_cqp *iwcqp = &rf->cqp;
2225 struct irdma_cqp_request *cqp_request;
2226 struct cqp_cmds_info *cqp_info;
2227 int status = 0;
2228
2229 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2230 if (!cqp_request)
2231 return -ENOMEM;
2232
2233 cqp_info = &cqp_request->info;
2234 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
2235 cqp_info->post_sq = 1;
2236 cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
2237 cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
2238 status = irdma_handle_cqp_op(rf, cqp_request);
2239 if (!status)
2240 *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
2241
2242 irdma_put_cqp_request(iwcqp, cqp_request);
2243
2244 return status;
2245 }
2246
2247 /**
2248 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
2249 * @iwdev: irdma device
2250 * @accel_local_port: port for apbvt
2251 * @add_port: add or delete port
2252 */
2253 static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
2254 u16 accel_local_port, bool add_port)
2255 {
2256 struct irdma_apbvt_info *info;
2257 struct irdma_cqp_request *cqp_request;
2258 struct cqp_cmds_info *cqp_info;
2259 int status;
2260
2261 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2262 if (!cqp_request)
2263 return -ENOMEM;
2264
2265 cqp_info = &cqp_request->info;
2266 info = &cqp_info->in.u.manage_apbvt_entry.info;
2267 memset(info, 0, sizeof(*info));
2268 info->add = add_port;
2269 info->port = accel_local_port;
2270 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
2271 cqp_info->post_sq = 1;
2272 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2273 cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
2274 ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
2275 (!add_port) ? "DELETE" : "ADD", accel_local_port);
2276
2277 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2278 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2279
2280 return status;
2281 }
2282
2283 /**
2284 * irdma_add_apbvt - add tcp port to HW apbvt table
2285 * @iwdev: irdma device
2286 * @port: port for apbvt
2287 */
2288 struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
2289 {
2290 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2291 struct irdma_apbvt_entry *entry;
2292 unsigned long flags;
2293
2294 spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2295 entry = irdma_lookup_apbvt_entry(cm_core, port);
2296 if (entry) {
2297 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2298 return entry;
2299 }
2300
2301 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2302 if (!entry) {
2303 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2304 return NULL;
2305 }
2306
2307 entry->port = port;
2308 entry->use_cnt = 1;
2309 hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
2310 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2311
2312 if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
2313 kfree(entry);
2314 return NULL;
2315 }
2316
2317 return entry;
2318 }
2319
2320 /**
2321 * irdma_del_apbvt - delete tcp port from HW apbvt table
2322 * @iwdev: irdma device
2323 * @entry: apbvt entry object
2324 */
2325 void irdma_del_apbvt(struct irdma_device *iwdev,
2326 struct irdma_apbvt_entry *entry)
2327 {
2328 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2329 unsigned long flags;
2330
2331 spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2332 if (--entry->use_cnt) {
2333 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2334 return;
2335 }
2336
2337 hash_del(&entry->hlist);
2338 /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
2339 * protect against race where add APBVT CQP can race ahead of the delete
2340 * APBVT for same port.
2341 */
2342 irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
2343 kfree(entry);
2344 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2345 }
2346
2347 /**
2348 * irdma_manage_arp_cache - manage hw arp cache
2349 * @rf: RDMA PCI function
2350 * @mac_addr: mac address ptr
2351 * @ip_addr: ip addr for arp cache
2352 * @ipv4: flag indicating IPv4
2353 * @action: add, delete or modify
2354 */
2355 void irdma_manage_arp_cache(struct irdma_pci_f *rf,
2356 const unsigned char *mac_addr,
2357 u32 *ip_addr, bool ipv4, u32 action)
2358 {
2359 struct irdma_add_arp_cache_entry_info *info;
2360 struct irdma_cqp_request *cqp_request;
2361 struct cqp_cmds_info *cqp_info;
2362 int arp_index;
2363
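/* Update the software ARP table first; the HW ARP cache is only
 * touched via CQP when a valid table slot was found or created.
 */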
2364 arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
2365 if (arp_index == -1)
2366 return;
2367
2368 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2369 if (!cqp_request)
2370 return;
2371
2372 cqp_info = &cqp_request->info;
2373 if (action == IRDMA_ARP_ADD) {
2374 cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
2375 info = &cqp_info->in.u.add_arp_cache_entry.info;
2376 memset(info, 0, sizeof(*info));
2377 info->arp_index = (u16)arp_index;
2378 info->permanent = true;
2379 ether_addr_copy(info->mac_addr, mac_addr);
2380 cqp_info->in.u.add_arp_cache_entry.scratch =
2381 (uintptr_t)cqp_request;
2382 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2383 } else {
2384 cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
2385 cqp_info->in.u.del_arp_cache_entry.scratch =
2386 (uintptr_t)cqp_request;
2387 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2388 cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
2389 }
2390
2391 cqp_info->post_sq = 1;
2392 irdma_handle_cqp_op(rf, cqp_request);
2393 irdma_put_cqp_request(&rf->cqp, cqp_request);
2394 }
2395
2396 /**
2397 * irdma_send_syn_cqp_callback - do syn/ack after qhash
2398 * @cqp_request: qhash cqp completion
2399 */
2400 static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
2401 {
2402 struct irdma_cm_node *cm_node = cqp_request->param;
2403
2404 irdma_send_syn(cm_node, 1);
2405 irdma_rem_ref_cm_node(cm_node);
2406 }
2407
2408 /**
2409 * irdma_manage_qhash - add or modify qhash
2410 * @iwdev: irdma device
2411 * @cminfo: cm info for qhash
2412 * @etype: type (syn or quad)
2413 * @mtype: type of qhash
2414 * @cmnode: cmnode associated with connection
2415 * @wait: wait for completion
2416 */
2417 int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
2418 enum irdma_quad_entry_type etype,
2419 enum irdma_quad_hash_manage_type mtype, void *cmnode,
2420 bool wait)
2421 {
2422 struct irdma_qhash_table_info *info;
2423 struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2424 struct irdma_cqp_request *cqp_request;
2425 struct cqp_cmds_info *cqp_info;
2426 struct irdma_cm_node *cm_node = cmnode;
2427 int status;
2428
2429 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
2430 if (!cqp_request)
2431 return -ENOMEM;
2432
2433 cqp_info = &cqp_request->info;
2434 info = &cqp_info->in.u.manage_qhash_table_entry.info;
2435 memset(info, 0, sizeof(*info));
2436 info->vsi = &iwdev->vsi;
2437 info->manage = mtype;
2438 info->entry_type = etype;
2439 if (cminfo->vlan_id < VLAN_N_VID) {
2440 info->vlan_valid = true;
2441 info->vlan_id = cminfo->vlan_id;
2442 } else {
2443 info->vlan_valid = false;
2444 }
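/* The qhash "dest" fields carry the local (accelerated) address and
 * port; the "src" fields carry the remote peer and are only filled in
 * for entry types that also match on the remote side.
 */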
2445 info->ipv4_valid = cminfo->ipv4;
2446 info->user_pri = cminfo->user_pri;
2447 ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
2448 info->qp_num = cminfo->qh_qpid;
2449 info->dest_port = cminfo->loc_port;
2450 info->dest_ip[0] = cminfo->loc_addr[0];
2451 info->dest_ip[1] = cminfo->loc_addr[1];
2452 info->dest_ip[2] = cminfo->loc_addr[2];
2453 info->dest_ip[3] = cminfo->loc_addr[3];
2454 if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
2455 etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
2456 etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
2457 etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
2458 etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
2459 info->src_port = cminfo->rem_port;
2460 info->src_ip[0] = cminfo->rem_addr[0];
2461 info->src_ip[1] = cminfo->rem_addr[1];
2462 info->src_ip[2] = cminfo->rem_addr[2];
2463 info->src_ip[3] = cminfo->rem_addr[3];
2464 }
2465 if (cmnode) {
2466 cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
2467 cqp_request->param = cmnode;
2468 if (!wait)
2469 refcount_inc(&cm_node->refcnt);
2470 }
2471 if (info->ipv4_valid)
2472 ibdev_dbg(&iwdev->ibdev,
2473 "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
2474 (!mtype) ? "DELETE" : "ADD",
2475 __builtin_return_address(0), info->dest_port,
2476 info->src_port, info->dest_ip, info->src_ip,
2477 info->mac_addr, cminfo->vlan_id,
2478 cmnode ? cmnode : NULL);
2479 else
2480 ibdev_dbg(&iwdev->ibdev,
2481 "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
2482 (!mtype) ? "DELETE" : "ADD",
2483 __builtin_return_address(0), info->dest_port,
2484 info->src_port, info->dest_ip, info->src_ip,
2485 info->mac_addr, cminfo->vlan_id,
2486 cmnode ? cmnode : NULL);
2487
2488 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2489 cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
2490 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
2491 cqp_info->post_sq = 1;
2492 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2493 if (status && cm_node && !wait)
2494 irdma_rem_ref_cm_node(cm_node);
2495
2496 irdma_put_cqp_request(iwcqp, cqp_request);
2497
2498 return status;
2499 }
2500
2501 /**
2502 * irdma_hw_flush_wqes_callback - Check return code after flush
2503 * @cqp_request: flush wqes cqp completion
2504 */
2505 static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
2506 {
2507 struct irdma_qp_flush_info *hw_info;
2508 struct irdma_sc_qp *qp;
2509 struct irdma_qp *iwqp;
2510 struct cqp_cmds_info *cqp_info;
2511
2512 cqp_info = &cqp_request->info;
2513 hw_info = &cqp_info->in.u.qp_flush_wqes.info;
2514 qp = cqp_info->in.u.qp_flush_wqes.qp;
2515 iwqp = qp->qp_uk.back_qp;
2516
2517 if (cqp_request->compl_info.maj_err_code)
2518 return;
2519
2520 if (hw_info->rq &&
2521 (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2522 cqp_request->compl_info.min_err_code == 0)) {
2523 /* RQ WQE flush was requested but did not happen */
2524 qp->qp_uk.rq_flush_complete = true;
2525 }
2526 if (hw_info->sq &&
2527 (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2528 cqp_request->compl_info.min_err_code == 0)) {
2529 if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2530 ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
2531 qp->qp_uk.qp_id);
2532 irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2533 }
2534 qp->qp_uk.sq_flush_complete = true;
2535 }
2536 }
2537
2538 /**
2539 * irdma_hw_flush_wqes - flush qp's wqe
2540 * @rf: RDMA PCI function
2541 * @qp: hardware control qp
2542 * @info: info for flush
2543 * @wait: flag wait for completion
2544 */
2545 int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2546 struct irdma_qp_flush_info *info, bool wait)
2547 {
2548 int status;
2549 struct irdma_qp_flush_info *hw_info;
2550 struct irdma_cqp_request *cqp_request;
2551 struct cqp_cmds_info *cqp_info;
2552 struct irdma_qp *iwqp = qp->qp_uk.back_qp;
2553
2554 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2555 if (!cqp_request)
2556 return -ENOMEM;
2557
2558 cqp_info = &cqp_request->info;
2559 if (!wait)
2560 cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
2561 hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
2562 memcpy(hw_info, info, sizeof(*hw_info));
2563 cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2564 cqp_info->post_sq = 1;
2565 cqp_info->in.u.qp_flush_wqes.qp = qp;
2566 cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
2567 status = irdma_handle_cqp_op(rf, cqp_request);
2568 if (status) {
2569 qp->qp_uk.sq_flush_complete = true;
2570 qp->qp_uk.rq_flush_complete = true;
2571 irdma_put_cqp_request(&rf->cqp, cqp_request);
2572 return status;
2573 }
2574
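/* For a waited flush, inspect the completion codes: if HW reported no
 * flush for a queue that still has work posted, re-issue the SQ flush
 * once and raise a catastrophic QP event if it still does not drain.
 */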
2575 if (!wait || cqp_request->compl_info.maj_err_code)
2576 goto put_cqp;
2577
2578 if (info->rq) {
2579 if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2580 cqp_request->compl_info.min_err_code == 0) {
2581 /* RQ WQE flush was requested but did not happen */
2582 qp->qp_uk.rq_flush_complete = true;
2583 }
2584 }
2585 if (info->sq) {
2586 if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2587 cqp_request->compl_info.min_err_code == 0) {
2588 /*
2589 * Handling case where WQE is posted to empty SQ when
2590 * flush has not completed
2591 */
2592 if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2593 struct irdma_cqp_request *new_req;
2594
2595 if (!qp->qp_uk.sq_flush_complete)
2596 goto put_cqp;
2597 qp->qp_uk.sq_flush_complete = false;
2598 qp->flush_sq = false;
2599
2600 info->rq = false;
2601 info->sq = true;
2602 new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2603 if (!new_req) {
2604 status = -ENOMEM;
2605 goto put_cqp;
2606 }
2607 cqp_info = &new_req->info;
2608 hw_info = &new_req->info.in.u.qp_flush_wqes.info;
2609 memcpy(hw_info, info, sizeof(*hw_info));
2610 cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2611 cqp_info->post_sq = 1;
2612 cqp_info->in.u.qp_flush_wqes.qp = qp;
2613 cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
2614
2615 status = irdma_handle_cqp_op(rf, new_req);
2616 if (new_req->compl_info.maj_err_code ||
2617 new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2618 status) {
2619 ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
2620 iwqp->ibqp.qp_num);
2621 qp->qp_uk.sq_flush_complete = false;
2622 irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2623 }
2624 irdma_put_cqp_request(&rf->cqp, new_req);
2625 } else {
2626 /* SQ WQE flush was requested but did not happen */
2627 qp->qp_uk.sq_flush_complete = true;
2628 }
2629 } else {
2630 if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
2631 qp->qp_uk.sq_flush_complete = true;
2632 }
2633 }
2634
2635 ibdev_dbg(&rf->iwdev->ibdev,
2636 "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
2637 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2638 iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
2639 cqp_request->compl_info.maj_err_code,
2640 cqp_request->compl_info.min_err_code);
2641 put_cqp:
2642 irdma_put_cqp_request(&rf->cqp, cqp_request);
2643
2644 return status;
2645 }
2646
2647 /**
2648 * irdma_gen_ae - generate AE
2649 * @rf: RDMA PCI function
2650 * @qp: qp associated with AE
2651 * @info: info for ae
2652 * @wait: wait for completion
2653 */
2654 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2655 struct irdma_gen_ae_info *info, bool wait)
2656 {
2657 struct irdma_gen_ae_info *ae_info;
2658 struct irdma_cqp_request *cqp_request;
2659 struct cqp_cmds_info *cqp_info;
2660
2661 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2662 if (!cqp_request)
2663 return;
2664
2665 cqp_info = &cqp_request->info;
2666 ae_info = &cqp_request->info.in.u.gen_ae.info;
2667 memcpy(ae_info, info, sizeof(*ae_info));
2668 cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
2669 cqp_info->post_sq = 1;
2670 cqp_info->in.u.gen_ae.qp = qp;
2671 cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
2672
2673 irdma_handle_cqp_op(rf, cqp_request);
2674 irdma_put_cqp_request(&rf->cqp, cqp_request);
2675 }
2676
2677 void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
2678 {
2679 struct irdma_qp_flush_info info = {};
2680 struct irdma_pci_f *rf = iwqp->iwdev->rf;
2681 u8 flush_code = iwqp->sc_qp.flush_code;
2682
2683 if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
2684 return;
2685
2686 /* Set flush info fields */
2687 info.sq = flush_mask & IRDMA_FLUSH_SQ;
2688 info.rq = flush_mask & IRDMA_FLUSH_RQ;
2689
2690 /* Generate userflush errors in CQE */
2691 info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2692 info.sq_minor_code = FLUSH_GENERAL_ERR;
2693 info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2694 info.rq_minor_code = FLUSH_GENERAL_ERR;
2695 info.userflushcode = true;
2696
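/* IRDMA_REFLUSH clears the flush markers so the flush can be posted to
 * HW again; otherwise propagate any AE flush code into the minor codes
 * and, for kernel QPs, schedule the delayed flush work.
 */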
2697 if (flush_mask & IRDMA_REFLUSH) {
2698 if (info.sq)
2699 iwqp->sc_qp.flush_sq = false;
2700 if (info.rq)
2701 iwqp->sc_qp.flush_rq = false;
2702 } else {
2703 if (flush_code) {
2704 if (info.sq && iwqp->sc_qp.sq_flush_code)
2705 info.sq_minor_code = flush_code;
2706 if (info.rq && iwqp->sc_qp.rq_flush_code)
2707 info.rq_minor_code = flush_code;
2708 }
2709 if (!iwqp->user_mode)
2710 queue_delayed_work(iwqp->iwdev->cleanup_wq,
2711 &iwqp->dwork_flush,
2712 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2713 }
2714
2715 /* Issue flush */
2716 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
2717 flush_mask & IRDMA_FLUSH_WAIT);
2718 iwqp->flush_issued = true;
2719 }
2720