1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "osdep.h"
4 #include "defs.h"
5 #include "user.h"
6 #include "irdma.h"
7
8 /**
9 * irdma_set_fragment - set fragment in wqe
10 * @wqe: wqe for setting fragment
11 * @offset: offset value
12 * @sge: sge length and stag
13 * @valid: wqe valid flag
14 */
15 static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
16 u8 valid)
17 {
18 if (sge) {
19 set_64bit_val(wqe, offset,
20 FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
21 set_64bit_val(wqe, offset + 8,
22 FIELD_PREP(IRDMAQPSQ_VALID, valid) |
23 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
24 FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
25 } else {
26 set_64bit_val(wqe, offset, 0);
27 set_64bit_val(wqe, offset + 8,
28 FIELD_PREP(IRDMAQPSQ_VALID, valid));
29 }
30 }
31
32 /**
33 * irdma_set_fragment_gen_1 - set fragment in wqe
34 * @wqe: wqe for setting fragment
35 * @offset: offset value
36 * @sge: sge length and stag
37 * @valid: wqe valid flag
38 */
39 static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
40 struct ib_sge *sge, u8 valid)
41 {
42 if (sge) {
43 set_64bit_val(wqe, offset,
44 FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
45 set_64bit_val(wqe, offset + 8,
46 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
47 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
48 } else {
49 set_64bit_val(wqe, offset, 0);
50 set_64bit_val(wqe, offset + 8, 0);
51 }
52 }
53
54 /**
55 * irdma_nop_1 - insert a NOP wqe
56 * @qp: hw qp ptr
57 */
58 static int irdma_nop_1(struct irdma_qp_uk *qp)
59 {
60 u64 hdr;
61 __le64 *wqe;
62 u32 wqe_idx;
63 bool signaled = false;
64
65 if (!qp->sq_ring.head)
66 return -EINVAL;
67
68 wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
69 wqe = qp->sq_base[wqe_idx].elem;
70
71 qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
72
73 set_64bit_val(wqe, 0, 0);
74 set_64bit_val(wqe, 8, 0);
75 set_64bit_val(wqe, 16, 0);
76
77 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
78 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
79 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
80
81 /* make sure WQE is written before valid bit is set */
82 dma_wmb();
83
84 set_64bit_val(wqe, 24, hdr);
85
86 return 0;
87 }
88
89 /**
90 * irdma_clr_wqes - clear next 128 sq entries
91 * @qp: hw qp ptr
92 * @qp_wqe_idx: wqe_idx
93 */
94 void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
95 {
96 struct irdma_qp_quanta *sq;
97 u32 wqe_idx;
98
99 if (!(qp_wqe_idx & 0x7F)) {
100 wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
101 sq = qp->sq_base + wqe_idx;
102 if (wqe_idx)
103 memset(sq, qp->swqe_polarity ? 0 : 0xFF,
104 128 * sizeof(*sq));
105 else
106 memset(sq, qp->swqe_polarity ? 0xFF : 0,
107 128 * sizeof(*sq));
108 }
109 }
110
111 /**
112 * irdma_uk_qp_post_wr - ring doorbell
113 * @qp: hw qp ptr
114 */
115 void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
116 {
117 u64 temp;
118 u32 hw_sq_tail;
119 u32 sw_sq_head;
120
121 /* valid bit is written and loads completed before reading shadow */
122 mb();
123
124 /* read the doorbell shadow area */
125 get_64bit_val(qp->shadow_area, 0, &temp);
126
127 hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
128 sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
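/*
 * Ring the doorbell only when new WQEs were posted since the last call
 * and the HW tail from the shadow area lies between the previously
 * posted head and the new head (wrap-aware range check); otherwise the
 * hardware is either already caught up or still working through older
 * WQEs, the assumption being that it will reach the new ones on its own.
 */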
129 if (sw_sq_head != qp->initial_ring.head) {
130 if (sw_sq_head != hw_sq_tail) {
131 if (sw_sq_head > qp->initial_ring.head) {
132 if (hw_sq_tail >= qp->initial_ring.head &&
133 hw_sq_tail < sw_sq_head)
134 writel(qp->qp_id, qp->wqe_alloc_db);
135 } else {
136 if (hw_sq_tail >= qp->initial_ring.head ||
137 hw_sq_tail < sw_sq_head)
138 writel(qp->qp_id, qp->wqe_alloc_db);
139 }
140 }
141 }
142
143 qp->initial_ring.head = qp->sq_ring.head;
144 }
145
146 /**
147 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
148 * @qp: hw qp ptr
149 * @wqe_idx: return wqe index
150 * @quanta: size of WR in quanta
151 * @total_size: size of WR in bytes
152 * @info: info on WR
153 */
154 __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
155 u16 quanta, u32 total_size,
156 struct irdma_post_sq_info *info)
157 {
158 __le64 *wqe;
159 __le64 *wqe_0 = NULL;
160 u16 avail_quanta;
161 u16 i;
162
163 avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
164 (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
165 qp->uk_attrs->max_hw_sq_chunk);
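/*
 * avail_quanta is what remains of the current hardware SQ chunk
 * (max_hw_sq_chunk). A WR may not straddle a chunk boundary, so if it
 * does not fit, the rest of the chunk is padded with NOP WQEs below.
 */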
166 if (quanta <= avail_quanta) {
167 /* WR fits in current chunk */
168 if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
169 return NULL;
170 } else {
171 /* Need to pad with NOP */
172 if (quanta + avail_quanta >
173 IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
174 return NULL;
175
176 for (i = 0; i < avail_quanta; i++) {
177 irdma_nop_1(qp);
178 IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
179 }
180 }
181
182 *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
183 if (!*wqe_idx)
184 qp->swqe_polarity = !qp->swqe_polarity;
185
186 IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
187
188 wqe = qp->sq_base[*wqe_idx].elem;
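/*
 * GEN_1 note: when a single-quanta WQE leaves the ring head at an odd
 * index, the valid bit of the adjacent (unused) 32-byte slot is stamped
 * with the inverse polarity, presumably so the stale half of the 64-byte
 * pair is never parsed as a posted WQE.
 */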
189 if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
190 (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
191 wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
192 wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
193 }
194 qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
195 qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
196 qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
197
198 return wqe;
199 }
200
201 /**
202 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
203 * @qp: hw qp ptr
204 * @wqe_idx: return wqe index
205 */
206 __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
207 {
208 __le64 *wqe;
209 int ret_code;
210
211 if (IRDMA_RING_FULL_ERR(qp->rq_ring))
212 return NULL;
213
214 IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
215 if (ret_code)
216 return NULL;
217
218 if (!*wqe_idx)
219 qp->rwqe_polarity = !qp->rwqe_polarity;
220 /* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
221 wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
222
223 return wqe;
224 }
225
226 /**
227 * irdma_uk_rdma_write - rdma write operation
228 * @qp: hw qp ptr
229 * @info: post sq information
230 * @post_sq: flag to post sq
231 */
232 int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
233 bool post_sq)
234 {
235 u64 hdr;
236 __le64 *wqe;
237 struct irdma_rdma_write *op_info;
238 u32 i, wqe_idx;
239 u32 total_size = 0, byte_off;
240 int ret_code;
241 u32 frag_cnt, addl_frag_cnt;
242 bool read_fence = false;
243 u16 quanta;
244
245 op_info = &info->op.rdma_write;
246 if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
247 return -EINVAL;
248
249 for (i = 0; i < op_info->num_lo_sges; i++)
250 total_size += op_info->lo_sg_list[i].length;
251
252 read_fence |= info->read_fence;
253
254 if (info->imm_data_valid)
255 frag_cnt = op_info->num_lo_sges + 1;
256 else
257 frag_cnt = op_info->num_lo_sges;
258 addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
259 ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
260 if (ret_code)
261 return ret_code;
262
263 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
264 info);
265 if (!wqe)
266 return -ENOMEM;
267
268 irdma_clr_wqes(qp, wqe_idx);
269
270 set_64bit_val(wqe, 16,
271 FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
272
273 if (info->imm_data_valid) {
274 set_64bit_val(wqe, 0,
275 FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
276 i = 0;
277 } else {
278 qp->wqe_ops.iw_set_fragment(wqe, 0,
279 op_info->lo_sg_list,
280 qp->swqe_polarity);
281 i = 1;
282 }
283
284 for (byte_off = 32; i < op_info->num_lo_sges; i++) {
285 qp->wqe_ops.iw_set_fragment(wqe, byte_off,
286 &op_info->lo_sg_list[i],
287 qp->swqe_polarity);
288 byte_off += 16;
289 }
290
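/*
 * GEN_2+ packs two fragments per 32-byte quantum, so with an even,
 * non-zero fragment count the second slot of the last quantum is unused;
 * it still gets its valid bit stamped (NULL sge below), and on GEN_2 it
 * is also counted in addl_frag_cnt. The same pattern repeats in the
 * other post routines.
 */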
291 /* if not an odd number set valid bit in next fragment */
292 if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
293 frag_cnt) {
294 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
295 qp->swqe_polarity);
296 if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
297 ++addl_frag_cnt;
298 }
299
300 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
301 FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
302 FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
303 FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
304 FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
305 FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
306 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
307 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
308 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
309
310 dma_wmb(); /* make sure WQE is populated before valid bit is set */
311
312 set_64bit_val(wqe, 24, hdr);
313
314 if (post_sq)
315 irdma_uk_qp_post_wr(qp);
316
317 return 0;
318 }
319
320 /**
321 * irdma_uk_rdma_read - rdma read command
322 * @qp: hw qp ptr
323 * @info: post sq information
324 * @inv_stag: flag for inv_stag
325 * @post_sq: flag to post sq
326 */
327 int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
328 bool inv_stag, bool post_sq)
329 {
330 struct irdma_rdma_read *op_info;
331 int ret_code;
332 u32 i, byte_off, total_size = 0;
333 bool local_fence = false;
334 u32 addl_frag_cnt;
335 __le64 *wqe;
336 u32 wqe_idx;
337 u16 quanta;
338 u64 hdr;
339
340 op_info = &info->op.rdma_read;
341 if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
342 return -EINVAL;
343
344 for (i = 0; i < op_info->num_lo_sges; i++)
345 total_size += op_info->lo_sg_list[i].length;
346
347 ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
348 if (ret_code)
349 return ret_code;
350
351 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
352 info);
353 if (!wqe)
354 return -ENOMEM;
355
356 irdma_clr_wqes(qp, wqe_idx);
357
358 addl_frag_cnt = op_info->num_lo_sges > 1 ?
359 (op_info->num_lo_sges - 1) : 0;
360 local_fence |= info->local_fence;
361
362 qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
363 qp->swqe_polarity);
364 for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
365 qp->wqe_ops.iw_set_fragment(wqe, byte_off,
366 &op_info->lo_sg_list[i],
367 qp->swqe_polarity);
368 byte_off += 16;
369 }
370
371 /* if not an odd number set valid bit in next fragment */
372 if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
373 !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
374 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
375 qp->swqe_polarity);
376 if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
377 ++addl_frag_cnt;
378 }
379 set_64bit_val(wqe, 16,
380 FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
381 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
382 FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
383 FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
384 FIELD_PREP(IRDMAQPSQ_OPCODE,
385 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
386 FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
387 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
388 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
389 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
390
391 dma_wmb(); /* make sure WQE is populated before valid bit is set */
392
393 set_64bit_val(wqe, 24, hdr);
394
395 if (post_sq)
396 irdma_uk_qp_post_wr(qp);
397
398 return 0;
399 }
400
401 /**
402 * irdma_uk_send - rdma send command
403 * @qp: hw qp ptr
404 * @info: post sq information
405 * @post_sq: flag to post sq
406 */
407 int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
408 bool post_sq)
409 {
410 __le64 *wqe;
411 struct irdma_post_send *op_info;
412 u64 hdr;
413 u32 i, wqe_idx, total_size = 0, byte_off;
414 int ret_code;
415 u32 frag_cnt, addl_frag_cnt;
416 bool read_fence = false;
417 u16 quanta;
418
419 op_info = &info->op.send;
420 if (qp->max_sq_frag_cnt < op_info->num_sges)
421 return -EINVAL;
422
423 for (i = 0; i < op_info->num_sges; i++)
424 total_size += op_info->sg_list[i].length;
425
426 if (info->imm_data_valid)
427 frag_cnt = op_info->num_sges + 1;
428 else
429 frag_cnt = op_info->num_sges;
430 ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
431 if (ret_code)
432 return ret_code;
433
434 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
435 info);
436 if (!wqe)
437 return -ENOMEM;
438
439 irdma_clr_wqes(qp, wqe_idx);
440
441 read_fence |= info->read_fence;
442 addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
443 if (info->imm_data_valid) {
444 set_64bit_val(wqe, 0,
445 FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
446 i = 0;
447 } else {
448 qp->wqe_ops.iw_set_fragment(wqe, 0,
449 frag_cnt ? op_info->sg_list : NULL,
450 qp->swqe_polarity);
451 i = 1;
452 }
453
454 for (byte_off = 32; i < op_info->num_sges; i++) {
455 qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
456 qp->swqe_polarity);
457 byte_off += 16;
458 }
459
460 /* if not an odd number set valid bit in next fragment */
461 if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
462 frag_cnt) {
463 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
464 qp->swqe_polarity);
465 if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
466 ++addl_frag_cnt;
467 }
468
469 set_64bit_val(wqe, 16,
470 FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
471 FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
472 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
473 FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
474 FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
475 (info->imm_data_valid ? 1 : 0)) |
476 FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
477 FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
478 FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
479 FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
480 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
481 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
482 FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
483 FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
484 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
485
486 dma_wmb(); /* make sure WQE is populated before valid bit is set */
487
488 set_64bit_val(wqe, 24, hdr);
489
490 if (post_sq)
491 irdma_uk_qp_post_wr(qp);
492
493 return 0;
494 }
495
496 /**
497 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
498 * @wqe: wqe for setting fragment
499 * @op_info: info for setting bind wqe values
500 */
501 static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
502 struct irdma_bind_window *op_info)
503 {
504 set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
505 set_64bit_val(wqe, 8,
506 FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
507 FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
508 set_64bit_val(wqe, 16, op_info->bind_len);
509 }
510
511 /**
512 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
513 * @wqe: pointer to wqe
514 * @sge_list: table of pointers to inline data
515 * @num_sges: number of SGEs in sge_list
516 * @polarity: compatibility parameter
517 */
518 static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
519 u32 num_sges, u8 polarity)
520 {
521 u32 quanta_bytes_remaining = 16;
522 int i;
523
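/*
 * GEN_1 inline layout: the first quantum carries 16 data bytes in its
 * lower half (the upper 16 bytes hold the remaining WQE header fields),
 * and every following quantum carries a full 32 data bytes. GEN_1 has no
 * per-quantum inline valid byte, which is why @polarity is unused here.
 */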
524 for (i = 0; i < num_sges; i++) {
525 u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
526 u32 sge_len = sge_list[i].length;
527
528 while (sge_len) {
529 u32 bytes_copied;
530
531 bytes_copied = min(sge_len, quanta_bytes_remaining);
532 memcpy(wqe, cur_sge, bytes_copied);
533 wqe += bytes_copied;
534 cur_sge += bytes_copied;
535 quanta_bytes_remaining -= bytes_copied;
536 sge_len -= bytes_copied;
537
538 if (!quanta_bytes_remaining) {
539 /* Remaining inline bytes reside after hdr */
540 wqe += 16;
541 quanta_bytes_remaining = 32;
542 }
543 }
544 }
545 }
546
547 /**
548 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta for inline data size
549 * @data_size: data size for inline
550 *
551 * Returns the number of SQ quanta needed for the given inline data size.
552 */
553 static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
554 {
555 return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
556 }
557
558 /**
559 * irdma_set_mw_bind_wqe - set mw bind in wqe
560 * @wqe: wqe for setting mw bind
561 * @op_info: info for setting wqe values
562 */
563 static void irdma_set_mw_bind_wqe(__le64 *wqe,
564 struct irdma_bind_window *op_info)
565 {
566 set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
567 set_64bit_val(wqe, 8,
568 FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
569 FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
570 set_64bit_val(wqe, 16, op_info->bind_len);
571 }
572
573 /**
574 * irdma_copy_inline_data - Copy inline data to wqe
575 * @wqe: pointer to wqe
576 * @sge_list: table of pointers to inline data
577 * @num_sges: number of SGE's
578 * @polarity: polarity of wqe valid bit
579 */
580 static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
581 u32 num_sges, u8 polarity)
582 {
583 u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
584 u32 quanta_bytes_remaining = 8;
585 bool first_quanta = true;
586 int i;
587
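/*
 * GEN_2+ inline layout: bytes 8..15 of the first quantum carry data
 * (bytes 0..7 and 16..31 are reserved for immediate data and header
 * words written by the caller), each later quantum carries 31 data
 * bytes, and its last byte holds the inline valid marker derived from
 * the WQE polarity.
 */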
588 wqe += 8;
589
590 for (i = 0; i < num_sges; i++) {
591 u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
592 u32 sge_len = sge_list[i].length;
593
594 while (sge_len) {
595 u32 bytes_copied;
596
597 bytes_copied = min(sge_len, quanta_bytes_remaining);
598 memcpy(wqe, cur_sge, bytes_copied);
599 wqe += bytes_copied;
600 cur_sge += bytes_copied;
601 quanta_bytes_remaining -= bytes_copied;
602 sge_len -= bytes_copied;
603
604 if (!quanta_bytes_remaining) {
605 quanta_bytes_remaining = 31;
606
607 /* Remaining inline bytes reside after hdr */
608 if (first_quanta) {
609 first_quanta = false;
610 wqe += 16;
611 } else {
612 *wqe = inline_valid;
613 wqe++;
614 }
615 }
616 }
617 }
618 if (!first_quanta && quanta_bytes_remaining < 31)
619 *(wqe + quanta_bytes_remaining) = inline_valid;
620 }
621
622 /**
623 * irdma_inline_data_size_to_quanta - calculate quanta for inline data size
624 * @data_size: data size for inline
625 *
626 * Returns the number of SQ quanta needed for the given inline data size.
627 */
628 static u16 irdma_inline_data_size_to_quanta(u32 data_size)
629 {
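/*
 * Thresholds follow the inline layout above: 8 bytes fit in the first
 * quantum and each extra quantum adds 31 data bytes, so N quanta hold
 * 8 + 31 * (N - 1) bytes (39, 70, 101, ...).
 */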
630 if (data_size <= 8)
631 return IRDMA_QP_WQE_MIN_QUANTA;
632 else if (data_size <= 39)
633 return 2;
634 else if (data_size <= 70)
635 return 3;
636 else if (data_size <= 101)
637 return 4;
638 else if (data_size <= 132)
639 return 5;
640 else if (data_size <= 163)
641 return 6;
642 else if (data_size <= 194)
643 return 7;
644 else
645 return 8;
646 }
647
648 /**
649 * irdma_uk_inline_rdma_write - inline rdma write operation
650 * @qp: hw qp ptr
651 * @info: post sq information
652 * @post_sq: flag to post sq
653 */
654 int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
655 struct irdma_post_sq_info *info, bool post_sq)
656 {
657 __le64 *wqe;
658 struct irdma_rdma_write *op_info;
659 u64 hdr = 0;
660 u32 wqe_idx;
661 bool read_fence = false;
662 u32 i, total_size = 0;
663 u16 quanta;
664
665 op_info = &info->op.rdma_write;
666
667 if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
668 return -EINVAL;
669
670 for (i = 0; i < op_info->num_lo_sges; i++)
671 total_size += op_info->lo_sg_list[i].length;
672
673 if (unlikely(total_size > qp->max_inline_data))
674 return -EINVAL;
675
676 quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
677 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
678 info);
679 if (!wqe)
680 return -ENOMEM;
681
682 irdma_clr_wqes(qp, wqe_idx);
683
684 read_fence |= info->read_fence;
685 set_64bit_val(wqe, 16,
686 FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
687
688 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
689 FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
690 FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
691 FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
692 FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
693 FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
694 FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
695 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
696 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
697 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
698
699 if (info->imm_data_valid)
700 set_64bit_val(wqe, 0,
701 FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
702
703 qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
704 op_info->num_lo_sges,
705 qp->swqe_polarity);
706 dma_wmb(); /* make sure WQE is populated before valid bit is set */
707
708 set_64bit_val(wqe, 24, hdr);
709
710 if (post_sq)
711 irdma_uk_qp_post_wr(qp);
712
713 return 0;
714 }
715
716 /**
717 * irdma_uk_inline_send - inline send operation
718 * @qp: hw qp ptr
719 * @info: post sq information
720 * @post_sq: flag to post sq
721 */
722 int irdma_uk_inline_send(struct irdma_qp_uk *qp,
723 struct irdma_post_sq_info *info, bool post_sq)
724 {
725 __le64 *wqe;
726 struct irdma_post_send *op_info;
727 u64 hdr;
728 u32 wqe_idx;
729 bool read_fence = false;
730 u32 i, total_size = 0;
731 u16 quanta;
732
733 op_info = &info->op.send;
734
735 if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
736 return -EINVAL;
737
738 for (i = 0; i < op_info->num_sges; i++)
739 total_size += op_info->sg_list[i].length;
740
741 if (unlikely(total_size > qp->max_inline_data))
742 return -EINVAL;
743
744 quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
745 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
746 info);
747 if (!wqe)
748 return -ENOMEM;
749
750 irdma_clr_wqes(qp, wqe_idx);
751
752 set_64bit_val(wqe, 16,
753 FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
754 FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
755
756 read_fence |= info->read_fence;
757 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
758 FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
759 FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
760 FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
761 FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
762 (info->imm_data_valid ? 1 : 0)) |
763 FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
764 FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
765 FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
766 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
767 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
768 FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
769 FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
770 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
771
772 if (info->imm_data_valid)
773 set_64bit_val(wqe, 0,
774 FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
775 qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
776 op_info->num_sges, qp->swqe_polarity);
777
778 dma_wmb(); /* make sure WQE is populated before valid bit is set */
779
780 set_64bit_val(wqe, 24, hdr);
781
782 if (post_sq)
783 irdma_uk_qp_post_wr(qp);
784
785 return 0;
786 }
787
788 /**
789 * irdma_uk_stag_local_invalidate - stag invalidate operation
790 * @qp: hw qp ptr
791 * @info: post sq information
792 * @post_sq: flag to post sq
793 */
794 int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
795 struct irdma_post_sq_info *info,
796 bool post_sq)
797 {
798 __le64 *wqe;
799 struct irdma_inv_local_stag *op_info;
800 u64 hdr;
801 u32 wqe_idx;
802 bool local_fence = false;
803 struct ib_sge sge = {};
804
805 op_info = &info->op.inv_local_stag;
806 local_fence = info->local_fence;
807
808 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
809 0, info);
810 if (!wqe)
811 return -ENOMEM;
812
813 irdma_clr_wqes(qp, wqe_idx);
814
815 sge.lkey = op_info->target_stag;
816 qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
817
818 set_64bit_val(wqe, 16, 0);
819
820 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
821 FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
822 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
823 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
824 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
825
826 dma_wmb(); /* make sure WQE is populated before valid bit is set */
827
828 set_64bit_val(wqe, 24, hdr);
829
830 if (post_sq)
831 irdma_uk_qp_post_wr(qp);
832
833 return 0;
834 }
835
836 /**
837 * irdma_uk_post_receive - post receive wqe
838 * @qp: hw qp ptr
839 * @info: post rq information
840 */
841 int irdma_uk_post_receive(struct irdma_qp_uk *qp,
842 struct irdma_post_rq_info *info)
843 {
844 u32 wqe_idx, i, byte_off;
845 u32 addl_frag_cnt;
846 __le64 *wqe;
847 u64 hdr;
848
849 if (qp->max_rq_frag_cnt < info->num_sges)
850 return -EINVAL;
851
852 wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
853 if (!wqe)
854 return -ENOMEM;
855
856 qp->rq_wrid_array[wqe_idx] = info->wr_id;
857 addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
858 qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
859 qp->rwqe_polarity);
860
861 for (i = 1, byte_off = 32; i < info->num_sges; i++) {
862 qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
863 qp->rwqe_polarity);
864 byte_off += 16;
865 }
866
867 /* if not an odd number set valid bit in next fragment */
868 if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
869 info->num_sges) {
870 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
871 qp->rwqe_polarity);
872 if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
873 ++addl_frag_cnt;
874 }
875
876 set_64bit_val(wqe, 16, 0);
877 hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
878 FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
879
880 dma_wmb(); /* make sure WQE is populated before valid bit is set */
881
882 set_64bit_val(wqe, 24, hdr);
883
884 return 0;
885 }
886
887 /**
888 * irdma_uk_cq_resize - reset the cq buffer info
889 * @cq: cq to resize
890 * @cq_base: new cq buffer addr
891 * @cq_size: number of cqes
892 */
893 void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
894 {
895 cq->cq_base = cq_base;
896 cq->cq_size = cq_size;
897 IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
898 cq->polarity = 1;
899 }
900
901 /**
902 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
903 * @cq: cq to resize
904 * @cq_cnt: the count of the resized cq buffers
905 */
906 void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
907 {
908 u64 temp_val;
909 u16 sw_cq_sel;
910 u8 arm_next_se;
911 u8 arm_next;
912 u8 arm_seq_num;
913
914 get_64bit_val(cq->shadow_area, 32, &temp_val);
915
916 sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
917 sw_cq_sel += cq_cnt;
918
919 arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
920 arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
921 arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
922
923 temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
924 FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
925 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
926 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
927
928 set_64bit_val(cq->shadow_area, 32, temp_val);
929 }
930
931 /**
932 * irdma_uk_cq_request_notification - cq notification request (door bell)
933 * @cq: hw cq
934 * @cq_notify: notification type
935 */
936 void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
937 enum irdma_cmpl_notify cq_notify)
938 {
939 u64 temp_val;
940 u16 sw_cq_sel;
941 u8 arm_next_se = 0;
942 u8 arm_next = 0;
943 u8 arm_seq_num;
944
945 get_64bit_val(cq->shadow_area, 32, &temp_val);
946 arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
947 arm_seq_num++;
948 sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
949 arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
950 arm_next_se |= 1;
951 if (cq_notify == IRDMA_CQ_COMPL_EVENT)
952 arm_next = 1;
953 temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
954 FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
955 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
956 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
957
958 set_64bit_val(cq->shadow_area, 32, temp_val);
959
960 dma_wmb(); /* make sure WQE is populated before valid bit is set */
961
962 writel(cq->cq_id, cq->cqe_alloc_db);
963 }
964
965 /**
966 * irdma_uk_cq_poll_cmpl - get cq completion info
967 * @cq: hw cq
968 * @info: cq poll information returned
969 */
970 int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
971 struct irdma_cq_poll_info *info)
972 {
973 u64 comp_ctx, qword0, qword2, qword3;
974 __le64 *cqe;
975 struct irdma_qp_uk *qp;
976 struct irdma_ring *pring = NULL;
977 u32 wqe_idx;
978 int ret_code;
979 bool move_cq_head = true;
980 u8 polarity;
981 bool ext_valid;
982 __le64 *ext_cqe;
983
984 if (cq->avoid_mem_cflct)
985 cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
986 else
987 cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
988
989 get_64bit_val(cqe, 24, &qword3);
990 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
991 if (polarity != cq->polarity)
992 return -ENOENT;
993
994 /* Ensure CQE contents are read after valid bit is checked */
995 dma_rmb();
996
997 ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
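/*
 * Extended CQEs carry UD/immediate metadata. With avoid_mem_cflct the CQ
 * uses 64-byte entries and the extension sits in the upper half of the
 * current CQE; otherwise it occupies the next 32-byte CQ entry, which is
 * why the ring head and tail are advanced twice at the end of this
 * function.
 */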
998 if (ext_valid) {
999 u64 qword6, qword7;
1000 u32 peek_head;
1001
1002 if (cq->avoid_mem_cflct) {
1003 ext_cqe = (__le64 *)((u8 *)cqe + 32);
1004 get_64bit_val(ext_cqe, 24, &qword7);
1005 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
1006 } else {
1007 peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
1008 ext_cqe = cq->cq_base[peek_head].buf;
1009 get_64bit_val(ext_cqe, 24, &qword7);
1010 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
1011 if (!peek_head)
1012 polarity ^= 1;
1013 }
1014 if (polarity != cq->polarity)
1015 return -ENOENT;
1016
1017 /* Ensure ext CQE contents are read after ext valid bit is checked */
1018 dma_rmb();
1019
1020 info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
1021 if (info->imm_valid) {
1022 u64 qword4;
1023
1024 get_64bit_val(ext_cqe, 0, &qword4);
1025 info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
1026 }
1027 info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
1028 info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
1029 if (info->ud_smac_valid || info->ud_vlan_valid) {
1030 get_64bit_val(ext_cqe, 16, &qword6);
1031 if (info->ud_vlan_valid)
1032 info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
1033 if (info->ud_smac_valid) {
1034 info->ud_smac[5] = qword6 & 0xFF;
1035 info->ud_smac[4] = (qword6 >> 8) & 0xFF;
1036 info->ud_smac[3] = (qword6 >> 16) & 0xFF;
1037 info->ud_smac[2] = (qword6 >> 24) & 0xFF;
1038 info->ud_smac[1] = (qword6 >> 32) & 0xFF;
1039 info->ud_smac[0] = (qword6 >> 40) & 0xFF;
1040 }
1041 }
1042 } else {
1043 info->imm_valid = false;
1044 info->ud_smac_valid = false;
1045 info->ud_vlan_valid = false;
1046 }
1047
1048 info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
1049 info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
1050 info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
1051 if (info->error) {
1052 info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
1053 info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
1054 if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
1055 info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
1056 /* Set the min error to standard flush error code for remaining cqes */
1057 if (info->minor_err != FLUSH_GENERAL_ERR) {
1058 qword3 &= ~IRDMA_CQ_MINERR;
1059 qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
1060 set_64bit_val(cqe, 24, qword3);
1061 }
1062 } else {
1063 info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
1064 }
1065 } else {
1066 info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
1067 }
1068
1069 get_64bit_val(cqe, 0, &qword0);
1070 get_64bit_val(cqe, 16, &qword2);
1071
1072 info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
1073 info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
1074 info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
1075
1076 get_64bit_val(cqe, 8, &comp_ctx);
1077
1078 info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
1079 qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
1080 if (!qp || qp->destroy_pending) {
1081 ret_code = -EFAULT;
1082 goto exit;
1083 }
1084 wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
1085 info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
1086 info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
1087
1088 if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
1089 u32 array_idx;
1090
1091 array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
1092
1093 if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
1094 info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
1095 if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
1096 ret_code = -ENOENT;
1097 goto exit;
1098 }
1099
1100 info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
1101 array_idx = qp->rq_ring.tail;
1102 } else {
1103 info->wr_id = qp->rq_wrid_array[array_idx];
1104 }
1105
1106 info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
1107
1108 if (qword3 & IRDMACQ_STAG) {
1109 info->stag_invalid_set = true;
1110 info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
1111 } else {
1112 info->stag_invalid_set = false;
1113 }
1114 IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
1115 if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
1116 qp->rq_flush_seen = true;
1117 if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
1118 qp->rq_flush_complete = true;
1119 else
1120 move_cq_head = false;
1121 }
1122 pring = &qp->rq_ring;
1123 } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
1124 if (qp->first_sq_wq) {
1125 if (wqe_idx + 1 >= qp->conn_wqes)
1126 qp->first_sq_wq = false;
1127
1128 if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
1129 IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1130 IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1131 set_64bit_val(cq->shadow_area, 0,
1132 IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
1133 memset(info, 0,
1134 sizeof(struct irdma_cq_poll_info));
1135 return irdma_uk_cq_poll_cmpl(cq, info);
1136 }
1137 }
1138 if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
1139 info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
1140 if (!info->comp_status)
1141 info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
1142 info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
1143 IRDMA_RING_SET_TAIL(qp->sq_ring,
1144 wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
1145 } else {
1146 if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
1147 ret_code = -ENOENT;
1148 goto exit;
1149 }
1150
1151 do {
1152 __le64 *sw_wqe;
1153 u64 wqe_qword;
1154 u32 tail;
1155
1156 tail = qp->sq_ring.tail;
1157 sw_wqe = qp->sq_base[tail].elem;
1158 get_64bit_val(sw_wqe, 24,
1159 &wqe_qword);
1160 info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
1161 wqe_qword);
1162 IRDMA_RING_SET_TAIL(qp->sq_ring,
1163 tail + qp->sq_wrtrk_array[tail].quanta);
1164 if (info->op_type != IRDMAQP_OP_NOP) {
1165 info->wr_id = qp->sq_wrtrk_array[tail].wrid;
1166 info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
1167 break;
1168 }
1169 } while (1);
1170 if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
1171 info->minor_err == FLUSH_PROT_ERR)
1172 info->minor_err = FLUSH_MW_BIND_ERR;
1173 qp->sq_flush_seen = true;
1174 if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
1175 qp->sq_flush_complete = true;
1176 }
1177 pring = &qp->sq_ring;
1178 }
1179
1180 ret_code = 0;
1181
1182 exit:
1183 if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
1184 if (pring && IRDMA_RING_MORE_WORK(*pring))
1185 move_cq_head = false;
1186
1187 if (move_cq_head) {
1188 IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1189 if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
1190 cq->polarity ^= 1;
1191
1192 if (ext_valid && !cq->avoid_mem_cflct) {
1193 IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1194 if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
1195 cq->polarity ^= 1;
1196 }
1197
1198 IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1199 if (!cq->avoid_mem_cflct && ext_valid)
1200 IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1201 set_64bit_val(cq->shadow_area, 0,
1202 IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
1203 } else {
1204 qword3 &= ~IRDMA_CQ_WQEIDX;
1205 qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
1206 set_64bit_val(cqe, 24, qword3);
1207 }
1208
1209 return ret_code;
1210 }
1211
1212 /**
1213 * irdma_qp_round_up - return qp wq depth rounded up to the next power of two
1214 * @wqdepth: wq depth in quanta to round up
1215 */
1216 static int irdma_qp_round_up(u32 wqdepth)
1217 {
1218 int scount = 1;
1219
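/*
 * Classic round-up to the next power of two: smear the highest set bit
 * of (wqdepth - 1) into all lower bit positions, then add one.
 */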
1220 for (wqdepth--; scount <= 16; scount *= 2)
1221 wqdepth |= wqdepth >> scount;
1222
1223 return ++wqdepth;
1224 }
1225
1226 /**
1227 * irdma_get_wqe_shift - get shift count for maximum wqe size
1228 * @uk_attrs: qp HW attributes
1229 * @sge: Maximum Scatter Gather Elements wqe
1230 * @inline_data: Maximum inline data size
1231 * @shift: Returns the shift needed based on sge
1232 *
1233 * Shift can be used to left shift the base wqe size (32 bytes) based on the number of SGEs and the inline data size.
1234 * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
1235 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe
1236 * size of 64 bytes).
1237 * For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128
1238 * bytes). Otherwise shift = 3 (wqe size of 256 bytes).
1239 */
1240 void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
1241 u32 inline_data, u8 *shift)
1242 {
1243 *shift = 0;
1244 if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
1245 if (sge > 1 || inline_data > 8) {
1246 if (sge < 4 && inline_data <= 39)
1247 *shift = 1;
1248 else if (sge < 8 && inline_data <= 101)
1249 *shift = 2;
1250 else
1251 *shift = 3;
1252 }
1253 } else if (sge > 1 || inline_data > 16) {
1254 *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
1255 }
1256 }
1257
1258 /*
1259 * irdma_get_sqdepth - get SQ depth (quanta)
1260 * @uk_attrs: qp HW attributes
1261 * @sq_size: SQ size
1262 * @shift: shift which determines size of WQE
1263 * @sqdepth: depth of SQ
1264 *
1265 */
1266 int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
1267 u32 *sqdepth)
1268 {
1269 u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
1270
1271 *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
1272
1273 if (*sqdepth < min_size)
1274 *sqdepth = min_size;
1275 else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
1276 return -EINVAL;
1277
1278 return 0;
1279 }
1280
1281 /*
1282 * irdma_get_rqdepth - get RQ depth (quanta)
1283 * @uk_attrs: qp HW attributes
1284 * @rq_size: RQ size
1285 * @shift: shift which determines size of WQE
1286 * @rqdepth: depth of RQ
1287 */
1288 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
1289 u32 *rqdepth)
1290 {
1291 u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
1292
1293 *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
1294
1295 if (*rqdepth < min_size)
1296 *rqdepth = min_size;
1297 else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
1298 return -EINVAL;
1299
1300 return 0;
1301 }
1302
1303 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
1304 .iw_copy_inline_data = irdma_copy_inline_data,
1305 .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
1306 .iw_set_fragment = irdma_set_fragment,
1307 .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
1308 };
1309
1310 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
1311 .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
1312 .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
1313 .iw_set_fragment = irdma_set_fragment_gen_1,
1314 .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
1315 };
1316
1317 /**
1318 * irdma_setup_connection_wqes - setup WQEs necessary to complete
1319 * connection.
1320 * @qp: hw qp (user and kernel)
1321 * @info: qp initialization info
1322 */
1323 static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
1324 struct irdma_qp_uk_init_info *info)
1325 {
1326 u16 move_cnt = 1;
1327
1328 if (!info->legacy_mode &&
1329 (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
1330 move_cnt = 3;
1331
1332 qp->conn_wqes = move_cnt;
1333 IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
1334 IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
1335 IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
1336 }
1337
1338 /**
1339 * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
1340 * @ukinfo: qp initialization info
1341 * @sq_shift: Returns shift of SQ
1342 * @rq_shift: Returns shift of RQ
1343 */
1344 void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
1345 u8 *rq_shift)
1346 {
1347 bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1348
1349 irdma_get_wqe_shift(ukinfo->uk_attrs,
1350 imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1351 ukinfo->max_sq_frag_cnt,
1352 ukinfo->max_inline_data, sq_shift);
1353
1354 irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
1355 rq_shift);
1356
1357 if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
1358 if (ukinfo->abi_ver > 4)
1359 *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
1360 }
1361 }
1362
1363 /**
1364 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
1365 * @ukinfo: qp initialization info
1366 * @sq_depth: Returns depth of SQ
1367 * @sq_shift: Returns shift of SQ
1368 */
1369 int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
1370 u32 *sq_depth, u8 *sq_shift)
1371 {
1372 bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1373 int status;
1374
1375 irdma_get_wqe_shift(ukinfo->uk_attrs,
1376 imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1377 ukinfo->max_sq_frag_cnt,
1378 ukinfo->max_inline_data, sq_shift);
1379 status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
1380 *sq_shift, sq_depth);
1381
1382 return status;
1383 }
1384
1385 /**
1386 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
1387 * @ukinfo: qp initialization info
1388 * @rq_depth: Returns depth of RQ
1389 * @rq_shift: Returns shift of RQ
1390 */
1391 int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
1392 u32 *rq_depth, u8 *rq_shift)
1393 {
1394 int status;
1395
1396 irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
1397 rq_shift);
1398
1399 if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
1400 if (ukinfo->abi_ver > 4)
1401 *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
1402 }
1403
1404 status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
1405 *rq_shift, rq_depth);
1406
1407 return status;
1408 }
1409
1410 /**
1411 * irdma_uk_qp_init - initialize shared qp
1412 * @qp: hw qp (user and kernel)
1413 * @info: qp initialization info
1414 *
1415 * Initializes the vars used in both user and kernel mode.
1416 * The size of the wqe depends on the maximum number of fragments
1417 * allowed. The size of the wqe times the number of wqes should then
1418 * match the amount of memory allocated for the sq and rq.
1419 */
1420 int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
1421 {
1422 int ret_code = 0;
1423 u32 sq_ring_size;
1424
1425 qp->uk_attrs = info->uk_attrs;
1426 if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
1427 info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
1428 return -EINVAL;
1429
1430 qp->qp_caps = info->qp_caps;
1431 qp->sq_base = info->sq;
1432 qp->rq_base = info->rq;
1433 qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
1434 qp->shadow_area = info->shadow_area;
1435 qp->sq_wrtrk_array = info->sq_wrtrk_array;
1436
1437 qp->rq_wrid_array = info->rq_wrid_array;
1438 qp->wqe_alloc_db = info->wqe_alloc_db;
1439 qp->qp_id = info->qp_id;
1440 qp->sq_size = info->sq_size;
1441 qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
1442 sq_ring_size = qp->sq_size << info->sq_shift;
1443 IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
1444 IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
1445 if (info->first_sq_wq) {
1446 irdma_setup_connection_wqes(qp, info);
1447 qp->swqe_polarity = 1;
1448 qp->first_sq_wq = true;
1449 } else {
1450 qp->swqe_polarity = 0;
1451 }
1452 qp->swqe_polarity_deferred = 1;
1453 qp->rwqe_polarity = 0;
1454 qp->rq_size = info->rq_size;
1455 qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
1456 qp->max_inline_data = info->max_inline_data;
1457 qp->rq_wqe_size = info->rq_shift;
1458 IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
1459 qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
1460 if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
1461 qp->wqe_ops = iw_wqe_uk_ops_gen_1;
1462 else
1463 qp->wqe_ops = iw_wqe_uk_ops;
1464 return ret_code;
1465 }
1466
1467 /**
1468 * irdma_uk_cq_init - initialize shared cq (user and kernel)
1469 * @cq: hw cq
1470 * @info: hw cq initialization info
1471 */
1472 void irdma_uk_cq_init(struct irdma_cq_uk *cq,
1473 struct irdma_cq_uk_init_info *info)
1474 {
1475 cq->cq_base = info->cq_base;
1476 cq->cq_id = info->cq_id;
1477 cq->cq_size = info->cq_size;
1478 cq->cqe_alloc_db = info->cqe_alloc_db;
1479 cq->cq_ack_db = info->cq_ack_db;
1480 cq->shadow_area = info->shadow_area;
1481 cq->avoid_mem_cflct = info->avoid_mem_cflct;
1482 IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
1483 cq->polarity = 1;
1484 }
1485
1486 /**
1487 * irdma_uk_clean_cq - clean cq entries
1488 * @q: completion context
1489 * @cq: cq to clean
1490 */
1491 void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
1492 {
1493 __le64 *cqe;
1494 u64 qword3, comp_ctx;
1495 u32 cq_head;
1496 u8 polarity, temp;
1497
1498 cq_head = cq->cq_ring.head;
1499 temp = cq->polarity;
1500 do {
1501 if (cq->avoid_mem_cflct)
1502 cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
1503 else
1504 cqe = cq->cq_base[cq_head].buf;
1505 get_64bit_val(cqe, 24, &qword3);
1506 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
1507
1508 if (polarity != temp)
1509 break;
1510
1511 /* Ensure CQE contents are read after valid bit is checked */
1512 dma_rmb();
1513
1514 get_64bit_val(cqe, 8, &comp_ctx);
1515 if ((void *)(unsigned long)comp_ctx == q)
1516 set_64bit_val(cqe, 8, 0);
1517
1518 cq_head = (cq_head + 1) % cq->cq_ring.size;
1519 if (!cq_head)
1520 temp ^= 1;
1521 } while (true);
1522 }
1523
1524 /**
1525 * irdma_nop - post a nop
1526 * @qp: hw qp ptr
1527 * @wr_id: work request id
1528 * @signaled: signaled for completion
1529 * @post_sq: ring doorbell
1530 */
1531 int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
1532 {
1533 __le64 *wqe;
1534 u64 hdr;
1535 u32 wqe_idx;
1536 struct irdma_post_sq_info info = {};
1537
1538 info.wr_id = wr_id;
1539 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
1540 0, &info);
1541 if (!wqe)
1542 return -ENOMEM;
1543
1544 irdma_clr_wqes(qp, wqe_idx);
1545
1546 set_64bit_val(wqe, 0, 0);
1547 set_64bit_val(wqe, 8, 0);
1548 set_64bit_val(wqe, 16, 0);
1549
1550 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
1551 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
1552 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
1553
1554 dma_wmb(); /* make sure WQE is populated before valid bit is set */
1555
1556 set_64bit_val(wqe, 24, hdr);
1557 if (post_sq)
1558 irdma_uk_qp_post_wr(qp);
1559
1560 return 0;
1561 }
1562
1563 /**
1564 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
1565 * @frag_cnt: number of fragments
1566 * @quanta: quanta for frag_cnt
1567 */
1568 int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
1569 {
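/*
 * The first 32-byte quantum holds one fragment plus the WQE header;
 * each additional quantum holds two fragments, so quanta works out to
 * frag_cnt / 2 + 1, with a one-quantum minimum.
 */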
1570 switch (frag_cnt) {
1571 case 0:
1572 case 1:
1573 *quanta = IRDMA_QP_WQE_MIN_QUANTA;
1574 break;
1575 case 2:
1576 case 3:
1577 *quanta = 2;
1578 break;
1579 case 4:
1580 case 5:
1581 *quanta = 3;
1582 break;
1583 case 6:
1584 case 7:
1585 *quanta = 4;
1586 break;
1587 case 8:
1588 case 9:
1589 *quanta = 5;
1590 break;
1591 case 10:
1592 case 11:
1593 *quanta = 6;
1594 break;
1595 case 12:
1596 case 13:
1597 *quanta = 7;
1598 break;
1599 case 14:
1600 case 15: /* when immediate data is present */
1601 *quanta = 8;
1602 break;
1603 default:
1604 return -EINVAL;
1605 }
1606
1607 return 0;
1608 }
1609
1610 /**
1611 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
1612 * @frag_cnt: number of fragments
1613 * @wqe_size: size in bytes given frag_cnt
1614 */
1615 int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
1616 {
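/*
 * Same packing as the SQ (header plus one fragment in the first quantum,
 * two fragments per quantum after that), with the RQ WQE size rounded up
 * to a power-of-two size: 32, 64, 128 or 256 bytes.
 */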
1617 switch (frag_cnt) {
1618 case 0:
1619 case 1:
1620 *wqe_size = 32;
1621 break;
1622 case 2:
1623 case 3:
1624 *wqe_size = 64;
1625 break;
1626 case 4:
1627 case 5:
1628 case 6:
1629 case 7:
1630 *wqe_size = 128;
1631 break;
1632 case 8:
1633 case 9:
1634 case 10:
1635 case 11:
1636 case 12:
1637 case 13:
1638 case 14:
1639 *wqe_size = 256;
1640 break;
1641 default:
1642 return -EINVAL;
1643 }
1644
1645 return 0;
1646 }
1647