// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: The wqe valid
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
                               u8 valid)
{
        if (sge) {
                set_64bit_val(wqe, offset,
                              FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
                set_64bit_val(wqe, offset + 8,
                              FIELD_PREP(IRDMAQPSQ_VALID, valid) |
                              FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
                              FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
        } else {
                set_64bit_val(wqe, offset, 0);
                set_64bit_val(wqe, offset + 8,
                              FIELD_PREP(IRDMAQPSQ_VALID, valid));
        }
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
                                     struct ib_sge *sge, u8 valid)
{
        if (sge) {
                set_64bit_val(wqe, offset,
                              FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
                set_64bit_val(wqe, offset + 8,
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
        } else {
                set_64bit_val(wqe, offset, 0);
                set_64bit_val(wqe, offset + 8, 0);
        }
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{
        u64 hdr;
        __le64 *wqe;
        u32 wqe_idx;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return -EINVAL;

        wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        /* make sure WQE is written before valid bit is set */
        dma_wmb();

        set_64bit_val(wqe, 24, hdr);

        return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
        __le64 *wqe;
        u32 wqe_idx;

        if (!(qp_wqe_idx & 0x7F)) {
                wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
                wqe = qp->sq_base[wqe_idx].elem;
                if (wqe_idx)
                        memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
                else
                        memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
        }
}
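
/*
 * Worked example for irdma_clr_wqes, for illustration only: with a
 * 256-quantum SQ, posting at wqe_idx 128 clears the 128 quanta starting
 * at (128 + 128) % 256 = 0, i.e. the block just ahead of the head across
 * the wrap. Each quantum is 32 bytes, so 128 quanta = 0x1000 bytes. The
 * block is filled with the inverse of the valid polarity that will be in
 * effect when the head reaches it (polarity itself flips on a wrap, hence
 * the two memset cases), so HW never mistakes stale WQEs for valid ones.
 */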

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        /* valid bit is written and loads completed before reading shadow */
        mb();

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, 0, &temp);

        hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
        sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        if (sw_sq_head != qp->initial_ring.head) {
                if (qp->push_dropped) {
                        writel(qp->qp_id, qp->wqe_alloc_db);
                        qp->push_dropped = false;
                } else if (sw_sq_head != hw_sq_tail) {
                        if (sw_sq_head > qp->initial_ring.head) {
                                if (hw_sq_tail >= qp->initial_ring.head &&
                                    hw_sq_tail < sw_sq_head)
                                        writel(qp->qp_id, qp->wqe_alloc_db);
                        } else {
                                if (hw_sq_tail >= qp->initial_ring.head ||
                                    hw_sq_tail < sw_sq_head)
                                        writel(qp->qp_id, qp->wqe_alloc_db);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}
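
/*
 * Doorbell-decision example, for illustration only: the doorbell is rung
 * only when the HW tail lies within the span of WQEs posted since the
 * last doorbell (initial_ring.head .. sw_sq_head), i.e. HW may have gone
 * idle inside the new batch. On a ring of size 256 with initial head 250
 * and sw_sq_head 10 (the batch wrapped), HW tail values 250..255 and 0..9
 * trigger the doorbell; a tail of, say, 100 is outside the batch, so HW
 * will reach the new WQEs on its own and no doorbell is needed.
 */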

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
        set_32bit_val(qp->push_db, 0,
                      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
        qp->initial_ring.head = qp->sq_ring.head;
        qp->push_mode = true;
        qp->push_dropped = false;
}

void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
                       u32 wqe_idx, bool post_sq)
{
        __le64 *push;

        if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
            IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
            !qp->push_mode) {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        } else {
                push = (__le64 *)((uintptr_t)qp->push_wqe +
                                  (wqe_idx & 0x7) * 0x20);
                memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
                irdma_qp_ring_push_db(qp, wqe_idx);
        }
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
                                   u16 quanta, u32 total_size,
                                   struct irdma_post_sq_info *info)
{
        __le64 *wqe;
        __le64 *wqe_0 = NULL;
        u32 nop_wqe_idx;
        u16 avail_quanta;
        u16 i;

        avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
                       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
                       qp->uk_attrs->max_hw_sq_chunk);
        if (quanta <= avail_quanta) {
                /* WR fits in current chunk */
                if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
                        return NULL;
        } else {
                /* Need to pad with NOP */
                if (quanta + avail_quanta >
                    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
                        return NULL;

                nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
                for (i = 0; i < avail_quanta; i++) {
                        irdma_nop_1(qp);
                        IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
                }
                if (qp->push_db && info->push_wqe)
                        irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
                                          avail_quanta, nop_wqe_idx, true);
        }

        *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;

        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

        wqe = qp->sq_base[*wqe_idx].elem;
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
            (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
                wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
                wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
        }
        qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

        return wqe;
}
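
/*
 * Chunk-padding example, for illustration only: with max_hw_sq_chunk = 4
 * and the SQ head at quantum 6, avail_quanta = 4 - (6 % 4) = 2. A 3-quanta
 * WR cannot span the chunk boundary, so two NOP WQEs are posted at quanta
 * 6 and 7 and the WR lands chunk-aligned at quantum 8. This is why the
 * free-space check reserves quanta + avail_quanta, not just quanta.
 */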

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
        __le64 *wqe;
        int ret_code;

        if (IRDMA_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;

        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

        return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                        bool post_sq)
{
        u64 hdr;
        __le64 *wqe;
        struct irdma_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        int ret_code;
        u32 frag_cnt, addl_frag_cnt;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return -EINVAL;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].length;

        read_fence |= info->read_fence;

        if (info->imm_data_valid)
                frag_cnt = op_info->num_lo_sges + 1;
        else
                frag_cnt = op_info->num_lo_sges;
        addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
        ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
                                         info);
        if (!wqe)
                return -ENOMEM;

        irdma_clr_wqes(qp, wqe_idx);

        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

        if (info->imm_data_valid) {
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
                i = 0;
        } else {
                qp->wqe_ops.iw_set_fragment(wqe, 0,
                                            op_info->lo_sg_list,
                                            qp->swqe_polarity);
                i = 1;
        }

        for (byte_off = 32; i < op_info->num_lo_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off,
                                            &op_info->lo_sg_list[i],
                                            qp->swqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
            frag_cnt) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
              FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}
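
/*
 * Minimal caller sketch, for illustration only. Field names are those of
 * the irdma structures used above; the buffer, key and wr_id values are
 * hypothetical placeholders supplied by the caller:
 *
 *      struct irdma_post_sq_info info = {};
 *      struct ib_sge sge = {
 *              .addr = buf_dma_addr,   // caller's DMA-mapped buffer
 *              .length = buf_len,
 *              .lkey = mr_lkey,
 *      };
 *
 *      info.wr_id = my_wr_id;
 *      info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 *      info.signaled = true;
 *      info.op.rdma_write.lo_sg_list = &sge;
 *      info.op.rdma_write.num_lo_sges = 1;
 *      info.op.rdma_write.rem_addr.addr = remote_va;
 *      info.op.rdma_write.rem_addr.lkey = rkey;
 *
 *      err = irdma_uk_rdma_write(qp, &info, true); // true: ring doorbell
 */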

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                       bool inv_stag, bool post_sq)
{
        struct irdma_rdma_read *op_info;
        int ret_code;
        u32 i, byte_off, total_size = 0;
        bool local_fence = false;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u32 wqe_idx;
        u16 quanta;
        u64 hdr;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.rdma_read;
        if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
                return -EINVAL;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].length;

        ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
                                         info);
        if (!wqe)
                return -ENOMEM;

        irdma_clr_wqes(qp, wqe_idx);

        addl_frag_cnt = op_info->num_lo_sges > 1 ?
                        (op_info->num_lo_sges - 1) : 0;
        local_fence |= info->local_fence;

        qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
                                    qp->swqe_polarity);
        for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off,
                                            &op_info->lo_sg_list[i],
                                            qp->swqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
            !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }
        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_OPCODE,
                         (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                  bool post_sq)
{
        __le64 *wqe;
        struct irdma_post_send *op_info;
        u64 hdr;
        u32 i, wqe_idx, total_size = 0, byte_off;
        int ret_code;
        u32 frag_cnt, addl_frag_cnt;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return -EINVAL;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].length;

        if (info->imm_data_valid)
                frag_cnt = op_info->num_sges + 1;
        else
                frag_cnt = op_info->num_sges;
        ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
                                         info);
        if (!wqe)
                return -ENOMEM;

        irdma_clr_wqes(qp, wqe_idx);

        read_fence |= info->read_fence;
        addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
        if (info->imm_data_valid) {
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
                i = 0;
        } else {
                qp->wqe_ops.iw_set_fragment(wqe, 0,
                                            frag_cnt ? op_info->sg_list : NULL,
                                            qp->swqe_polarity);
                i = 1;
        }

        for (byte_off = 32; i < op_info->num_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
                                            qp->swqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
            frag_cnt) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
                      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
              FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
                         (info->imm_data_valid ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
              FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
                                        struct irdma_bind_window *op_info)
{
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
                      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
        set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
                                         u8 polarity)
{
        if (len <= 16) {
                memcpy(dest, src, len);
        } else {
                memcpy(dest, src, 16);
                src += 16;
                dest = dest + 32;
                memcpy(dest, src, len - 16);
        }
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
        return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
                                  struct irdma_bind_window *op_info)
{
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
                      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
        set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
{
        u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
        u32 copy_size;

        dest += 8;
        if (len <= 8) {
                memcpy(dest, src, len);
                return;
        }

        *((u64 *)dest) = *((u64 *)src);
        len -= 8;
        src += 8;
        dest += 24; /* point to additional 32 byte quanta */

        while (len) {
                copy_size = len < 31 ? len : 31;
                memcpy(dest, src, copy_size);
                *(dest + 31) = inline_valid;
                len -= copy_size;
                dest += 32;
                src += copy_size;
        }
}
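
/*
 * Inline layout example, for illustration only: the first quantum holds
 * up to 8 data bytes at byte offset 8 (offsets 0-7 and 16-31 carry WQE
 * control fields). Each additional 32-byte quantum carries up to 31 data
 * bytes, with its last byte (offset 31) stamped with the inline valid
 * bit. A 50-byte payload is therefore split 8 + 31 + 11 across three
 * quanta, matching irdma_inline_data_size_to_quanta(50) == 3 below.
 */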

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
        if (data_size <= 8)
                return IRDMA_QP_WQE_MIN_QUANTA;
        else if (data_size <= 39)
                return 2;
        else if (data_size <= 70)
                return 3;
        else if (data_size <= 101)
                return 4;
        else if (data_size <= 132)
                return 5;
        else if (data_size <= 163)
                return 6;
        else if (data_size <= 194)
                return 7;
        else
                return 8;
}
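
/*
 * The ladder above is the closed form quanta = 1 + DIV_ROUND_UP(len - 8, 31)
 * for len > 8, capped at 8: the first quantum carries 8 bytes and every
 * further quantum carries 31 (see irdma_copy_inline_data() above). E.g.
 * len = 39 gives 1 + 31/31 = 2, and len = 101 gives 1 + 93/31 = 4.
 */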

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
                               struct irdma_post_sq_info *info, bool post_sq)
{
        __le64 *wqe;
        struct irdma_inline_rdma_write *op_info;
        u64 hdr = 0;
        u32 wqe_idx;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.inline_rdma_write;

        if (op_info->len > qp->max_inline_data)
                return -EINVAL;

        quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
                                         info);
        if (!wqe)
                return -ENOMEM;

        irdma_clr_wqes(qp, wqe_idx);

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        if (info->imm_data_valid)
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

        qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
                                        qp->swqe_polarity);
        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
                         struct irdma_post_sq_info *info, bool post_sq)
{
        __le64 *wqe;
        struct irdma_post_inline_send *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.inline_send;

        if (op_info->len > qp->max_inline_data)
                return -EINVAL;

        quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
                                         info);
        if (!wqe)
                return -ENOMEM;

        irdma_clr_wqes(qp, wqe_idx);

        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
                      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

        read_fence |= info->read_fence;
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
              FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
                         (info->imm_data_valid ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
              FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        if (info->imm_data_valid)
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
        qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
                                        qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
                                   struct irdma_post_sq_info *info,
                                   bool post_sq)
{
        __le64 *wqe;
        struct irdma_inv_local_stag *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool local_fence = false;
        struct ib_sge sge = {};

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
                                         0, info);
        if (!wqe)
                return -ENOMEM;

        irdma_clr_wqes(qp, wqe_idx);

        sge.lkey = op_info->target_stag;
        qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

        set_64bit_val(wqe, 16, 0);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
                                  post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
                          struct irdma_post_rq_info *info)
{
        u32 wqe_idx, i, byte_off;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u64 hdr;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return -EINVAL;

        wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return -ENOMEM;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
        qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
                                    qp->rwqe_polarity);

        for (i = 1, byte_off = 32; i < info->num_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
                                            qp->rwqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
            info->num_sges) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->rwqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        set_64bit_val(wqe, 16, 0);
        hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        return 0;
}
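
/*
 * Minimal caller sketch, for illustration only (buffer, key and wr_id
 * values are hypothetical placeholders):
 *
 *      struct irdma_post_rq_info rq_info = {};
 *      struct ib_sge sge = {
 *              .addr = rx_buf_dma_addr,
 *              .length = rx_buf_len,
 *              .lkey = mr_lkey,
 *      };
 *
 *      rq_info.wr_id = my_wr_id;
 *      rq_info.sg_list = &sge;
 *      rq_info.num_sges = 1;
 *      err = irdma_uk_post_receive(qp, &rq_info);
 */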

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
        cq->cq_base = cq_base;
        cq->cq_size = cq_size;
        IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se;
        u8 arm_next;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);

        sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
        sw_cq_sel += cq_cnt;

        arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
        arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
        arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

        temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
                   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

        set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
                                      enum irdma_cmpl_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);
        arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
        arm_seq_num++;
        sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
        arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
        arm_next_se |= 1;
        if (cq_notify == IRDMA_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
                   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

        set_64bit_val(cq->shadow_area, 32, temp_val);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
                          struct irdma_cq_poll_info *info)
{
        u64 comp_ctx, qword0, qword2, qword3;
        __le64 *cqe;
        struct irdma_qp_uk *qp;
        struct irdma_ring *pring = NULL;
        u32 wqe_idx, q_type;
        int ret_code;
        bool move_cq_head = true;
        u8 polarity;
        u8 op_type;
        bool ext_valid;
        __le64 *ext_cqe;

        if (cq->avoid_mem_cflct)
                cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
        else
                cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
        if (polarity != cq->polarity)
                return -ENOENT;

        /* Ensure CQE contents are read after valid bit is checked */
        dma_rmb();

        ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
        if (ext_valid) {
                u64 qword6, qword7;
                u32 peek_head;

                if (cq->avoid_mem_cflct) {
                        ext_cqe = (__le64 *)((u8 *)cqe + 32);
                        get_64bit_val(ext_cqe, 24, &qword7);
                        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                } else {
                        peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
                        ext_cqe = cq->cq_base[peek_head].buf;
                        get_64bit_val(ext_cqe, 24, &qword7);
                        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                        if (!peek_head)
                                polarity ^= 1;
                }
                if (polarity != cq->polarity)
                        return -ENOENT;

                /* Ensure ext CQE contents are read after ext valid bit is checked */
                dma_rmb();

                info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
                if (info->imm_valid) {
                        u64 qword4;

                        get_64bit_val(ext_cqe, 0, &qword4);
                        info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
                }
                info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
                info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
                if (info->ud_smac_valid || info->ud_vlan_valid) {
                        get_64bit_val(ext_cqe, 16, &qword6);
                        if (info->ud_vlan_valid)
                                info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
                        if (info->ud_smac_valid) {
                                info->ud_smac[5] = qword6 & 0xFF;
                                info->ud_smac[4] = (qword6 >> 8) & 0xFF;
                                info->ud_smac[3] = (qword6 >> 16) & 0xFF;
                                info->ud_smac[2] = (qword6 >> 24) & 0xFF;
                                info->ud_smac[1] = (qword6 >> 32) & 0xFF;
                                info->ud_smac[0] = (qword6 >> 40) & 0xFF;
                        }
                }
        } else {
                info->imm_valid = false;
                info->ud_smac_valid = false;
                info->ud_vlan_valid = false;
        }

        q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
        info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
        info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
        info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
        if (info->error) {
                info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
                info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
                if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
                        info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
                        /* Set the min error to standard flush error code for remaining cqes */
                        if (info->minor_err != FLUSH_GENERAL_ERR) {
                                qword3 &= ~IRDMA_CQ_MINERR;
                                qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
                                set_64bit_val(cqe, 24, qword3);
                        }
                } else {
                        info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
                }
        } else {
                info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
        info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
        info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

        get_64bit_val(cqe, 8, &comp_ctx);

        info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
        qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
        if (!qp || qp->destroy_pending) {
                ret_code = -EFAULT;
                goto exit;
        }
        wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
        info->qp_handle = (irdma_qp_handle)(unsigned long)qp;

        if (q_type == IRDMA_CQE_QTYPE_RQ) {
                u32 array_idx;

                array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

                if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
                    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
                        if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
                                ret_code = -ENOENT;
                                goto exit;
                        }

                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                }

                info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

                if (info->imm_valid)
                        info->op_type = IRDMA_OP_TYPE_REC_IMM;
                else
                        info->op_type = IRDMA_OP_TYPE_REC;
                if (qword3 & IRDMACQ_STAG) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
                } else {
                        info->stag_invalid_set = false;
                }
                IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
                        qp->rq_flush_seen = true;
                        if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
                                qp->rq_flush_complete = true;
                        else
                                move_cq_head = false;
                }
                pring = &qp->rq_ring;
        } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
                if (qp->first_sq_wq) {
                        if (wqe_idx + 1 >= qp->conn_wqes)
                                qp->first_sq_wq = false;

                        if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
                                IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                                IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                                set_64bit_val(cq->shadow_area, 0,
                                              IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
                                memset(info, 0,
                                       sizeof(struct irdma_cq_poll_info));
                                return irdma_uk_cq_poll_cmpl(cq, info);
                        }
                }
                /* cease posting push mode on push drop */
                if (info->push_dropped) {
                        qp->push_mode = false;
                        qp->push_dropped = true;
                }
                if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        if (!info->comp_status)
                                info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
                        info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
                        IRDMA_RING_SET_TAIL(qp->sq_ring,
                                            wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
                } else {
                        if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
                                ret_code = -ENOENT;
                                goto exit;
                        }

                        do {
                                __le64 *sw_wqe;
                                u64 wqe_qword;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, 24,
                                              &wqe_qword);
                                op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
                                info->op_type = op_type;
                                IRDMA_RING_SET_TAIL(qp->sq_ring,
                                                    tail + qp->sq_wrtrk_array[tail].quanta);
                                if (op_type != IRDMAQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);
                        if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
                                info->minor_err = FLUSH_MW_BIND_ERR;
                        qp->sq_flush_seen = true;
                        if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
                                qp->sq_flush_complete = true;
                }
                pring = &qp->sq_ring;
        }

        ret_code = 0;

exit:
        if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
                if (pring && IRDMA_RING_MORE_WORK(*pring))
                        move_cq_head = false;

        if (move_cq_head) {
                IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
                        cq->polarity ^= 1;

                if (ext_valid && !cq->avoid_mem_cflct) {
                        IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                        if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
                                cq->polarity ^= 1;
                }

                IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                if (!cq->avoid_mem_cflct && ext_valid)
                        IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                set_64bit_val(cq->shadow_area, 0,
                              IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
        } else {
                qword3 &= ~IRDMA_CQ_WQEIDX;
                qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
                set_64bit_val(cqe, 24, qword3);
        }

        return ret_code;
}
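
/*
 * Minimal polling sketch, for illustration only: -ENOENT means "no
 * completion available yet" and is the normal empty-CQ return, so a
 * caller typically retries or rearms rather than treating it as an
 * error. handle_completion() and time_out are hypothetical:
 *
 *      struct irdma_cq_poll_info cq_info;
 *      int err;
 *
 *      do {
 *              err = irdma_uk_cq_poll_cmpl(cq, &cq_info);
 *      } while (err == -ENOENT && !time_out);
 *      if (!err)
 *              handle_completion(&cq_info);
 */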

/**
 * irdma_qp_round_up - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
        int scount = 1;

        for (wqdepth--; scount <= 16; scount *= 2)
                wqdepth |= wqdepth >> scount;

        return ++wqdepth;
}
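
/*
 * This is the standard "smear the bits right, then add one" round-up to
 * the next power of two. Worked example: wqdepth = 300 -> decrement to
 * 299 = 0b100101011; OR-ing in right shifts by 1, 2, 4, 8 and 16 fills
 * every bit below the MSB, giving 0b111111111 = 511, and the final
 * increment yields 512. Exact powers of two map to themselves (the
 * initial decrement handles that case).
 */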

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of
 * 32 bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of
 * 64 bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of
 * 128 bytes); otherwise shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
                         u32 inline_data, u8 *shift)
{
        *shift = 0;
        if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
                if (sge > 1 || inline_data > 8) {
                        if (sge < 4 && inline_data <= 39)
                                *shift = 1;
                        else if (sge < 8 && inline_data <= 101)
                                *shift = 2;
                        else
                                *shift = 3;
                }
        } else if (sge > 1 || inline_data > 16) {
                *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
        }
}

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
                      u32 *sqdepth)
{
        *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

        if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
                *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
        else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
                return -EINVAL;

        return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
                      u32 *rqdepth)
{
        *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

        if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
                *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
        else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
                return -EINVAL;

        return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
        .iw_copy_inline_data = irdma_copy_inline_data,
        .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
        .iw_set_fragment = irdma_set_fragment,
        .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
        .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
        .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
        .iw_set_fragment = irdma_set_fragment_gen_1,
        .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
                                        struct irdma_qp_uk_init_info *info)
{
        u16 move_cnt = 1;

        if (!info->legacy_mode &&
            (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
                move_cnt = 3;

        qp->conn_wqes = move_cnt;
        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
        IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. The size of wqe * the number of wqes should then equal the
 * amount of memory allocated for sq and rq.
 */
int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
        int ret_code = 0;
        u32 sq_ring_size;
        u8 sqshift, rqshift;

        qp->uk_attrs = info->uk_attrs;
        if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
            info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
                return -EINVAL;

        irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
                irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
                                    info->max_inline_data, &sqshift);
                if (info->abi_ver > 4)
                        rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
        } else {
                irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
                                    info->max_inline_data, &sqshift);
        }
        qp->qp_caps = info->qp_caps;
        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;

        qp->rq_wrid_array = info->rq_wrid_array;
        qp->wqe_alloc_db = info->wqe_alloc_db;
        qp->qp_id = info->qp_id;
        qp->sq_size = info->sq_size;
        qp->push_mode = false;
        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << sqshift;
        IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
        IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
        if (info->first_sq_wq) {
                irdma_setup_connection_wqes(qp, info);
                qp->swqe_polarity = 1;
                qp->first_sq_wq = true;
        } else {
                qp->swqe_polarity = 0;
        }
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;
        qp->rq_size = info->rq_size;
        qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
        qp->max_inline_data = info->max_inline_data;
        qp->rq_wqe_size = rqshift;
        IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
        qp->rq_wqe_size_multiplier = 1 << rqshift;
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
                qp->wqe_ops = iw_wqe_uk_ops_gen_1;
        else
                qp->wqe_ops = iw_wqe_uk_ops;
        return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
                      struct irdma_cq_uk_init_info *info)
{
        cq->cq_base = info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_db = info->cqe_alloc_db;
        cq->cq_ack_db = info->cq_ack_db;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;
        IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
        __le64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
                else
                        cqe = cq->cq_base[cq_head].buf;
                get_64bit_val(cqe, 24, &qword3);
                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

                if (polarity != temp)
                        break;

                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == q)
                        set_64bit_val(cqe, 8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
        __le64 *wqe;
        u64 hdr;
        u32 wqe_idx;
        struct irdma_post_sq_info info = {};

        info.push_wqe = false;
        info.wr_id = wr_id;
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
                                         0, &info);
        if (!wqe)
                return -ENOMEM;

        irdma_clr_wqes(qp, wqe_idx);

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *quanta = IRDMA_QP_WQE_MIN_QUANTA;
                break;
        case 2:
        case 3:
                *quanta = 2;
                break;
        case 4:
        case 5:
                *quanta = 3;
                break;
        case 6:
        case 7:
                *quanta = 4;
                break;
        case 8:
        case 9:
                *quanta = 5;
                break;
        case 10:
        case 11:
                *quanta = 6;
                break;
        case 12:
        case 13:
                *quanta = 7;
                break;
        case 14:
        case 15: /* when immediate data is present */
                *quanta = 8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
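
/*
 * Equivalently, quanta = frag_cnt / 2 + 1 for frag_cnt <= 15: the first
 * 32-byte quantum holds one 16-byte fragment alongside the WQE control
 * fields, and each further quantum holds two fragments. E.g.
 * frag_cnt = 5 -> 3 quanta, frag_cnt = 14 -> 8 quanta.
 */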

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        case 8:
        case 9:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
                *wqe_size = 256;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}