// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}
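
/*
 * Illustrative sketch (not driver code): each fragment occupies 16 bytes of
 * the WQE. For a hypothetical sge with addr = 0x1000, length = 0x40 and
 * lkey = 0x1234, irdma_set_fragment(wqe, 32, &sge, 1) would write:
 *
 *	wqe[32..39] = 0x1000                     (FRAG_TO, the buffer address)
 *	wqe[40..47] = valid bit | 0x40 | 0x1234  (VALID, FRAG_LEN, FRAG_STAG)
 *
 * A NULL sge zeroes the fragment but still stamps the valid bit, which is
 * how callers below terminate an even-numbered fragment list on GEN_2.
 */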

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}
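
/*
 * A worked example of the pre-clear above: when the SQ crosses a 128-WQE
 * boundary (qp_wqe_idx is a multiple of 128), the *next* block of 128
 * 32-byte quanta (0x1000 bytes) is memset to the inverse of the current
 * valid polarity, so the hardware never mistakes stale entries for posted
 * work. The block starting at index 0 uses the flipped sense because the
 * polarity itself toggles on every ring wrap.
 */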

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			writel(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
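
/*
 * Doorbell decision, illustrated with hypothetical numbers: suppose
 * initial_ring.head = 60, the ring size is 64, and new WQEs advanced
 * sw_sq_head to 4 (wrapped). The "else" arm applies, and the doorbell is
 * rung only if hw_sq_tail lies in the wrapped interval [60..63] or
 * [0..3], i.e. the hardware tail has already reached the newly posted
 * region; if the tail is still behind the old head, the engine is busy
 * with earlier WQEs and will reach the new ones without another doorbell.
 */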

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}
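
/**
 * irdma_qp_push_wqe - copy wqe to the push page and ring the push doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to copy
 * @quanta: size of WR in quanta
 * @wqe_idx: wqe index
 * @post_sq: flag to post sq
 */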
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
		    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}
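
/*
 * NOP-padding example (hypothetical values): with max_hw_sq_chunk = 8 and
 * the ring head at index 6, a 4-quanta WR does not fit in the 2 quanta
 * left in the current chunk. The code above posts 2 NOP WQEs to burn the
 * remainder, so the real WR starts at index 8, aligned to a chunk
 * boundary the hardware can fetch as one unit.
 */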

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}
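
/*
 * Minimal caller sketch (illustrative only, not driver code): posting a
 * signaled two-SGE RDMA write using the fields consumed above. The
 * variables my_wr_id, sg, remote_va and rkey are hypothetical; error
 * handling is elided.
 *
 *	struct irdma_post_sq_info info = {};
 *
 *	info.wr_id = my_wr_id;                  // caller-chosen cookie
 *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 *	info.signaled = true;
 *	info.op.rdma_write.lo_sg_list = sg;     // 2 prebuilt ib_sge entries
 *	info.op.rdma_write.num_lo_sges = 2;
 *	info.op.rdma_write.rem_addr.addr = remote_va;
 *	info.op.rdma_write.rem_addr.lkey = rkey;
 *	err = irdma_uk_rdma_write(qp, &info, true); // true: ring doorbell
 *
 * Two fragments with no immediate data gives frag_cnt = 2, hence one
 * additional fragment (addl_frag_cnt = 1) and 2 quanta per
 * irdma_fragcnt_to_quanta_sq().
 */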

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
					 u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	int i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}
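
/*
 * GEN_1 inline sizing example: the first quanta carries 16 data bytes, so
 * an 8-byte payload fits in IRDMA_QP_WQE_MIN_QUANTA (one quanta), while a
 * 24-byte payload spills past the header into a second quanta, as the
 * "wqe += 16; quanta_bytes_remaining = 32;" step in the copy routine
 * above implies.
 */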

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
				   u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	bool first_quanta = true;
	int i;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}

/**
 * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
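
/*
 * The thresholds above follow from the copy routine: the first quanta
 * holds 8 data bytes and every later quanta holds 31 (32 minus the
 * trailing inline-valid byte), so q quanta carry 8 + 31 * (q - 1) bytes:
 * 8, 39, 70, 101, 132, 163, 194, 225. For example, a 100-byte inline
 * payload needs 4 quanta (101-byte capacity).
 */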

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct ib_sge sge = {};

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	sge.lkey = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOMEM;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure shadow area is updated before arming doorbell */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
		} else {
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
	info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

/**
 * irdma_qp_round_up - round up qp wq depth to the next power of two
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
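
/*
 * This is the standard bit-smearing round-up to the next power of two:
 * decrement, OR in all shifted copies so every bit below the top set bit
 * becomes 1, then increment. For example, wqdepth = 300 (0x12C) smears to
 * 0x1FF and returns 512; an exact power of two such as 256 returns 256
 * thanks to the initial decrement.
 */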

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on the number of SGEs
 * and inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size
 * of 32 bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size
 * of 64 bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of
 * 128 bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}
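
/*
 * Example: a GEN_2 QP created with at most 5 SGEs and 64 bytes of inline
 * data gets shift = 2, i.e. 32 << 2 = 128-byte WQEs: large enough for
 * either the 5 fragments or the 64 inline bytes, whichever a given WR
 * actually uses.
 */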

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *sqdepth)
{
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}
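
/*
 * Example (with an illustrative IRDMA_SQ_RSVD = 1; see defs.h for the real
 * value): sq_size = 100 and shift = 2 give a raw depth of 100 * 4 + 1 =
 * 401 quanta, which irdma_qp_round_up() turns into 512. The result is then
 * clamped below by the software minimum and rejected with -EINVAL if it
 * exceeds max_hw_wq_quanta.
 */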

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *rqdepth)
{
	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode. The size of the
 * wqe depends on the maximum number of fragments allowed. The size of the
 * wqe times the number of wqes should equal the amount of memory allocated
 * for the sq and rq.
 */
int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
				    info->max_inline_data, &sqshift);
		if (info->abi_ver > 4)
			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	} else {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
				    info->max_inline_data, &sqshift);
	}
	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = rqshift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << rqshift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.push_wqe = false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15: /* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
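
/*
 * The table encodes quanta = frag_cnt / 2 + 1 (integer division) for
 * frag_cnt >= 2: the first 32-byte quanta holds the header plus one
 * 16-byte fragment, and each further quanta holds two fragments. For
 * example, 5 fragments need 3 quanta, and 14 or 15 fragments hit the
 * 8-quanta maximum.
 */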

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
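
/*
 * Unlike the SQ, RQ WQEs are fixed-size per QP, so the size is rounded to
 * a power-of-two bucket (32/64/128/256 bytes). A QP built for up to 6
 * receive fragments therefore consumes a 128-byte RQ WQE even when a
 * given receive posts only one fragment.
 */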