1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #ifndef IRDMA_TYPE_H
4 #define IRDMA_TYPE_H
5 #include "osdep.h"
6 #include "irdma.h"
7 #include "user.h"
8 #include "hmc.h"
9 #include "uda.h"
10 #include "ws.h"
/*
 * Debug-category name strings; used as prefixes to select/tag output from
 * the driver's debug print machinery.
 * (Fix: IRDMA_DEBUG_CQE used a tab after "#define" while all siblings use
 * a space — normalized for consistency.)
 */
#define IRDMA_DEBUG_ERR		"ERR"
#define IRDMA_DEBUG_INIT	"INIT"
#define IRDMA_DEBUG_DEV		"DEV"
#define IRDMA_DEBUG_CM		"CM"
#define IRDMA_DEBUG_VERBS	"VERBS"
#define IRDMA_DEBUG_PUDA	"PUDA"
#define IRDMA_DEBUG_ILQ		"ILQ"
#define IRDMA_DEBUG_IEQ		"IEQ"
#define IRDMA_DEBUG_QP		"QP"
#define IRDMA_DEBUG_CQ		"CQ"
#define IRDMA_DEBUG_MR		"MR"
#define IRDMA_DEBUG_PBLE	"PBLE"
#define IRDMA_DEBUG_WQE		"WQE"
#define IRDMA_DEBUG_AEQ		"AEQ"
#define IRDMA_DEBUG_CQP		"CQP"
#define IRDMA_DEBUG_HMC		"HMC"
#define IRDMA_DEBUG_USER	"USER"
#define IRDMA_DEBUG_VIRT	"VIRT"
#define IRDMA_DEBUG_DCB		"DCB"
#define IRDMA_DEBUG_CQE		"CQE"
#define IRDMA_DEBUG_CLNT	"CLNT"
#define IRDMA_DEBUG_WS		"WS"
#define IRDMA_DEBUG_STATS	"STATS"
34 
/* HW-supported page sizes for memory registration */
enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

/* Header-present flag bits carried in irdma_terminate_hdr.hdrct */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG  = 0x80,
	DDP_HDR_FLAG  = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

/* Protocol layer originating a terminate message */
enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP  = 1,
	LAYER_MPA  = 2,
};

/*
 * Terminate error-type codes. Values intentionally overlap between the
 * RDMAP_* and DDP_* groups; they are qualified by the originating layer.
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP   = 2,
	DDP_CATASTROPHIC  = 0,
	DDP_TAGGED_BUF    = 1,
	DDP_UNTAGGED_BUF  = 2,
	DDP_LLP		  = 3,
};
61 
/* RDMAP-layer terminate error codes */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG		  = 0x00,
	RDMAP_INV_BOUNDS	  = 0x01,
	RDMAP_ACCESS		  = 0x02,
	RDMAP_UNASSOC_STAG	  = 0x03,
	RDMAP_TO_WRAP		  = 0x04,
	RDMAP_INV_RDMAP_VER       = 0x05,
	RDMAP_UNEXPECTED_OP       = 0x06,
	RDMAP_CATASTROPHIC_LOCAL  = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG       = 0x09,
	RDMAP_UNSPECIFIED	  = 0xff,
};

/*
 * DDP-layer terminate error codes. The tagged-buffer and untagged-buffer
 * code spaces overlap; they are distinguished by the error type
 * (DDP_TAGGED_BUF vs DDP_UNTAGGED_BUF).
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL      = 0x00,
	DDP_TAGGED_INV_STAG	    = 0x00,
	DDP_TAGGED_BOUNDS	    = 0x01,
	DDP_TAGGED_UNASSOC_STAG     = 0x02,
	DDP_TAGGED_TO_WRAP	    = 0x03,
	DDP_TAGGED_INV_DDP_VER      = 0x04,
	DDP_UNTAGGED_INV_QN	    = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE  = 0x03,
	DDP_UNTAGGED_INV_MO	    = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG   = 0x05,
	DDP_UNTAGGED_INV_DDP_VER    = 0x06,
};

/* MPA-layer terminate error codes */
enum irdma_term_mpa_errors {
	MPA_CLOSED  = 0x01,
	MPA_CRC     = 0x02,
	MPA_MARKER  = 0x03,
	MPA_REQ_RSP = 0x04,
};
97 
/* QP event classification (see irdma_sc_qp.event_type) */
enum irdma_qp_event_type {
	IRDMA_QP_EVENT_CATASTROPHIC,
	IRDMA_QP_EVENT_ACCESS_ERR,
	IRDMA_QP_EVENT_REQ_ERR,
};
103 
/*
 * Indices into the HW statistics buffer/arrays.
 * Layout: 0-9 are gen1 32-bit counters, 10-41 gen1 64-bit counters;
 * gen2 appends entries 42-45. The *_MAX_GEN_* values are array sizes,
 * not real counters.
 */
enum irdma_hw_stats_index {
	/* gen1 - 32-bit */
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC		= 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE	= 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD	= 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC		= 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE	= 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG		= 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR		= 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR	= 8,
	IRDMA_HW_STAT_INDEX_RXVLANERR		= 9,
	/* gen1 - 64-bit */
	IRDMA_HW_STAT_INDEX_IP4RXOCTS		= 10,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS		= 11,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS		= 12,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS		= 13,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS		= 14,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS		= 15,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS		= 16,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS		= 17,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS		= 18,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS		= 19,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS		= 20,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS		= 21,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS		= 22,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS		= 23,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS		= 24,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS		= 25,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS		= 26,
	IRDMA_HW_STAT_INDEX_TCPTXSEG		= 27,
	IRDMA_HW_STAT_INDEX_RDMARXRDS		= 28,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS		= 29,
	IRDMA_HW_STAT_INDEX_RDMARXWRS		= 30,
	IRDMA_HW_STAT_INDEX_RDMATXRDS		= 31,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS		= 32,
	IRDMA_HW_STAT_INDEX_RDMATXWRS		= 33,
	IRDMA_HW_STAT_INDEX_RDMAVBND		= 34,
	IRDMA_HW_STAT_INDEX_RDMAVINV		= 35,
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS         = 36,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS         = 37,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS         = 38,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS         = 39,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS           = 40,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS           = 41,
	IRDMA_HW_STAT_INDEX_MAX_GEN_1           = 42, /* Must be same value as next entry */
	/* gen2 - 64-bit */
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS   = 42,
	/* gen2 - 32-bit */
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED      = 43,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED      = 44,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT         = 45,
	IRDMA_HW_STAT_INDEX_MAX_GEN_2		= 46,
};
158 
/* Indices into irdma_sc_dev.feature_info[] (firmware-reported features) */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX       = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};

/* Work-scheduler node priority/arbitration types */
enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR     = 1,
	IRDMA_PRIO_STRICT	   = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

/* Ownership of a VSI: virtual function, virtual machine, or physical function */
enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

/* HMC resource-partitioning profile selection */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT  = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL    = 3,
};

/* Quad-hash (connection steering) entry kinds */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

/* Quad-hash table operations (see irdma_qhash_table_info.manage) */
enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

/* Who handles TCP SYN/RST processing: hardware or firmware */
enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

/* Queue category (see irdma_queue_type users) */
enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
};
210 
struct irdma_sc_dev;
struct irdma_vsi_pestat;

/* DCQCN congestion-control tunables, handed to the CQP at init */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};

/* Parameters for irdma_sc_cqp_init(): CQP SQ memory, sizing and config */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;	/* PA backing host_ctx */
	u64 sq_pa;		/* PA backing sq */
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;		/* enum irdma_cqp_hmc_profile value */
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

/* Terminate message header: layer/error-type, code, and hdrct flag bits */
struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;	/* enum irdma_hdrct_flags bits */
	u8 rsvd;
};
254 
/* Fixed-size raw element types for the various HW queues */

/* One CQP SQ work-queue element */
struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

/* One asynchronous event queue element */
struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

/* One completion event queue element */
struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

/* Raw CQP context image */
struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

/* CQ shadow (doorbell/consumer state) area */
struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

/* Per-counter register offsets, gen1 counters only */
struct irdma_dev_hw_stats_offsets {
	u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
};

/* Accumulated counter values, indexed by enum irdma_hw_stats_index */
struct irdma_dev_hw_stats {
	u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Raw counter snapshot as gathered from HW */
struct irdma_gather_stats {
	u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Location of one counter within the gathered stats buffer */
struct irdma_hw_stat_map {
	u16 byteoff;	/* byte offset into the gather buffer */
	u8 bitoff;	/* bit offset within that word */
	u64 bitmask;	/* valid bits of the counter */
};
292 
/* State for one statistics-gather operation (DMA buffer + targeting) */
struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u8 hmc_fcn_index;
	u8 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;	/* DMA buffer HW writes counters into */
	void *gather_stats_va;			/* current snapshot */
	void *last_gather_stats_va;		/* previous snapshot, for delta computation */
};

/* Per-VSI statistics context: current/last values plus periodic-gather timer */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct timer_list stats_timer;
	struct irdma_sc_vsi *vsi;
	struct irdma_dev_hw_stats last_hw_stats;
	spinlock_t lock; /* rdma stats lock */
};

/* Low-level HW handle: MMIO base addresses and HMC object info */
struct irdma_hw {
	u8 __iomem *hw_addr;
	u8 __iomem *priv_hw_addr;
	struct device *device;
	struct irdma_hmc_info hmc;
};
319 
/*
 * Partial-FPDU reassembly state for the IEQ path (MPA/DDP stream handling),
 * including error/ordering counters.
 */
struct irdma_pfpdu {
	struct list_head rxlist;	/* buffers awaiting reassembly */
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8  marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};

/* Protection domain; initialized by irdma_sc_pd_init() */
struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;	/* user-space ABI version for this PD */
};

/* One CQP SQ quantum (same size as a CQP WQE) */
struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};
353 
/*
 * Control QP context: the admin command channel to firmware/HW.
 * Commands are posted to sq_ring; requested_ops/completed_ops track
 * outstanding work (see irdma_check_cqp_progress()).
 */
struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;		/* PA of sq_base */
	u64 host_ctx_pa;	/* PA of host_ctx */
	void *back_cqp;		/* opaque back-pointer for the upper layer */
	struct irdma_sc_dev *dev;
	int (*process_cqp_sds)(struct irdma_sc_dev *dev,
			       struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;	/* per-WQE caller cookies, indexed by SQ slot */
	u64 requested_ops;	/* commands posted */
	atomic64_t completed_ops;	/* commands completed by HW */
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;		/* valid-bit phase for ring wrap detection */
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};
386 
/* Asynchronous event queue context (device-wide error/event reporting) */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;	/* PA of aeqe_base */
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;		/* interrupt vector for this AEQ */
	u8 polarity;		/* valid-bit phase for ring wrap detection */
	bool virtual_map:1;	/* queue memory described by PBLs rather than contiguous */
};

/* Completion event queue context; fans out to the CQs in reg_cq[] */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;	/* PA of ceqe_base */
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;		/* valid-bit phase for ring wrap detection */
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;	/* CQs registered to this CEQ */
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};
423 
424 struct irdma_sc_cq {
425 	struct irdma_cq_uk cq_uk;
426 	u64 cq_pa;
427 	u64 shadow_area_pa;
428 	struct irdma_sc_dev *dev;
429 	struct irdma_sc_vsi *vsi;
430 	void *pbl_list;
431 	void *back_cq;
432 	u32 ceq_id;
433 	u32 shadow_read_threshold;
434 	u8 pbl_chunk_size;
435 	u8 cq_type;
436 	u8 tph_val;
437 	u32 first_pm_pbl_idx;
438 	bool ceqe_mask:1;
439 	bool virtual_map:1;
440 	bool check_overflow:1;
441 	bool ceq_id_valid:1;
442 	bool tph_en;
443 };
444 
/*
 * Privileged (driver-side) QP context, wrapping the user-space-shared
 * irdma_qp_uk state; holds HW context addresses, flush/terminate state
 * and QoS placement.
 */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;		/* PA of the SQ */
	u64 rq_pa;		/* PA of the RQ */
	u64 hw_host_ctx_pa;	/* PA of hw_host_ctx */
	u64 shadow_area_pa;
	u64 q2_pa;		/* PA of q2_buf */
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;	/* IEQ partial-FPDU reassembly state */
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;	/* linked into a irdma_qos.qplist via 'list' below */
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;
};
488 
/* Arguments for allocating/freeing a HW statistics instance */
struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u8 hmc_fn_id;
	u8 stats_idx;
};

/* User-priority map configuration */
struct irdma_up_info {
	u8 map[8];		/* per-UP mapping table */
	u8 cnp_up_override;
	u8 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

/* Work-scheduler tree node attributes sent to the CQP */
struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;	/* enum irdma_sched_prio_type value */
	u8 tc;
	u8 weight;
};

/* Miscellaneous HMC FPM sizing parameters parsed from the query buffer */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};
528 
/* Default relative bandwidth for WS leaf/parent nodes */
#define IRDMA_LEAF_DEFAULT_REL_BW		64
#define IRDMA_PARENT_DEFAULT_REL_BW		1

/* Per-priority QoS level: the QPs assigned to it and its scheduler attributes */
struct irdma_qos {
	struct list_head qplist;	/* QPs placed at this QoS level */
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u64 lan_qos_handle;
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};

#define IRDMA_INVALID_STATS_IDX 0xff
/*
 * Per-VSI context: ILQ/IEQ resources, QoS levels, statistics, and the
 * qset registration callbacks supplied at init time.
 */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;		/* opaque back-pointer for the upper layer */
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;
	u32 exception_lan_q;
	u16 mtu;
	u16 vm_id;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_inst_alloc:1;
	bool tc_change_pending:1;
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	u8 qos_rel_bw;
	u8 qos_prio_type;
	u8 stats_idx;		/* IRDMA_INVALID_STATS_IDX when not allocated */
	u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
	bool dscp_mode:1;
};
575 
/*
 * Device-level context: doorbell/register mappings, HMC and FPM state,
 * the control queues (CQP/CCQ/AEQ/CEQs), and work-scheduler tree root.
 */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1];	/* stats instance allocation map */
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u8 __iomem *db_addr;
	u32 __iomem *wqe_alloc_db;
	u32 __iomem *cq_arm_db;
	u32 __iomem *aeq_alloc_db;
	u32 __iomem *cqp_db;
	u32 __iomem *cq_ack_db;
	u32 __iomem *ceq_itr_mask_db;
	u32 __iomem *aeq_itr_mask_db;
	u32 __iomem *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr;   /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];		/* HW-generation-specific field masks */
	u64 hw_shifts[IRDMA_MAX_SHIFTS];	/* HW-generation-specific field shifts */
	const struct irdma_hw_stat_map *hw_stats_map;
	u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
	u64 feature_info[IRDMA_MAX_FEATURES];	/* indexed by enum irdma_feature_type */
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];	/* per-opcode CQP command counts */
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u16 num_vfs;
	u8 hmc_fn_id;
	u8 vf_id;
	bool vchnl_up:1;
	bool ceq_valid:1;
	u8 pci_rev;
	int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};
622 
623 struct irdma_modify_cq_info {
624 	u64 cq_pa;
625 	struct irdma_cqe *cq_base;
626 	u32 cq_size;
627 	u32 shadow_read_threshold;
628 	u8 pbl_chunk_size;
629 	u32 first_pm_pbl_idx;
630 	bool virtual_map:1;
631 	bool check_overflow;
632 	bool cq_resize:1;
633 };
634 
635 struct irdma_create_qp_info {
636 	bool ord_valid:1;
637 	bool tcp_ctx_valid:1;
638 	bool cq_num_valid:1;
639 	bool arp_cache_idx_valid:1;
640 	bool mac_valid:1;
641 	bool force_lpb;
642 	u8 next_iwarp_state;
643 };
644 
/* Parameters for irdma_sc_qp_modify() (iWARP state transitions) */
struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;	/* length of the terminate message, if one is sent */
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};

/* One decoded CCQ completion (see irdma_sc_ccq_get_cqe_info()) */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;		/* caller cookie posted with the command */
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error;
};
676 
/* One DCB application-priority table entry */
struct irdma_dcb_app_info {
	u8 priority;
	u8 selector;
	u16 prot_id;
};

/* Per-traffic-class QoS attributes */
struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};

/* L2 (LAN driver) parameters passed down to the RDMA VSI */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];	/* user priority -> traffic class map */
	u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
	bool dscp_mode:1;
};
706 
/* Parameters for initializing an irdma_sc_vsi */
struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	u16 vm_id;
	/* qset registration callbacks copied into irdma_sc_vsi */
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

/* Parameters for attaching statistics to a VSI */
struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_stats_inst;
};

/* Parameters for device (irdma_sc_dev) initialization */
struct irdma_device_init_info {
	u64 fpm_query_buf_pa;	/* PA of fpm_query_buf */
	u64 fpm_commit_buf_pa;	/* PA of fpm_commit_buf */
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void __iomem *bar0;
	u8 hmc_fn_id;
};
736 
/* Parameters for irdma_sc_ceq_init() */
struct irdma_ceq_init_info {
	u64 ceqe_pa;		/* PA of ceqe_base */
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_idx;
};

/* Parameters for irdma_sc_aeq_init() */
struct irdma_aeq_init_info {
	u64 aeq_elem_pa;	/* PA of aeqe_base */
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};
766 
/* Parameters for irdma_sc_ccq_init() */
struct irdma_ccq_init_info {
	u64 cq_pa;		/* PA of cq_base */
	u64 shadow_area_pa;	/* PA of shadow_area */
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};

/* UDP transport portion of the QP HW context (RoCEv2) */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];	/* IPv4 uses one word; 4 words for IPv6 */
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};
811 
/* RoCE protocol portion of the QP HW context */
struct irdma_roce_offload_info {
	u16 p_key;
	u16 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;		/* outbound RDMA read depth */
	u16 ird_size;		/* inbound RDMA read depth */
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

/* iWARP protocol portion of the QP HW context */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u16 err_rq_idx;
	u32 pd_id;
	u16 ord_size;		/* outbound RDMA read depth */
	u16 ird_size;		/* inbound RDMA read depth */
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};
874 
875 struct irdma_tcp_offload_info {
876 	bool ipv4:1;
877 	bool no_nagle:1;
878 	bool insert_vlan_tag:1;
879 	bool time_stamp:1;
880 	bool drop_ooo_seg:1;
881 	bool avoid_stretch_ack:1;
882 	bool wscale:1;
883 	bool ignore_tcp_opt:1;
884 	bool ignore_tcp_uns_opt:1;
885 	u8 cwnd_inc_limit;
886 	u8 dup_ack_thresh;
887 	u8 ttl;
888 	u8 src_mac_addr_idx;
889 	u8 tos;
890 	u16 src_port;
891 	u16 dst_port;
892 	u32 dest_ip_addr[4];
893 	//u32 dest_ip_addr0;
894 	//u32 dest_ip_addr1;
895 	//u32 dest_ip_addr2;
896 	//u32 dest_ip_addr3;
897 	u32 snd_mss;
898 	u16 syn_rst_handling;
899 	u16 vlan_tag;
900 	u16 arp_idx;
901 	u32 flow_label;
902 	u8 tcp_state;
903 	u8 snd_wscale;
904 	u8 rcv_wscale;
905 	u32 time_stamp_recent;
906 	u32 time_stamp_age;
907 	u32 snd_nxt;
908 	u32 snd_wnd;
909 	u32 rcv_nxt;
910 	u32 rcv_wnd;
911 	u32 snd_max;
912 	u32 snd_una;
913 	u32 srtt;
914 	u32 rtt_var;
915 	u32 ss_thresh;
916 	u32 cwnd;
917 	u32 snd_wl1;
918 	u32 snd_wl2;
919 	u32 max_snd_window;
920 	u8 rexmit_thresh;
921 	u32 local_ipaddr[4];
922 };
923 
/*
 * Aggregate passed to irdma_sc_qp_setctx()/irdma_sc_qp_setctx_roce():
 * transport info is TCP or UDP, protocol info is iWARP or RoCE, selected
 * by the *_valid flags / which setctx variant is called.
 */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u8 stats_idx;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

/* One decoded asynchronous event (see irdma_sc_get_next_aeqe()) */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u16 ae_id;
	u16 wqe_idx;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;	/* event refers to a QP */
	bool cq:1;	/* event refers to a CQ */
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	u8 q2_data_written;
	u8 ae_src;
};
962 
/* Parameters for allocating a STag (memory region handle) */
struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	bool all_memory:1;
	u8 hmc_fcn_index;
};

/* Parameters for allocating a memory window */
struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;
	bool mw1_bind_dont_vldt_key:1;
};

/* Parameters for registering a non-shared memory region */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool all_memory:1;
};
1004 
/* Parameters for a fast-register WQE (see irdma_sc_fast_register()) */
struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;		/* first byte offset */
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};

/* Parameters for deallocating a STag */
struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;		/* STag belongs to an MR (vs a MW) */
	bool dealloc_pbl:1;
};

/* Parameters for registering a shared memory region */
struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};
1045 
/* Parameters for irdma_sc_qp_init() */
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;		/* PA of the SQ */
	u64 rq_pa;		/* PA of the RQ */
	u64 host_ctx_pa;	/* PA of host_ctx */
	u64 q2_pa;		/* PA of q2 */
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

/* Parameters for irdma_sc_cq_init() */
struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;		/* PA of the CQ element array */
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

/* Parameters for uploading a QP context to a host buffer */
struct irdma_upload_context_info {
	u64 buf_pa;		/* PA of the destination buffer */
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};
1091 
/* Local MAC table entry */
struct irdma_local_mac_entry_info {
	/* NOTE(review): literal 6 here; sibling structs use ETH_ALEN for
	 * MAC arrays — consider ETH_ALEN for consistency (same value).
	 */
	u8 mac_addr[6];
	u16 entry_idx;
};

/* Parameters for adding an ARP cache entry */
struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETH_ALEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent;
};
1103 
/* Accelerated port/binding table entry: add or remove a port */
struct irdma_apbvt_info {
	u16 port;
	bool add;	/* true = add entry, false = delete */
};

/* Parameters for a quad-hash (connection steering) table operation */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];		/* IPv4 uses one word; 4 words for IPv6 */
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};

/* Parameters for managing a push page mapping */
struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;	/* nonzero = release the page */
	u8 push_page_type;
};
1131 
/* Parameters for irdma_sc_qp_flush_wqes() */
struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;		/* flush the SQ */
	bool rq:1;		/* flush the RQ */
	bool userflushcode:1;	/* use the caller-supplied codes above */
	bool generate_ae:1;
};

/* Parameters for generating an asynchronous event on a QP */
struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Progress snapshot for CQP timeout detection (irdma_check_cqp_progress()) */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;	/* completed-op count at last check */
	u32 count;		/* consecutive checks without progress */
};

/* HW-generation-specific interrupt configuration callbacks */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};
1162 
/* CCQ (CQP completion queue) APIs */
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
			bool check_overflow, bool post_sq);
int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
			      struct irdma_ccq_cqe_info *info);
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
		      struct irdma_ccq_init_info *info);

/* CEQ APIs */
int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		      struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

/* AEQ APIs */
int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		      struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
			   struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

/* PD / CQP / QP / CQ control APIs */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
		      struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
				  struct irdma_ccq_cqe_info *cmpl_info);
int irdma_sc_fast_register(struct irdma_sc_qp *qp,
			   struct irdma_fast_reg_stag_info *info, bool post_sq);
int irdma_sc_qp_create(struct irdma_sc_qp *qp,
		       struct irdma_create_qp_info *info, u64 scratch,
		       bool post_sq);
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
			   struct irdma_qp_flush_info *info, u64 scratch,
			   bool post_sq);
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
		       struct irdma_modify_qp_info *info, u64 scratch,
		       bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);

void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
					u8 hmc_fn_id, bool post_sq,
					bool poll_registers);

/* NOTE(review): lacks the irdma_ prefix used by every other export here —
 * confirm whether renaming is feasible without breaking callers.
 */
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
/*
 * struct cqp_info - input parameters for a single control-QP (CQP) command.
 *
 * Exactly one union member is meaningful per command; which one is selected
 * by the cqp_cmd opcode of the enclosing struct cqp_cmds_info.  Each member
 * carries the target object (qp/cq/ceq/aeq/dev/cqp), an optional
 * command-specific info struct, and a u64 scratch value passed through to
 * the CQP WQE so the completion can be correlated back to the request.
 * NOTE(review): member/opcode pairing is inferred from member names —
 * confirm against the CQP command dispatch code.
 */
struct cqp_info {
	union {
		/* QP create / modify / destroy */
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		/* CQ create / modify / destroy */
		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		/* Memory-region / memory-window STag management */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		/* ARP cache and local MAC table maintenance */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		/* HMC function management */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		/* CEQ / AEQ create and destroy */
		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		/* FPM (function private memory) query / commit */
		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		/* Address handle create / destroy */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		/* Multicast group create / destroy / modify */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		/* Statistics instance management and gathering */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		/* Work scheduler node and user-priority map */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};
1472 
/*
 * struct cqp_cmds_info - one queued CQP command: opcode plus its input
 * parameters, linked on a pending-command list.
 */
struct cqp_cmds_info {
	struct list_head cqp_cmd_entry; /* linkage on the CQP command list */
	u8 cqp_cmd;	/* command opcode; selects the valid in.u member */
	u8 post_sq;	/* presumably: nonzero to post to the CQP SQ
			 * immediately (mirrors the post_sq args above) —
			 * confirm against the dispatcher */
	struct cqp_info in;	/* command-specific input parameters */
};
1479 
1480 __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
1481 					   u32 *wqe_idx);
1482 
1483 /**
1484  * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1485  * @cqp: struct for cqp hw
1486  * @scratch: private data for CQP WQE
1487  */
irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp * cqp,u64 scratch)1488 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1489 {
1490 	u32 wqe_idx;
1491 
1492 	return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1493 }
1494 #endif /* IRDMA_TYPE_H */
1495