/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H
#include "osdep.h"
#include "irdma.h"
#include "user.h"
#include "hmc.h"
#include "uda.h"
#include "ws.h"
#define IRDMA_DEBUG_ERR "ERR"
#define IRDMA_DEBUG_INIT "INIT"
#define IRDMA_DEBUG_DEV "DEV"
#define IRDMA_DEBUG_CM "CM"
#define IRDMA_DEBUG_VERBS "VERBS"
#define IRDMA_DEBUG_PUDA "PUDA"
#define IRDMA_DEBUG_ILQ "ILQ"
#define IRDMA_DEBUG_IEQ "IEQ"
#define IRDMA_DEBUG_QP "QP"
#define IRDMA_DEBUG_CQ "CQ"
#define IRDMA_DEBUG_MR "MR"
#define IRDMA_DEBUG_PBLE "PBLE"
#define IRDMA_DEBUG_WQE "WQE"
#define IRDMA_DEBUG_AEQ "AEQ"
#define IRDMA_DEBUG_CQP "CQP"
#define IRDMA_DEBUG_HMC "HMC"
#define IRDMA_DEBUG_USER "USER"
#define IRDMA_DEBUG_VIRT "VIRT"
#define IRDMA_DEBUG_DCB "DCB"
#define IRDMA_DEBUG_CQE "CQE"
#define IRDMA_DEBUG_CLNT "CLNT"
#define IRDMA_DEBUG_WS "WS"
#define IRDMA_DEBUG_STATS "STATS"

enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

enum irdma_hdrct_flags {
	DDP_LEN_FLAG = 0x80,
	DDP_HDR_FLAG = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP = 1,
	LAYER_MPA = 2,
};

enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP = 2,
	DDP_CATASTROPHIC = 0,
	DDP_TAGGED_BUF = 1,
	DDP_UNTAGGED_BUF = 2,
	DDP_LLP = 3,
};

enum irdma_term_rdma_errors {
	RDMAP_INV_STAG = 0x00,
	RDMAP_INV_BOUNDS = 0x01,
	RDMAP_ACCESS = 0x02,
	RDMAP_UNASSOC_STAG = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_RDMAP_VER = 0x05,
	RDMAP_UNEXPECTED_OP = 0x06,
	RDMAP_CATASTROPHIC_LOCAL = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff,
};

enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL = 0x00,
	DDP_TAGGED_INV_STAG = 0x00,
	DDP_TAGGED_BOUNDS = 0x01,
	DDP_TAGGED_UNASSOC_STAG = 0x02,
	DDP_TAGGED_TO_WRAP = 0x03,
	DDP_TAGGED_INV_DDP_VER = 0x04,
	DDP_UNTAGGED_INV_QN = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
	DDP_UNTAGGED_INV_MO = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
	DDP_UNTAGGED_INV_DDP_VER = 0x06,
};

enum irdma_term_mpa_errors {
	MPA_CLOSED = 0x01,
	MPA_CRC = 0x02,
	MPA_MARKER = 0x03,
	MPA_REQ_RSP = 0x04,
};

enum irdma_qp_event_type {
	IRDMA_QP_EVENT_CATASTROPHIC,
	IRDMA_QP_EVENT_ACCESS_ERR,
};

enum irdma_hw_stats_index_32b {
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
	IRDMA_HW_STAT_INDEX_MAX_32_GEN_1 = 9, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 10,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 11,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 12,
	IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */
};

enum irdma_hw_stats_index_64b {
	IRDMA_HW_STAT_INDEX_IP4RXOCTS = 0,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS = 1,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 2,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 3,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS = 4,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS = 5,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 6,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 7,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS = 8,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS = 9,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 10,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 11,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS = 12,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS = 13,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 14,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 15,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS = 16,
	IRDMA_HW_STAT_INDEX_TCPTXSEG = 17,
	IRDMA_HW_STAT_INDEX_RDMARXRDS = 18,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS = 19,
	IRDMA_HW_STAT_INDEX_RDMARXWRS = 20,
	IRDMA_HW_STAT_INDEX_RDMATXRDS = 21,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS = 22,
	IRDMA_HW_STAT_INDEX_RDMATXWRS = 23,
	IRDMA_HW_STAT_INDEX_RDMAVBND = 24,
	IRDMA_HW_STAT_INDEX_RDMAVINV = 25,
	IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 26,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 27,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 28,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 29,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS = 30,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS = 31,
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32,
	IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */
};

enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};

enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR = 1,
	IRDMA_PRIO_STRICT = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL = 3,
};

enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
};

struct irdma_sc_dev;
struct irdma_vsi_pestat;

struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};

struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;
	u64 sq_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};

struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

struct irdma_dev_hw_stats_offsets {
	u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64];
};

struct irdma_dev_hw_stats {
	u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64];
};

struct irdma_gather_stats {
	u32 rsvd1;
	u32 rxvlanerr;
	u64 ip4rxocts;
	u64 ip4rxpkts;
	u32 ip4rxtrunc;
	u32 ip4rxdiscard;
	u64 ip4rxfrags;
	u64 ip4rxmcocts;
	u64 ip4rxmcpkts;
	u64 ip6rxocts;
	u64 ip6rxpkts;
	u32 ip6rxtrunc;
	u32 ip6rxdiscard;
	u64 ip6rxfrags;
	u64 ip6rxmcocts;
	u64 ip6rxmcpkts;
	u64 ip4txocts;
	u64 ip4txpkts;
	u64 ip4txfrag;
	u64 ip4txmcocts;
	u64 ip4txmcpkts;
	u64 ip6txocts;
	u64 ip6txpkts;
	u64 ip6txfrags;
	u64 ip6txmcocts;
	u64 ip6txmcpkts;
	u32 ip6txnoroute;
	u32 ip4txnoroute;
	u64 tcprxsegs;
	u32 tcprxprotoerr;
	u32 tcprxopterr;
	u64 tcptxsegs;
	u32 rsvd2;
	u32 tcprtxseg;
	u64 udprxpkts;
	u64 udptxpkts;
	u64 rdmarxwrs;
	u64 rdmarxrds;
	u64 rdmarxsnds;
	u64 rdmatxwrs;
	u64 rdmatxrds;
	u64 rdmatxsnds;
	u64 rdmavbn;
	u64 rdmavinv;
	u64 rxnpecnmrkpkts;
	u32 rxrpcnphandled;
	u32 rxrpcnpignored;
	u32 txnpcnpsent;
	u32 rsvd3[88];
};

struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u8 hmc_fcn_index;
	u8 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;
	void *last_gather_stats_va;
};

struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct timer_list stats_timer;
	struct irdma_sc_vsi *vsi;
	struct irdma_dev_hw_stats last_hw_stats;
	spinlock_t lock; /* rdma stats lock */
};

struct irdma_hw {
	u8 __iomem *hw_addr;
	u8 __iomem *priv_hw_addr;
	struct device *device;
	struct irdma_hmc_info hmc;
};

struct irdma_pfpdu {
	struct list_head rxlist;
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8 marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};

struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;
};

struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;
	struct irdma_sc_dev *dev;
	int (*process_cqp_sds)(struct irdma_sc_dev *dev,
			       struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;
	bool virtual_map:1;
};

struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};

struct irdma_sc_cq {
	struct irdma_cq_uk cq_uk;
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	void *pbl_list;
	void *back_cq;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u8 cq_type;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	bool ceqe_mask:1;
	bool virtual_map:1;
	bool check_overflow:1;
	bool ceq_id_valid:1;
	bool tph_en;
};

struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;
};

struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u8 hmc_fn_id;
	u8 stats_idx;
};

struct irdma_up_info {
	u8 map[8];
	u8 cnp_up_override;
	u8 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES 0x3FF
#define IRDMA_WS_NODE_INVALID 0xFFFF

struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;
	u8 tc;
	u8 weight;
};

struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};

#define IRDMA_LEAF_DEFAULT_REL_BW 64
#define IRDMA_PARENT_DEFAULT_REL_BW 1

struct irdma_qos {
	struct list_head qplist;
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u64 lan_qos_handle;
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};

#define IRDMA_INVALID_FCN_ID 0xff
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;
	u32 exception_lan_q;
	u16 mtu;
	u16 vm_id;
	u8 fcn_id;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_fcn_id_alloc:1;
	bool tc_change_pending:1;
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	u8 qos_rel_bw;
	u8 qos_prio_type;
	u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
	bool dscp_mode:1;
};

struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u8 __iomem *db_addr;
	u32 __iomem *wqe_alloc_db;
	u32 __iomem *cq_arm_db;
	u32 __iomem *aeq_alloc_db;
	u32 __iomem *cqp_db;
	u32 __iomem *cq_ack_db;
	u32 __iomem *ceq_itr_mask_db;
	u32 __iomem *aeq_itr_mask_db;
	u32 __iomem *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u64 hw_shifts[IRDMA_MAX_SHIFTS];
	u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64];
	u64 feature_info[IRDMA_MAX_FEATURES];
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u16 num_vfs;
	u8 hmc_fn_id;
	u8 vf_id;
	bool vchnl_up:1;
	bool ceq_valid:1;
	u8 pci_rev;
	int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};

struct irdma_modify_cq_info {
	u64 cq_pa;
	struct irdma_cqe *cq_base;
	u32 cq_size;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool check_overflow;
	bool cq_resize:1;
};

struct irdma_create_qp_info {
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool mac_valid:1;
	bool force_lpb;
	u8 next_iwarp_state;
};

struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};

struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error;
};

struct irdma_dcb_app_info {
	u8 priority;
	u8 selector;
	u16 prot_id;
};

struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};

struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];
	u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
	bool dscp_mode:1;
};

struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	u16 vm_id;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_fcn_id;
};

struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void __iomem *bar0;
	u8 hmc_fn_id;
};

struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_idx;
};

struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};

struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};

struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};

struct irdma_roce_offload_info {
	u16 p_key;
	u16 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u16 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

struct irdma_tcp_offload_info {
	bool ipv4:1;
	bool no_nagle:1;
	bool insert_vlan_tag:1;
	bool time_stamp:1;
	bool drop_ooo_seg:1;
	bool avoid_stretch_ack:1;
	bool wscale:1;
	bool ignore_tcp_opt:1;
	bool ignore_tcp_uns_opt:1;
	u8 cwnd_inc_limit;
	u8 dup_ack_thresh;
	u8 ttl;
	u8 src_mac_addr_idx;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 syn_rst_handling;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 tcp_state;
	u8 snd_wscale;
	u8 rcv_wscale;
	u32 time_stamp_recent;
	u32 time_stamp_age;
	u32 snd_nxt;
	u32 snd_wnd;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 snd_max;
	u32 snd_una;
	u32 srtt;
	u32 rtt_var;
	u32 ss_thresh;
	u32 cwnd;
	u32 snd_wl1;
	u32 snd_wl2;
	u32 max_snd_window;
	u8 rexmit_thresh;
	u32 local_ipaddr[4];
};

struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u8 stats_idx;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u16 ae_id;
	u16 wqe_idx;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	u8 q2_data_written;
	u8 ae_src;
};

struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	u8 hmc_fcn_index;
};

struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;
	bool mw1_bind_dont_vldt_key:1;
};

struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
};

struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};

struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;
	bool dealloc_pbl:1;
};

struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};

struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

struct irdma_upload_context_info {
	u64 buf_pa;
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};

struct irdma_local_mac_entry_info {
	u8 mac_addr[6];
	u16 entry_idx;
};

struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETH_ALEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent;
};

struct irdma_apbvt_info {
	u16 port;
	bool add;
};

struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};

struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;
	u8 push_page_type;
};

struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;
	bool rq:1;
	bool userflushcode:1;
	bool generate_ae:1;
};

struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;
	u32 count;
};
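
/*
 * Illustrative (non-normative) sketch of how the timeout tracker above is
 * typically used together with irdma_check_cqp_progress(), declared later in
 * this header: snapshot progress each poll iteration and give up once @count
 * grows past a driver-chosen limit. The loop shape, the "done" condition and
 * MY_CQP_TIMEOUT_LIMIT are hypothetical examples, not part of the API:
 *
 *	struct irdma_cqp_timeout cqp_timeout = {};
 *
 *	do {
 *		irdma_check_cqp_progress(&cqp_timeout, dev);
 *		if (cqp_timeout.count > MY_CQP_TIMEOUT_LIMIT)
 *			return -ETIMEDOUT;
 *	} while (!done);
 */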

struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};
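
/*
 * Illustrative (non-normative) example of invoking the ops above through the
 * irq_ops pointer embedded in struct irdma_sc_dev; msix_idx stands for a
 * caller-owned vector index and is hypothetical:
 *
 *	dev->irq_ops->irdma_en_irq(dev, msix_idx);
 *	...
 *	dev->irq_ops->irdma_dis_irq(dev, msix_idx);
 */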

void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
			bool check_overflow, bool post_sq);
int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
			      struct irdma_ccq_cqe_info *info);
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
		      struct irdma_ccq_init_info *info);

int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		      struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		      struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
			   struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
		      struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
				  struct irdma_ccq_cqe_info *cmpl_info);
int irdma_sc_fast_register(struct irdma_sc_qp *qp,
			   struct irdma_fast_reg_stag_info *info, bool post_sq);
int irdma_sc_qp_create(struct irdma_sc_qp *qp,
		       struct irdma_create_qp_info *info, u64 scratch,
		       bool post_sq);
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
			   struct irdma_qp_flush_info *info, u64 scratch,
			   bool post_sq);
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
		       struct irdma_modify_qp_info *info, u64 scratch,
		       bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);

void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
					u8 hmc_fn_id, bool post_sq,
					bool poll_registers);

void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);

struct cqp_info {
	union {
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};

struct cqp_cmds_info {
	struct list_head cqp_cmd_entry;
	u8 cqp_cmd;
	u8 post_sq;
	struct cqp_info in;
};

__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);

/**
 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: private data for CQP WQE
 */
static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
{
	u32 wqe_idx;

	return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
}
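
/*
 * Illustrative (non-normative) sketch of the usual CQP command flow built on
 * the helpers above: obtain the next CQP SQ WQE, fill its quanta for the
 * desired operation, then ring the doorbell with irdma_sc_cqp_post_sq().
 * Error handling and the command-specific WQE encoding are omitted; the
 * placeholder step below is hypothetical:
 *
 *	__le64 *wqe;
 *
 *	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 *	if (!wqe)
 *		return -ENOMEM;
 *	(fill the WQE quanta for the desired CQP op here)
 *	irdma_sc_cqp_post_sq(cqp);
 */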
#endif /* IRDMA_TYPE_H */