1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef MLX5_DEVICE_H
34 #define MLX5_DEVICE_H
35
36 #include <linux/types.h>
37 #include <rdma/ib_verbs.h>
38 #include <linux/mlx5/mlx5_ifc.h>
39
40 #if defined(__LITTLE_ENDIAN)
41 #define MLX5_SET_HOST_ENDIANNESS 0
42 #elif defined(__BIG_ENDIAN)
43 #define MLX5_SET_HOST_ENDIANNESS 0x80
44 #else
45 #error Host endianness not defined
46 #endif
47
48 /* helper macros */
49 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
50 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
51 #define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
52 #define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
53 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
54 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
55 #define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
56 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
57 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
58 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
59 #define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
60 #define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
61 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
62
63 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
64 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
65 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
66 #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
67 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
68 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
69 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
70 #define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
71
72 /* insert a value to a struct */
73 #define MLX5_SET(typ, p, fld, v) do { \
74 u32 _v = v; \
75 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
76 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
77 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
78 (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
79 << __mlx5_dw_bit_off(typ, fld))); \
80 } while (0)
81
82 #define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
83 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
84 MLX5_SET(typ, p, fld[idx], v); \
85 } while (0)
86
87 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
88 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
89 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
90 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
91 (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
92 << __mlx5_dw_bit_off(typ, fld))); \
93 } while (0)
94
95 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
96 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
97 __mlx5_mask(typ, fld))
98
99 #define MLX5_GET_PR(typ, p, fld) ({ \
100 u32 ___t = MLX5_GET(typ, p, fld); \
101 pr_debug(#fld " = 0x%x\n", ___t); \
102 ___t; \
103 })
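/*
 * Illustrative sketch (not part of the original header): how driver code
 * typically uses the accessors above to build and parse a command buffer.
 * It assumes the enable_hca_in/enable_hca_out layouts and the
 * MLX5_CMD_OP_ENABLE_HCA opcode from mlx5_ifc.h; the helper names
 * themselves are hypothetical.
 */
static inline void example_build_enable_hca(void *in, u16 func_id)
{
        /* "in" points at a zeroed buffer of MLX5_ST_SZ_BYTES(enable_hca_in) */
        MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
        MLX5_SET(enable_hca_in, in, function_id, func_id);
}

static inline u8 example_enable_hca_status(void *out)
{
        /* firmware status byte from the command's output layout */
        return MLX5_GET(enable_hca_out, out, status);
}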
104
105 #define __MLX5_SET64(typ, p, fld, v) do { \
106 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
107 *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
108 } while (0)
109
110 #define MLX5_SET64(typ, p, fld, v) do { \
111 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
112 __MLX5_SET64(typ, p, fld, v); \
113 } while (0)
114
115 #define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
116 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
117 __MLX5_SET64(typ, p, fld[idx], v); \
118 } while (0)
119
120 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
121
122 #define MLX5_GET64_PR(typ, p, fld) ({ \
123 u64 ___t = MLX5_GET64(typ, p, fld); \
124 pr_debug(#fld " = 0x%llx\n", ___t); \
125 ___t; \
126 })
127
128 #define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
129 __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
130 __mlx5_mask16(typ, fld))
131
132 #define MLX5_SET16(typ, p, fld, v) do { \
133 u16 _v = v; \
134 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
135 *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
136 cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
137 (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
138 << __mlx5_16_bit_off(typ, fld))); \
139 } while (0)
140
141 /* Big endian getters */
142 #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
143 __mlx5_64_off(typ, fld)))
144
145 #define MLX5_GET_BE(type_t, typ, p, fld) ({ \
146 type_t tmp; \
147 switch (sizeof(tmp)) { \
148 case sizeof(u8): \
149 tmp = (__force type_t)MLX5_GET(typ, p, fld); \
150 break; \
151 case sizeof(u16): \
152 tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
153 break; \
154 case sizeof(u32): \
155 tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
156 break; \
157 case sizeof(u64): \
158 tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
159 break; \
160 } \
161 tmp; \
162 })
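/*
 * Illustrative sketch (not part of the original header): naturally aligned
 * 64-bit fields go through the *64 variants above, which BUILD_BUG_ON a
 * misaligned or differently sized field.  It assumes the mkc (memory key
 * context) layout from mlx5_ifc.h; the helper names are hypothetical.
 */
static inline void example_fill_mkc_range(void *mkc, u64 start_addr, u64 len)
{
        MLX5_SET64(mkc, mkc, start_addr, start_addr);
        MLX5_SET64(mkc, mkc, len, len);
}

static inline u64 example_read_mkc_len(void *mkc)
{
        return MLX5_GET64(mkc, mkc, len);
}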
163
164 enum mlx5_inline_modes {
165 MLX5_INLINE_MODE_NONE,
166 MLX5_INLINE_MODE_L2,
167 MLX5_INLINE_MODE_IP,
168 MLX5_INLINE_MODE_TCP_UDP,
169 };
170
171 enum {
172 MLX5_MAX_COMMANDS = 32,
173 MLX5_CMD_DATA_BLOCK_SIZE = 512,
174 MLX5_PCI_CMD_XPORT = 7,
175 MLX5_MKEY_BSF_OCTO_SIZE = 4,
176 MLX5_MAX_PSVS = 4,
177 };
178
179 enum {
180 MLX5_EXTENDED_UD_AV = 0x80000000,
181 };
182
183 enum {
184 MLX5_CQ_STATE_ARMED = 9,
185 MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
186 MLX5_CQ_STATE_FIRED = 0xa,
187 };
188
189 enum {
190 MLX5_STAT_RATE_OFFSET = 5,
191 };
192
193 enum {
194 MLX5_INLINE_SEG = 0x80000000,
195 };
196
197 enum {
198 MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
199 };
200
201 enum {
202 MLX5_MIN_PKEY_TABLE_SIZE = 128,
203 MLX5_MAX_LOG_PKEY_TABLE = 5,
204 };
205
206 enum {
207 MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
208 };
209
210 enum {
211 MLX5_PFAULT_SUBTYPE_WQE = 0,
212 MLX5_PFAULT_SUBTYPE_RDMA = 1,
213 };
214
215 enum wqe_page_fault_type {
216 MLX5_WQE_PF_TYPE_RMP = 0,
217 MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
218 MLX5_WQE_PF_TYPE_RESP = 2,
219 MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
220 };
221
222 enum {
223 MLX5_PERM_LOCAL_READ = 1 << 2,
224 MLX5_PERM_LOCAL_WRITE = 1 << 3,
225 MLX5_PERM_REMOTE_READ = 1 << 4,
226 MLX5_PERM_REMOTE_WRITE = 1 << 5,
227 MLX5_PERM_ATOMIC = 1 << 6,
228 MLX5_PERM_UMR_EN = 1 << 7,
229 };
230
231 enum {
232 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
233 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
234 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
235 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
236 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
237 };
238
239 enum {
240 MLX5_EN_RD = (u64)1,
241 MLX5_EN_WR = (u64)2
242 };
243
244 enum {
245 MLX5_ADAPTER_PAGE_SHIFT = 12,
246 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
247 };
248
249 enum {
250 MLX5_BFREGS_PER_UAR = 4,
251 MLX5_MAX_UARS = 1 << 8,
252 MLX5_NON_FP_BFREGS_PER_UAR = 2,
253 MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
254 MLX5_NON_FP_BFREGS_PER_UAR,
255 MLX5_MAX_BFREGS = MLX5_MAX_UARS *
256 MLX5_NON_FP_BFREGS_PER_UAR,
257 MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
258 MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
259 MLX5_MIN_DYN_BFREGS = 512,
260 MLX5_MAX_DYN_BFREGS = 1024,
261 };
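/*
 * Worked example of the derived constants above, assuming PAGE_SIZE == 4096:
 *   MLX5_UARS_IN_PAGE          = 4096 / 4096 = 1
 *   MLX5_NON_FP_BFREGS_IN_PAGE = 2 * 1       = 2
 *   MLX5_FP_BFREGS_PER_UAR     = 4 - 2       = 2
 *   MLX5_MAX_BFREGS            = 256 * 2     = 512
 * On a 64 KiB-page host MLX5_UARS_IN_PAGE becomes 16 and the per-page
 * counts scale accordingly.
 */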
262
263 enum {
264 MLX5_MKEY_MASK_LEN = 1ull << 0,
265 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
266 MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
267 MLX5_MKEY_MASK_PD = 1ull << 7,
268 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
269 MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
270 MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
271 MLX5_MKEY_MASK_KEY = 1ull << 13,
272 MLX5_MKEY_MASK_QPN = 1ull << 14,
273 MLX5_MKEY_MASK_LR = 1ull << 17,
274 MLX5_MKEY_MASK_LW = 1ull << 18,
275 MLX5_MKEY_MASK_RR = 1ull << 19,
276 MLX5_MKEY_MASK_RW = 1ull << 20,
277 MLX5_MKEY_MASK_A = 1ull << 21,
278 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
279 MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
280 MLX5_MKEY_MASK_FREE = 1ull << 29,
281 MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
282 };
283
284 enum {
285 MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
286
287 MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
288 MLX5_UMR_CHECK_FREE = (2 << 5),
289
290 MLX5_UMR_INLINE = (1 << 7),
291 };
292
293 #define MLX5_UMR_KLM_ALIGNMENT 4
294 #define MLX5_UMR_MTT_ALIGNMENT 0x40
295 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
296 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
297
298 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
299
300 enum {
301 MLX5_EVENT_QUEUE_TYPE_QP = 0,
302 MLX5_EVENT_QUEUE_TYPE_RQ = 1,
303 MLX5_EVENT_QUEUE_TYPE_SQ = 2,
304 MLX5_EVENT_QUEUE_TYPE_DCT = 6,
305 };
306
/* mlx5 components can subscribe to any one of these events via the
 * mlx5_eq_notifier_register() API.
 */
 
310 enum mlx5_event {
311 /* Special value to subscribe to any event */
312 MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
313 /* HW events enum start: comp events are not subscribable */
314 MLX5_EVENT_TYPE_COMP = 0x0,
315 /* HW Async events enum start: subscribable events */
316 MLX5_EVENT_TYPE_PATH_MIG = 0x01,
317 MLX5_EVENT_TYPE_COMM_EST = 0x02,
318 MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
319 MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
320 MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
321
322 MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
323 MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
324 MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
325 MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
326 MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
327 MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
328 MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
329
330 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
331 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
332 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
333 MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
334 MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
335 MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
336 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
337 MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
338 MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
339 MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
340
341 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
342 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
343
344 MLX5_EVENT_TYPE_CMD = 0x0a,
345 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
346
347 MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
348 MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
349
350 MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
351 MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
352
353 MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
354 MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
355
356 MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
357 MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
358
359 MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
360
361 MLX5_EVENT_TYPE_MAX = 0x100,
362 };
363
364 enum mlx5_driver_event {
365 MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
366 };
367
368 enum {
369 MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
370 MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
371 };
372
373 enum {
374 MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
375 MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
376 MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
377 MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
378 };
379
380 enum {
381 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
382 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
383 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
384 MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
385 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
386 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
387 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
388 };
389
390 enum {
391 MLX5_ROCE_VERSION_1 = 0,
392 MLX5_ROCE_VERSION_2 = 2,
393 };
394
395 enum {
396 MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
397 MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
398 };
399
400 enum {
401 MLX5_ROCE_L3_TYPE_IPV4 = 0,
402 MLX5_ROCE_L3_TYPE_IPV6 = 1,
403 };
404
405 enum {
406 MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
407 MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
408 };
409
410 enum {
411 MLX5_OPCODE_NOP = 0x00,
412 MLX5_OPCODE_SEND_INVAL = 0x01,
413 MLX5_OPCODE_RDMA_WRITE = 0x08,
414 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
415 MLX5_OPCODE_SEND = 0x0a,
416 MLX5_OPCODE_SEND_IMM = 0x0b,
417 MLX5_OPCODE_LSO = 0x0e,
418 MLX5_OPCODE_RDMA_READ = 0x10,
419 MLX5_OPCODE_ATOMIC_CS = 0x11,
420 MLX5_OPCODE_ATOMIC_FA = 0x12,
421 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
422 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
423 MLX5_OPCODE_BIND_MW = 0x18,
424 MLX5_OPCODE_CONFIG_CMD = 0x1f,
425 MLX5_OPCODE_ENHANCED_MPSW = 0x29,
426
427 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
428 MLX5_RECV_OPCODE_SEND = 0x01,
429 MLX5_RECV_OPCODE_SEND_IMM = 0x02,
430 MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
431
432 MLX5_CQE_OPCODE_ERROR = 0x1e,
433 MLX5_CQE_OPCODE_RESIZE = 0x16,
434
435 MLX5_OPCODE_SET_PSV = 0x20,
436 MLX5_OPCODE_GET_PSV = 0x21,
437 MLX5_OPCODE_CHECK_PSV = 0x22,
438 MLX5_OPCODE_DUMP = 0x23,
439 MLX5_OPCODE_RGET_PSV = 0x26,
440 MLX5_OPCODE_RCHECK_PSV = 0x27,
441
442 MLX5_OPCODE_UMR = 0x25,
443
444 MLX5_OPCODE_ACCESS_ASO = 0x2d,
445 };
446
447 enum {
448 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
449 MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
450 };
451
452 enum {
453 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
454 MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
455 };
456
457 struct mlx5_wqe_tls_static_params_seg {
458 u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
459 };
460
461 struct mlx5_wqe_tls_progress_params_seg {
462 __be32 tis_tir_num;
463 u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
464 };
465
466 enum {
467 MLX5_SET_PORT_RESET_QKEY = 0,
468 MLX5_SET_PORT_GUID0 = 16,
469 MLX5_SET_PORT_NODE_GUID = 17,
470 MLX5_SET_PORT_SYS_GUID = 18,
471 MLX5_SET_PORT_GID_TABLE = 19,
472 MLX5_SET_PORT_PKEY_TABLE = 20,
473 };
474
475 enum {
476 MLX5_BW_NO_LIMIT = 0,
477 MLX5_100_MBPS_UNIT = 3,
478 MLX5_GBPS_UNIT = 4,
479 };
480
481 enum {
482 MLX5_MAX_PAGE_SHIFT = 31
483 };
484
485 enum {
        /*
         * The max WQE size for an RDMA read is 512 bytes, so this
         * limits our max_sge_rd as the WQE needs to fit:
         * - ctrl segment (16 bytes)
         * - rdma segment (16 bytes)
         * - scatter elements (16 bytes each),
         * i.e. (512 - 16 - 16) / 16 = 30 scatter entries.
         */
        MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
494 };
495
496 enum mlx5_odp_transport_cap_bits {
497 MLX5_ODP_SUPPORT_SEND = 1 << 31,
498 MLX5_ODP_SUPPORT_RECV = 1 << 30,
499 MLX5_ODP_SUPPORT_WRITE = 1 << 29,
500 MLX5_ODP_SUPPORT_READ = 1 << 28,
501 };
502
503 struct mlx5_odp_caps {
504 char reserved[0x10];
505 struct {
506 __be32 rc_odp_caps;
507 __be32 uc_odp_caps;
508 __be32 ud_odp_caps;
509 } per_transport_caps;
510 char reserved2[0xe4];
511 };
512
513 struct mlx5_cmd_layout {
514 u8 type;
515 u8 rsvd0[3];
516 __be32 inlen;
517 __be64 in_ptr;
518 __be32 in[4];
519 __be32 out[4];
520 __be64 out_ptr;
521 __be32 outlen;
522 u8 token;
523 u8 sig;
524 u8 rsvd1;
525 u8 status_own;
526 };
527
528 enum mlx5_rfr_severity_bit_offsets {
529 MLX5_RFR_BIT_OFFSET = 0x7,
530 };
531
532 struct health_buffer {
533 __be32 assert_var[6];
534 __be32 rsvd0[2];
535 __be32 assert_exit_ptr;
536 __be32 assert_callra;
537 __be32 rsvd1[1];
538 __be32 time;
539 __be32 fw_ver;
540 __be32 hw_id;
541 u8 rfr_severity;
542 u8 rsvd2[3];
543 u8 irisc_index;
544 u8 synd;
545 __be16 ext_synd;
546 };
547
548 enum mlx5_initializing_bit_offsets {
549 MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
550 };
551
552 enum mlx5_cmd_addr_l_sz_offset {
553 MLX5_NIC_IFC_OFFSET = 8,
554 };
555
556 struct mlx5_init_seg {
557 __be32 fw_rev;
558 __be32 cmdif_rev_fw_sub;
559 __be32 rsvd0[2];
560 __be32 cmdq_addr_h;
561 __be32 cmdq_addr_l_sz;
562 __be32 cmd_dbell;
563 __be32 rsvd1[120];
564 __be32 initializing;
565 struct health_buffer health;
566 __be32 rsvd2[878];
567 __be32 cmd_exec_to;
568 __be32 cmd_q_init_to;
569 __be32 internal_timer_h;
570 __be32 internal_timer_l;
571 __be32 rsvd3[2];
572 __be32 health_counter;
573 __be32 rsvd4[11];
574 __be32 real_time_h;
575 __be32 real_time_l;
576 __be32 rsvd5[1006];
577 __be64 ieee1588_clk;
578 __be32 ieee1588_clk_type;
579 __be32 clr_intx;
580 };
581
582 struct mlx5_eqe_comp {
583 __be32 reserved[6];
584 __be32 cqn;
585 };
586
587 struct mlx5_eqe_qp_srq {
588 __be32 reserved1[5];
589 u8 type;
590 u8 reserved2[3];
591 __be32 qp_srq_n;
592 };
593
594 struct mlx5_eqe_cq_err {
595 __be32 cqn;
596 u8 reserved1[7];
597 u8 syndrome;
598 };
599
600 struct mlx5_eqe_xrq_err {
601 __be32 reserved1[5];
602 __be32 type_xrqn;
603 __be32 reserved2;
604 };
605
606 struct mlx5_eqe_port_state {
607 u8 reserved0[8];
608 u8 port;
609 };
610
611 struct mlx5_eqe_gpio {
612 __be32 reserved0[2];
613 __be64 gpio_event;
614 };
615
616 struct mlx5_eqe_congestion {
617 u8 type;
618 u8 rsvd0;
619 u8 congestion_level;
620 };
621
622 struct mlx5_eqe_stall_vl {
623 u8 rsvd0[3];
624 u8 port_vl;
625 };
626
627 struct mlx5_eqe_cmd {
628 __be32 vector;
629 __be32 rsvd[6];
630 };
631
632 struct mlx5_eqe_page_req {
633 __be16 ec_function;
634 __be16 func_id;
635 __be32 num_pages;
636 __be32 rsvd1[5];
637 };
638
639 struct mlx5_eqe_page_fault {
640 __be32 bytes_committed;
641 union {
642 struct {
643 u16 reserved1;
644 __be16 wqe_index;
645 u16 reserved2;
646 __be16 packet_length;
647 __be32 token;
648 u8 reserved4[8];
649 __be32 pftype_wq;
650 } __packed wqe;
651 struct {
652 __be32 r_key;
653 u16 reserved1;
654 __be16 packet_length;
655 __be32 rdma_op_len;
656 __be64 rdma_va;
657 __be32 pftype_token;
658 } __packed rdma;
659 } __packed;
660 } __packed;
661
662 struct mlx5_eqe_vport_change {
663 u8 rsvd0[2];
664 __be16 vport_num;
665 __be32 rsvd1[6];
666 } __packed;
667
668 struct mlx5_eqe_port_module {
669 u8 reserved_at_0[1];
670 u8 module;
671 u8 reserved_at_2[1];
672 u8 module_status;
673 u8 reserved_at_4[2];
674 u8 error_type;
675 } __packed;
676
677 struct mlx5_eqe_pps {
678 u8 rsvd0[3];
679 u8 pin;
680 u8 rsvd1[4];
681 union {
682 struct {
683 __be32 time_sec;
684 __be32 time_nsec;
685 };
686 struct {
687 __be64 time_stamp;
688 };
689 };
690 u8 rsvd2[12];
691 } __packed;
692
693 struct mlx5_eqe_dct {
694 __be32 reserved[6];
695 __be32 dctn;
696 };
697
698 struct mlx5_eqe_temp_warning {
699 __be64 sensor_warning_msb;
700 __be64 sensor_warning_lsb;
701 } __packed;
702
703 struct mlx5_eqe_obj_change {
704 u8 rsvd0[2];
705 __be16 obj_type;
706 __be32 obj_id;
707 } __packed;
708
709 #define SYNC_RST_STATE_MASK 0xf
710
711 enum sync_rst_state_type {
712 MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
713 MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
714 MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
715 };
716
717 struct mlx5_eqe_sync_fw_update {
718 u8 reserved_at_0[3];
719 u8 sync_rst_state;
720 };
721
722 struct mlx5_eqe_vhca_state {
723 __be16 ec_function;
724 __be16 function_id;
725 } __packed;
726
727 union ev_data {
728 __be32 raw[7];
729 struct mlx5_eqe_cmd cmd;
730 struct mlx5_eqe_comp comp;
731 struct mlx5_eqe_qp_srq qp_srq;
732 struct mlx5_eqe_cq_err cq_err;
733 struct mlx5_eqe_port_state port;
734 struct mlx5_eqe_gpio gpio;
735 struct mlx5_eqe_congestion cong;
736 struct mlx5_eqe_stall_vl stall_vl;
737 struct mlx5_eqe_page_req req_pages;
738 struct mlx5_eqe_page_fault page_fault;
739 struct mlx5_eqe_vport_change vport_change;
740 struct mlx5_eqe_port_module port_module;
741 struct mlx5_eqe_pps pps;
742 struct mlx5_eqe_dct dct;
743 struct mlx5_eqe_temp_warning temp_warning;
744 struct mlx5_eqe_xrq_err xrq_err;
745 struct mlx5_eqe_sync_fw_update sync_fw_update;
746 struct mlx5_eqe_vhca_state vhca_state;
747 struct mlx5_eqe_obj_change obj_change;
748 } __packed;
749
750 struct mlx5_eqe {
751 u8 rsvd0;
752 u8 type;
753 u8 rsvd1;
754 u8 sub_type;
755 __be32 rsvd2[7];
756 union ev_data data;
757 __be16 rsvd3;
758 u8 signature;
759 u8 owner;
760 } __packed;
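/*
 * Illustrative sketch (not part of the original header): an event-queue
 * consumer switches on mlx5_eqe::type and then reads the matching member
 * of union ev_data.  The 24-bit CQN masking follows the comp/cq_err
 * layouts above; the helper name is hypothetical.
 */
static inline u32 example_eqe_cqn(struct mlx5_eqe *eqe)
{
        switch (eqe->type) {
        case MLX5_EVENT_TYPE_COMP:
                return be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
        default:
                return 0;
        }
}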
761
762 struct mlx5_cmd_prot_block {
763 u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
764 u8 rsvd0[48];
765 __be64 next;
766 __be32 block_num;
767 u8 rsvd1;
768 u8 token;
769 u8 ctrl_sig;
770 u8 sig;
771 };
772
773 enum {
774 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
775 };
776
777 struct mlx5_err_cqe {
778 u8 rsvd0[32];
779 __be32 srqn;
780 u8 rsvd1[18];
781 u8 vendor_err_synd;
782 u8 syndrome;
783 __be32 s_wqe_opcode_qpn;
784 __be16 wqe_counter;
785 u8 signature;
786 u8 op_own;
787 };
788
789 struct mlx5_cqe64 {
790 u8 tls_outer_l3_tunneled;
791 u8 rsvd0;
792 __be16 wqe_id;
793 union {
794 struct {
795 u8 tcppsh_abort_dupack;
796 u8 min_ttl;
797 __be16 tcp_win;
798 __be32 ack_seq_num;
799 } lro;
800 struct {
801 u8 reserved0:1;
802 u8 match:1;
803 u8 flush:1;
804 u8 reserved3:5;
805 u8 header_size;
806 __be16 header_entry_index;
807 __be32 data_offset;
808 } shampo;
809 };
810 __be32 rss_hash_result;
811 u8 rss_hash_type;
812 u8 ml_path;
813 u8 rsvd20[2];
814 __be16 check_sum;
815 __be16 slid;
816 __be32 flags_rqpn;
817 u8 hds_ip_ext;
818 u8 l4_l3_hdr_type;
819 __be16 vlan_info;
820 __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
821 union {
822 __be32 immediate;
823 __be32 inval_rkey;
824 __be32 pkey;
825 __be32 ft_metadata;
826 };
827 u8 rsvd40[4];
828 __be32 byte_cnt;
829 __be32 timestamp_h;
830 __be32 timestamp_l;
831 __be32 sop_drop_qpn;
832 __be16 wqe_counter;
833 union {
834 u8 signature;
835 u8 validity_iteration_count;
836 };
837 u8 op_own;
838 };
839
840 struct mlx5_mini_cqe8 {
841 union {
842 __be32 rx_hash_result;
843 struct {
844 __be16 checksum;
845 __be16 stridx;
846 };
847 struct {
848 __be16 wqe_counter;
849 u8 s_wqe_opcode;
850 u8 reserved;
851 } s_wqe_info;
852 };
853 __be32 byte_cnt;
854 };
855
856 enum {
857 MLX5_NO_INLINE_DATA,
858 MLX5_INLINE_DATA32_SEG,
859 MLX5_INLINE_DATA64_SEG,
860 MLX5_COMPRESSED,
861 };
862
863 enum {
864 MLX5_CQE_FORMAT_CSUM = 0x1,
865 MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
866 };
867
868 enum {
869 MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
870 MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
871 };
872
873 #define MLX5_MINI_CQE_ARRAY_SIZE 8
874
static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
        return (cqe->op_own >> 2) & 0x3;
}

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
        return cqe->op_own >> 4;
}

static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
        return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
        return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
        return cqe->tls_outer_l3_tunneled & 0x1;
}

static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
        return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
        return cqe->l4_l3_hdr_type & 0x1;
}

static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
        u32 hi, lo;

        hi = be32_to_cpu(cqe->timestamp_h);
        lo = be32_to_cpu(cqe->timestamp_l);

        return (u64)lo | ((u64)hi << 32);
}

static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
{
        return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}
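/*
 * Illustrative sketch (not part of the original header): how receive code
 * might combine the helpers above.  Treating MLX5_COMPRESSED CQEs as
 * carrying no timestamp mirrors typical driver behaviour but is stated
 * here only as an example; the helper names are hypothetical.
 */
static inline u64 example_cqe_hw_timestamp(struct mlx5_cqe64 *cqe)
{
        if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
                return 0;
        return get_cqe_ts(cqe);
}

static inline bool example_cqe_strip_vlan(struct mlx5_cqe64 *cqe, u16 *vlan_tci)
{
        if (!cqe_has_vlan(cqe))
                return false;
        *vlan_tci = be16_to_cpu(cqe->vlan_info);
        return true;
}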

#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE	3
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE		9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX		16
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE		6
#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX		13

struct mpwrq_cqe_bc {
        __be16	filler_consumed_strides;
        __be16	byte_cnt;
};

static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
        struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

        return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
        return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
        struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

        return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
        struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

        return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
        return be16_to_cpu(cqe->wqe_counter);
}
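/*
 * Illustrative sketch (not part of the original header): accounting for a
 * striding-RQ (MPWQE) completion with the helpers above.  A filler CQE
 * consumes strides but carries no packet data.  The struct and helper
 * names are hypothetical.
 */
struct example_mpwqe_counters {
        u32 bytes;
        u32 strides;
        u32 fillers;
};

static inline void example_account_mpwrq_cqe(struct mlx5_cqe64 *cqe,
                                             struct example_mpwqe_counters *c)
{
        c->strides += mpwrq_get_cqe_consumed_strides(cqe);

        if (mpwrq_is_filler_cqe(cqe)) {
                c->fillers++;
                return;
        }
        c->bytes += mpwrq_get_cqe_byte_cnt(cqe);
}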
966
967 enum {
968 CQE_L4_HDR_TYPE_NONE = 0x0,
969 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
970 CQE_L4_HDR_TYPE_UDP = 0x2,
971 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
972 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
973 };
974
975 enum {
976 CQE_RSS_HTYPE_IP = 0x3 << 2,
977 /* cqe->rss_hash_type[3:2] - IP destination selected for hash
978 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
979 */
980 CQE_RSS_HTYPE_L4 = 0x3 << 6,
        /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
         * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
         */
984 };
985
986 enum {
987 MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
988 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
989 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
990 };
991
992 enum {
993 CQE_L2_OK = 1 << 0,
994 CQE_L3_OK = 1 << 1,
995 CQE_L4_OK = 1 << 2,
996 };
997
998 enum {
999 CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
1000 CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
1001 CQE_TLS_OFFLOAD_RESYNC = 0x2,
1002 CQE_TLS_OFFLOAD_ERROR = 0x3,
1003 };
1004
1005 struct mlx5_sig_err_cqe {
1006 u8 rsvd0[16];
1007 __be32 expected_trans_sig;
1008 __be32 actual_trans_sig;
1009 __be32 expected_reftag;
1010 __be32 actual_reftag;
1011 __be16 syndrome;
1012 u8 rsvd22[2];
1013 __be32 mkey;
1014 __be64 err_offset;
1015 u8 rsvd30[8];
1016 __be32 qpn;
1017 u8 rsvd38[2];
1018 u8 signature;
1019 u8 op_own;
1020 };
1021
1022 struct mlx5_wqe_srq_next_seg {
1023 u8 rsvd0[2];
1024 __be16 next_wqe_index;
1025 u8 signature;
1026 u8 rsvd1[11];
1027 };
1028
1029 union mlx5_ext_cqe {
1030 struct ib_grh grh;
1031 u8 inl[64];
1032 };
1033
1034 struct mlx5_cqe128 {
1035 union mlx5_ext_cqe inl_grh;
1036 struct mlx5_cqe64 cqe64;
1037 };
1038
1039 enum {
1040 MLX5_MKEY_STATUS_FREE = 1 << 6,
1041 };
1042
1043 enum {
1044 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
1045 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
1046 MLX5_MKEY_BSF_EN = 1 << 30,
1047 };
1048
1049 struct mlx5_mkey_seg {
        /* This is a two-bit field occupying bits 31-30.
         * Bit 31 is always 0; bit 30 is zero for regular MRs and 1
         * (i.e. free) for UMRs that do not have a translation.
         */
1054 u8 status;
1055 u8 pcie_control;
1056 u8 flags;
1057 u8 version;
1058 __be32 qpn_mkey7_0;
1059 u8 rsvd1[4];
1060 __be32 flags_pd;
1061 __be64 start_addr;
1062 __be64 len;
1063 __be32 bsfs_octo_size;
1064 u8 rsvd2[16];
1065 __be32 xlt_oct_size;
1066 u8 rsvd3[3];
1067 u8 log2_page_size;
1068 u8 rsvd4[4];
1069 };
1070
1071 #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
1072
1073 enum {
1074 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
1075 };
1076
1077 enum {
1078 VPORT_STATE_DOWN = 0x0,
1079 VPORT_STATE_UP = 0x1,
1080 };
1081
1082 enum {
1083 MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
1084 MLX5_VPORT_ADMIN_STATE_UP = 0x1,
1085 MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
1086 };
1087
1088 enum {
1089 MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
1090 MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3,
1091 };
1092
1093 enum {
1094 MLX5_L3_PROT_TYPE_IPV4 = 0,
1095 MLX5_L3_PROT_TYPE_IPV6 = 1,
1096 };
1097
1098 enum {
1099 MLX5_L4_PROT_TYPE_TCP = 0,
1100 MLX5_L4_PROT_TYPE_UDP = 1,
1101 };
1102
1103 enum {
1104 MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
1105 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
1106 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
1107 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
1108 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
1109 };
1110
1111 enum {
1112 MLX5_MATCH_OUTER_HEADERS = 1 << 0,
1113 MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
1114 MLX5_MATCH_INNER_HEADERS = 1 << 2,
1115 MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
1116 MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
1117 MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
1118 MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
1119 };
1120
1121 enum {
1122 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
1123 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
1124 };
1125
1126 enum {
1127 MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
1128 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
1129 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
1130 };
1131
1132 enum mlx5_list_type {
1133 MLX5_NVPRT_LIST_TYPE_UC = 0x0,
1134 MLX5_NVPRT_LIST_TYPE_MC = 0x1,
1135 MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
1136 };
1137
1138 enum {
1139 MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
1140 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
1141 };
1142
1143 enum mlx5_wol_mode {
1144 MLX5_WOL_DISABLE = 0,
1145 MLX5_WOL_SECURED_MAGIC = 1 << 1,
1146 MLX5_WOL_MAGIC = 1 << 2,
1147 MLX5_WOL_ARP = 1 << 3,
1148 MLX5_WOL_BROADCAST = 1 << 4,
1149 MLX5_WOL_MULTICAST = 1 << 5,
1150 MLX5_WOL_UNICAST = 1 << 6,
1151 MLX5_WOL_PHY_ACTIVITY = 1 << 7,
1152 };
1153
1154 enum mlx5_mpls_supported_fields {
1155 MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
1156 MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
1157 MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
1158 MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
1159 };
1160
1161 enum mlx5_flex_parser_protos {
1162 MLX5_FLEX_PROTO_GENEVE = 1 << 3,
1163 MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
1164 MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
1165 MLX5_FLEX_PROTO_ICMP = 1 << 8,
1166 MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
1167 };
1168
1169 /* MLX5 DEV CAPs */
1170
1171 /* TODO: EAT.ME */
1172 enum mlx5_cap_mode {
1173 HCA_CAP_OPMOD_GET_MAX = 0,
1174 HCA_CAP_OPMOD_GET_CUR = 1,
1175 };
1176
1177 /* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
1178 * capability memory.
1179 */
1180 enum mlx5_cap_type {
1181 MLX5_CAP_GENERAL = 0,
1182 MLX5_CAP_ETHERNET_OFFLOADS,
1183 MLX5_CAP_ODP,
1184 MLX5_CAP_ATOMIC,
1185 MLX5_CAP_ROCE,
1186 MLX5_CAP_IPOIB_OFFLOADS,
1187 MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
1188 MLX5_CAP_FLOW_TABLE,
1189 MLX5_CAP_ESWITCH_FLOW_TABLE,
1190 MLX5_CAP_ESWITCH,
1191 MLX5_CAP_RESERVED,
1192 MLX5_CAP_VECTOR_CALC,
1193 MLX5_CAP_QOS,
1194 MLX5_CAP_DEBUG,
1195 MLX5_CAP_RESERVED_14,
1196 MLX5_CAP_DEV_MEM,
1197 MLX5_CAP_RESERVED_16,
1198 MLX5_CAP_TLS,
1199 MLX5_CAP_VDPA_EMULATION = 0x13,
1200 MLX5_CAP_DEV_EVENT = 0x14,
1201 MLX5_CAP_IPSEC,
1202 MLX5_CAP_DEV_SHAMPO = 0x1d,
1203 MLX5_CAP_MACSEC = 0x1f,
1204 MLX5_CAP_GENERAL_2 = 0x20,
1205 MLX5_CAP_PORT_SELECTION = 0x25,
1206 MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
1207 /* NUM OF CAP Types */
1208 MLX5_CAP_NUM
1209 };
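/*
 * Illustrative sketch (not part of the original header): the op_mod of
 * QUERY_HCA_CAP packs a capability type from the enum above together with
 * an HCA_CAP_OPMOD_GET_MAX/CUR selector.  The (type << 1) | mode encoding
 * mirrors the convention used by the core driver but is stated here as an
 * assumption; the helper name is hypothetical.
 */
static inline void example_build_query_hca_cap(void *in,
                                               enum mlx5_cap_type type,
                                               enum mlx5_cap_mode mode)
{
        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, (type << 1) | (mode & 0x1));
}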
1210
1211 enum mlx5_pcam_reg_groups {
1212 MLX5_PCAM_REGS_5000_TO_507F = 0x0,
1213 };
1214
1215 enum mlx5_pcam_feature_groups {
1216 MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1217 };
1218
1219 enum mlx5_mcam_reg_groups {
1220 MLX5_MCAM_REGS_FIRST_128 = 0x0,
1221 MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
1222 MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
1223 MLX5_MCAM_REGS_NUM = 0x3,
1224 };
1225
1226 enum mlx5_mcam_feature_groups {
1227 MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1228 };
1229
1230 enum mlx5_qcam_reg_groups {
1231 MLX5_QCAM_REGS_FIRST_128 = 0x0,
1232 };
1233
1234 enum mlx5_qcam_feature_groups {
1235 MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1236 };
1237
1238 /* GET Dev Caps macros */
1239 #define MLX5_CAP_GEN(mdev, cap) \
1240 MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
1241
1242 #define MLX5_CAP_GEN_64(mdev, cap) \
1243 MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
1244
1245 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1246 MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
1247
1248 #define MLX5_CAP_GEN_2(mdev, cap) \
1249 MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
1250
1251 #define MLX5_CAP_GEN_2_64(mdev, cap) \
1252 MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
1253
1254 #define MLX5_CAP_GEN_2_MAX(mdev, cap) \
1255 MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
1256
1257 #define MLX5_CAP_ETH(mdev, cap) \
1258 MLX5_GET(per_protocol_networking_offload_caps,\
1259 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
1260
1261 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1262 MLX5_GET(per_protocol_networking_offload_caps,\
1263 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)
1264
1265 #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
1266 MLX5_GET(per_protocol_networking_offload_caps,\
1267 mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
1268
1269 #define MLX5_CAP_ROCE(mdev, cap) \
1270 MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
1271
1272 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1273 MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
1274
1275 #define MLX5_CAP_ATOMIC(mdev, cap) \
1276 MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
1277
1278 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1279 MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
1280
1281 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1282 MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
1283
1284 #define MLX5_CAP64_FLOWTABLE(mdev, cap) \
1285 MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
1286
1287 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1288 MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)
1289
1290 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
1291 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
1292
1293 #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1294 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1295
1296 #define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
1297 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
1298
1299 #define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
1300 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
1301
1302 #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
1303 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
1304
1305 #define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
1306 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
1307
1308 #define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
1309 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
1310
1311 #define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
1312 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
1313
1314 #define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
1315 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
1316
1317 #define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
1318 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
1319
1320 #define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
1321 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
1322
1323 #define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
1324 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
1325
1326 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1327 MLX5_GET(flow_table_eswitch_cap, \
1328 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
1329
1330 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1331 MLX5_GET(flow_table_eswitch_cap, \
1332 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)
1333
1334 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1335 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
1336
1337 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
1338 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
1339
1340 #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
1341 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1342
1343 #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
1344 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
1345
1346 #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
1347 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
1348
1349 #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
1350 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
1351
1352 #define MLX5_CAP_ESW(mdev, cap) \
1353 MLX5_GET(e_switch_cap, \
1354 mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
1355
1356 #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
1357 MLX5_GET64(flow_table_eswitch_cap, \
1358 (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
1359
1360 #define MLX5_CAP_ESW_MAX(mdev, cap) \
1361 MLX5_GET(e_switch_cap, \
1362 mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
1363
1364 #define MLX5_CAP_PORT_SELECTION(mdev, cap) \
1365 MLX5_GET(port_selection_cap, \
1366 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
1367
1368 #define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
1369 MLX5_GET(port_selection_cap, \
1370 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
1371
1372 #define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
1373 MLX5_GET(adv_virtualization_cap, \
1374 mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
1375
1376 #define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
1377 MLX5_GET(adv_virtualization_cap, \
1378 mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap)
1379
1380 #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
1381 MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
1382
1383 #define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
1384 MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
1385
1386 #define MLX5_CAP_ODP(mdev, cap)\
1387 MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
1388
1389 #define MLX5_CAP_ODP_MAX(mdev, cap)\
1390 MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
1391
1392 #define MLX5_CAP_VECTOR_CALC(mdev, cap) \
1393 MLX5_GET(vector_calc_cap, \
1394 mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)
1395
1396 #define MLX5_CAP_QOS(mdev, cap)\
1397 MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
1398
1399 #define MLX5_CAP_DEBUG(mdev, cap)\
1400 MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
1401
1402 #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
1403 MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
1404
1405 #define MLX5_CAP_PCAM_REG(mdev, reg) \
1406 MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
1407
1408 #define MLX5_CAP_MCAM_REG(mdev, reg) \
1409 MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
1410 mng_access_reg_cap_mask.access_regs.reg)
1411
1412 #define MLX5_CAP_MCAM_REG1(mdev, reg) \
1413 MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
1414 mng_access_reg_cap_mask.access_regs1.reg)
1415
1416 #define MLX5_CAP_MCAM_REG2(mdev, reg) \
1417 MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
1418 mng_access_reg_cap_mask.access_regs2.reg)
1419
1420 #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
1421 MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
1422
1423 #define MLX5_CAP_QCAM_REG(mdev, fld) \
1424 MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
1425
1426 #define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
1427 MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
1428
1429 #define MLX5_CAP_FPGA(mdev, cap) \
1430 MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
1431
1432 #define MLX5_CAP64_FPGA(mdev, cap) \
1433 MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
1434
1435 #define MLX5_CAP_DEV_MEM(mdev, cap)\
1436 MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
1437
1438 #define MLX5_CAP64_DEV_MEM(mdev, cap)\
1439 MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
1440
1441 #define MLX5_CAP_TLS(mdev, cap) \
1442 MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
1443
1444 #define MLX5_CAP_DEV_EVENT(mdev, cap)\
1445 MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
1446
1447 #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
1448 MLX5_GET(virtio_emulation_cap, \
1449 (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
1450
1451 #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
1452 MLX5_GET64(virtio_emulation_cap, \
1453 (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
1454
1455 #define MLX5_CAP_IPSEC(mdev, cap)\
1456 MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
1457
#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
	MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_DEV_SHAMPO]->cur, cap)
1460
1461 #define MLX5_CAP_MACSEC(mdev, cap)\
1462 MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
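
/*
 * Illustrative sketch (not part of the original header): once the
 * capability pages are cached in mdev->caps, feature code tests individual
 * bits through the accessors above.  This assumes struct mlx5_core_dev
 * from <linux/mlx5/driver.h>, so it would live in driver code rather than
 * here; the field names (cqe_compression, log_max_qp, swp) are examples of
 * mlx5_ifc.h fields and the helper names are hypothetical.
 */
static inline bool example_rx_cqe_compress_supported(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_GEN(mdev, cqe_compression);
}

static inline u32 example_max_qps(struct mlx5_core_dev *mdev)
{
        return 1 << MLX5_CAP_GEN(mdev, log_max_qp);
}

static inline bool example_swp_supported(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_ETH(mdev, swp);
}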
1463
1464 enum {
1465 MLX5_CMD_STAT_OK = 0x0,
1466 MLX5_CMD_STAT_INT_ERR = 0x1,
1467 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
1468 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
1469 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
1470 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
1471 MLX5_CMD_STAT_RES_BUSY = 0x6,
1472 MLX5_CMD_STAT_LIM_ERR = 0x8,
1473 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
1474 MLX5_CMD_STAT_IX_ERR = 0xa,
1475 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
1476 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
1477 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
1478 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
1479 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
1480 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1481 };
1482
1483 enum {
1484 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
1485 MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
1486 MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
1487 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1488 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1489 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1490 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1491 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
1492 MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
1493 MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
1494 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
1495 };
1496
1497 enum {
1498 MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
1499 };
1500
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
        if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
                return 0;
        return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
1507
1508 #define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
1509 #define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
1510 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
1511 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
1512 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
1513 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
1514 MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
1515 MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
1516
1517 #endif /* MLX5_DEVICE_H */