1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3
4 #include <linux/types.h>
5 #include "mlx5_ifc_dr_ste_v1.h"
6 #include "dr_ste_v1.h"
7
/* Build the inner/outer definer lookup type from its base name, e.g.
 * DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, true) -> DR_STE_V1_LU_TYPE_ETHL2_SRC_I.
 */
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
	((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
		   DR_STE_V1_LU_TYPE_##lookup_type##_O)
11
/* STEv1 entry formats: best-effort-combined byte/dword entries and the
 * full mask-and-match entry.
 */
enum dr_ste_v1_entry_format {
	DR_STE_V1_TYPE_BWC_BYTE	= 0x0,
	DR_STE_V1_TYPE_BWC_DW	= 0x1,
	DR_STE_V1_TYPE_MATCH	= 0x2,
};
17
18 /* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
19 enum {
20 DR_STE_V1_LU_TYPE_NOP = 0x0000,
21 DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
22 DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
23 DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
24 DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
25 DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
26 DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
27 DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
28 DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
29 DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
30 DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
31 DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
32 DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
33 DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
34 DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
35 DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
36 DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
37 DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
38 DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
39 DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
40 DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
41 DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
42 DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
43 DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
44 DR_STE_V1_LU_TYPE_GRE = 0x010d,
45 DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
46 DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
47 DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
48 DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
49 DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
50 DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
51 DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
52 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
53 DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
54 DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
55 DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
56 DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
57 };
58
/* HW header anchors used by insert/remove actions to address packet offsets */
enum dr_ste_v1_header_anchors {
	DR_STE_HEADER_ANCHOR_START_OUTER	= 0x00,
	DR_STE_HEADER_ANCHOR_1ST_VLAN		= 0x02,
	DR_STE_HEADER_ANCHOR_IPV6_IPV4		= 0x07,
	DR_STE_HEADER_ANCHOR_INNER_MAC		= 0x13,
	DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4	= 0x19,
};
66
/* Sizes (in bytes) of the single/double/triple action slots in an STE */
enum dr_ste_v1_action_size {
	DR_STE_ACTION_SINGLE_SZ = 4,
	DR_STE_ACTION_DOUBLE_SZ = 8,
	DR_STE_ACTION_TRIPLE_SZ = 12,
};
72
enum dr_ste_v1_action_insert_ptr_attr {
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,	/* Regular push header (e.g. push vlan) */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1,	/* Encapsulation / Tunneling */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,	/* IPsec */
};
78
/* HW action IDs programmed into the STE action slots */
enum dr_ste_v1_action_id {
	DR_STE_V1_ACTION_ID_NOP				= 0x00,
	DR_STE_V1_ACTION_ID_COPY			= 0x05,
	DR_STE_V1_ACTION_ID_SET				= 0x06,
	DR_STE_V1_ACTION_ID_ADD				= 0x07,
	DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE		= 0x08,
	DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER	= 0x09,
	DR_STE_V1_ACTION_ID_INSERT_INLINE		= 0x0a,
	DR_STE_V1_ACTION_ID_INSERT_POINTER		= 0x0b,
	DR_STE_V1_ACTION_ID_FLOW_TAG			= 0x0c,
	DR_STE_V1_ACTION_ID_QUEUE_ID_SEL		= 0x0d,
	DR_STE_V1_ACTION_ID_ACCELERATED_LIST		= 0x0e,
	DR_STE_V1_ACTION_ID_MODIFY_LIST			= 0x0f,
	DR_STE_V1_ACTION_ID_ASO				= 0x12,
	DR_STE_V1_ACTION_ID_TRAILER			= 0x13,
	DR_STE_V1_ACTION_ID_COUNTER_ID			= 0x14,
	DR_STE_V1_ACTION_ID_MAX				= 0x21,
	/* use for special cases */
	DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3		= 0x22,
};
99
/* HW field IDs used by the modify-header (set/add/copy) actions */
enum {
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0		= 0x00,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1		= 0x01,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2		= 0x02,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0		= 0x08,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1		= 0x09,
	DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0		= 0x0e,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0		= 0x18,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1		= 0x19,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0		= 0x40,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1		= 0x41,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0	= 0x44,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1	= 0x45,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2	= 0x46,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3	= 0x47,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0	= 0x4c,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1	= 0x4d,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2	= 0x4e,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3	= 0x4f,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0		= 0x5e,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1		= 0x5f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0		= 0x6f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1		= 0x70,
	DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE	= 0x7b,
	DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE		= 0x7c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0		= 0x8c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1		= 0x8d,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0		= 0x8e,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1		= 0x8f,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0		= 0x90,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1		= 0x91,
};
132
/* ASO context types used by the ASO action (only policers are used here) */
enum dr_ste_v1_aso_ctx_type {
	DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
};
136
137 static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
138 [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
139 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
140 },
141 [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
142 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
143 },
144 [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
145 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
146 },
147 [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
148 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
149 },
150 [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
151 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
152 },
153 [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
154 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
155 },
156 [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
157 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
158 .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
159 },
160 [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
161 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
162 .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
163 },
164 [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
165 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
166 .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
167 },
168 [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
169 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
170 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
171 },
172 [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
173 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
174 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
175 },
176 [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
177 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
178 .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
179 },
180 [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
181 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
182 .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
183 },
184 [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
185 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
186 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
187 },
188 [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
189 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
190 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
191 },
192 [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
193 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
194 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
195 },
196 [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
197 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
198 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
199 },
200 [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
201 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
202 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
203 },
204 [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
205 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
206 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
207 },
208 [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
209 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
210 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
211 },
212 [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
213 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
214 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
215 },
216 [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
217 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
218 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
219 },
220 [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
221 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
222 .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
223 },
224 [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
225 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
226 },
227 [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
228 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
229 },
230 [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
231 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
232 },
233 [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
234 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
235 },
236 [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
237 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
238 },
239 [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
240 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
241 },
242 [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
243 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
244 },
245 [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
246 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
247 },
248 [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
249 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
250 },
251 [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
252 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
253 },
254 [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
255 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
256 },
257 [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
258 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
259 },
260 [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
261 .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
262 },
263 };
264
/* Program the STE entry format (BWC byte/dw or full match) */
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
269
/* Set the STE miss address; the HW stores it as a 64B-aligned index
 * split across two fields (bits 39:32 and 31:6).
 */
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
277
dr_ste_v1_get_miss_addr(u8 * hw_ste_p)278 u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
279 {
280 u64 index =
281 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
282 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
283
284 return index << 6;
285 }
286
/* Program the BWC STE byte mask */
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
291
dr_ste_v1_get_byte_mask(u8 * hw_ste_p)292 u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
293 {
294 return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
295 }
296
/* Split the 2B lookup type into definer mode (high byte) and
 * definer index (low byte) — see the LU-type layout comment above.
 */
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
302
/* Program the lookup type of the next STE (mode in high byte,
 * definer index in low byte).
 */
void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
308
dr_ste_v1_get_next_lu_type(u8 * hw_ste_p)309 u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
310 {
311 u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
312 u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
313
314 return (mode << 8 | index);
315 }
316
/* Set the GVMI (vport identifier) of the hit (next-table) destination */
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
321
/* Set the hit (next-table) ICM address and hash table size; the HW packs
 * a 32B-aligned index together with the table size into two fields.
 */
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
329
/* Initialize a fresh STE: current/next lookup types and owner GVMI for
 * the entry itself, the hit address and the miss address.
 */
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
	dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
339
/* STEv1 HW expects the mask before the tag in memory, while SW builds
 * them in tag-then-mask order — swap the two regions before posting.
 * A CTRL-only write has no tag/mask, so nothing needs swapping.
 */
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
{
	u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
	u8 *mask = tag + DR_STE_SIZE_TAG;
	u8 tmp_tag[DR_STE_SIZE_TAG] = {};

	if (ste_size == DR_STE_SIZE_CTRL)
		return;

	WARN_ON(ste_size != DR_STE_SIZE);

	/* Backup tag */
	memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);

	/* Swap mask and tag  both are the same size */
	memcpy(tag, mask, DR_STE_SIZE_MASK);
	memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
}
358
/* Build a single FLOW_TAG action (tag reported to SW with the CQE) */
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_FLOW_TAG);
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
}
365
/* Attach a flow counter to the STE (counter lives in the STE control,
 * not in an action slot).
 */
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
370
/* Request HW re-parse of the packet; needed after any action that
 * changes the packet headers.
 */
static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
375
/* Build a double insert-with-pointer action that encapsulates the packet
 * with the reformat data referenced by @reformat_id.
 */
static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
				u32 reformat_id, int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
	dr_ste_v1_set_reparse(hw_ste_p);
}
388
/* Build a double insert-with-pointer action that inserts a generic header
 * at @offset relative to the given @anchor (non-encap attribute).
 */
static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
				     u32 reformat_id,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);

	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);

	dr_ste_v1_set_reparse(hw_ste_p);
}
408
/* Build a single remove-by-size action that strips @size bytes starting
 * at @offset relative to the given @anchor.
 */
static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
423
/* Build a double insert-inline action that pushes one VLAN header right
 * after the L2 MAC addresses.
 */
static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
				    u32 vlan_hdr)
{
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
	/* The hardware expects offset to vlan header in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 start_offset, HDR_LEN_L2_MACS >> 1);
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 inline_data, vlan_hdr);

	dr_ste_v1_set_reparse(hw_ste_p);
}
437
/* Build a single remove-by-size action that pops @vlans_num VLAN headers
 * starting at the first-VLAN anchor.
 */
static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);

	dr_ste_v1_set_reparse(hw_ste_p);
}
450
/* Build an L3 encap as two actions: first strip the original L2 headers
 * (remove header-to-header up to the IP anchor), then insert the reformat
 * data referenced by @reformat_id.
 */
static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
				   u8 *frst_s_action,
				   u8 *scnd_d_action,
				   u32 reformat_id,
				   int size)
{
	/* Remove L2 headers */
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_IPV6_IPV4);

	/* Encapsulate with given reformat ID */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);

	dr_ste_v1_set_reparse(hw_ste_p);
}
474
/* Build a single decap action: remove the tunnel headers up to the inner
 * MAC anchor and report the VNI to the CQE.
 */
static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
	MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_MAC);

	dr_ste_v1_set_reparse(hw_ste_p);
}
486
/* Build a single modify-list action pointing at a pre-written list of
 * @num_of_actions modify-header actions at index @re_write_index in the
 * rewrite area.
 */
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
					  u8 *s_action,
					  u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
		 num_of_actions);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
		 re_write_index);

	dr_ste_v1_set_reparse(hw_ste_p);
}
501
/* Build a double ASO action for flow metering: select the policer object
 * and line by @offset and direct the resulting color to @dest_reg_id.
 */
static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
					 u32 object_id,
					 u32 offset,
					 u8 dest_reg_id,
					 u8 init_color)
{
	MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_ASO);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
		 object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
	/* Convert reg_c index to HW 64bit index */
	MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
		 (dest_reg_id - 1) / 2);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
		 DR_STE_V1_ASO_CTX_TYPE_POLICERS);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
		 offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
		 init_color);
}
522
/* Append a fresh don't-care MATCH STE to the STE array when the current
 * STE runs out of action slots; advances *last_ste, bumps *added_stes and
 * clears the new STE's action area.
 */
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
					  u32 *added_stes,
					  u16 gvmi)
{
	u8 *action;

	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);

	action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
	memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
537
/* Build the TX action chain into the STE array starting at @last_ste.
 * Actions are emitted in the HW-required order; when the current STE runs
 * out of action space (action_sz) — or when two actions may not share an
 * STE (tracked by allow_modify_hdr/allow_encap) — a new MATCH STE is
 * appended via dr_ste_v1_arr_init_next_match() and *added_stes is bumped.
 * Finally the hit GVMI and the final ICM address are programmed on the
 * last STE in the chain.
 */
void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
			      u8 *action_type_set,
			      u32 actions_caps,
			      u8 *last_ste,
			      struct mlx5dr_ste_actions_attr *attr,
			      u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_encap = true;

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;

		/* Check if vlan_pop and modify_hdr on same STE is supported */
		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
			allow_modify_hdr = false;
	}

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_index);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_encap = false;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
				dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
				allow_encap = true;
			}
			dr_ste_v1_set_push_vlan(last_ste, action,
						attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_encap = true;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* L3 encap needs a single (remove L2) + double (insert) pair */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		action += DR_STE_ACTION_TRIPLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_remove_hdr(last_ste, action,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_aso_flow_meter(action,
					     attr->aso_flow_meter.obj_id,
					     attr->aso_flow_meter.offset,
					     attr->aso_flow_meter.dest_reg_id,
					     attr->aso_flow_meter.init_color);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
676
dr_ste_v1_set_actions_rx(struct mlx5dr_domain * dmn,u8 * action_type_set,u32 actions_caps,u8 * last_ste,struct mlx5dr_ste_actions_attr * attr,u32 * added_stes)677 void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
678 u8 *action_type_set,
679 u32 actions_caps,
680 u8 *last_ste,
681 struct mlx5dr_ste_actions_attr *attr,
682 u32 *added_stes)
683 {
684 u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
685 u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
686 bool allow_modify_hdr = true;
687 bool allow_ctr = true;
688
689 if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
690 dr_ste_v1_set_rewrite_actions(last_ste, action,
691 attr->decap_actions,
692 attr->decap_index);
693 action_sz -= DR_STE_ACTION_DOUBLE_SZ;
694 action += DR_STE_ACTION_DOUBLE_SZ;
695 allow_modify_hdr = false;
696 allow_ctr = false;
697 } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
698 dr_ste_v1_set_rx_decap(last_ste, action);
699 action_sz -= DR_STE_ACTION_SINGLE_SZ;
700 action += DR_STE_ACTION_SINGLE_SZ;
701 allow_modify_hdr = false;
702 allow_ctr = false;
703 }
704
705 if (action_type_set[DR_ACTION_TYP_TAG]) {
706 if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
707 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
708 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
709 action_sz = DR_STE_ACTION_TRIPLE_SZ;
710 allow_modify_hdr = true;
711 allow_ctr = true;
712 }
713 dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
714 action_sz -= DR_STE_ACTION_SINGLE_SZ;
715 action += DR_STE_ACTION_SINGLE_SZ;
716 }
717
718 if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
719 if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
720 !allow_modify_hdr) {
721 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
722 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
723 action_sz = DR_STE_ACTION_TRIPLE_SZ;
724 }
725
726 dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
727 action_sz -= DR_STE_ACTION_SINGLE_SZ;
728 action += DR_STE_ACTION_SINGLE_SZ;
729 allow_ctr = false;
730
731 /* Check if vlan_pop and modify_hdr on same STE is supported */
732 if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
733 allow_modify_hdr = false;
734 }
735
736 if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
737 /* Modify header and decapsulation must use different STEs */
738 if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
739 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
740 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
741 action_sz = DR_STE_ACTION_TRIPLE_SZ;
742 allow_modify_hdr = true;
743 allow_ctr = true;
744 }
745 dr_ste_v1_set_rewrite_actions(last_ste, action,
746 attr->modify_actions,
747 attr->modify_index);
748 action_sz -= DR_STE_ACTION_DOUBLE_SZ;
749 action += DR_STE_ACTION_DOUBLE_SZ;
750 }
751
752 if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
753 int i;
754
755 for (i = 0; i < attr->vlans.count; i++) {
756 if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
757 !allow_modify_hdr) {
758 dr_ste_v1_arr_init_next_match(&last_ste,
759 added_stes,
760 attr->gvmi);
761 action = MLX5_ADDR_OF(ste_mask_and_match_v1,
762 last_ste, action);
763 action_sz = DR_STE_ACTION_TRIPLE_SZ;
764 }
765 dr_ste_v1_set_push_vlan(last_ste, action,
766 attr->vlans.headers[i]);
767 action_sz -= DR_STE_ACTION_DOUBLE_SZ;
768 action += DR_STE_ACTION_DOUBLE_SZ;
769 }
770 }
771
772 if (action_type_set[DR_ACTION_TYP_CTR]) {
773 /* Counter action set after decap and before insert_hdr
774 * to exclude decaped / encaped header respectively.
775 */
776 if (!allow_ctr) {
777 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
778 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
779 action_sz = DR_STE_ACTION_TRIPLE_SZ;
780 allow_modify_hdr = true;
781 }
782 dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
783 allow_ctr = false;
784 }
785
786 if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
787 if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
788 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
789 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
790 action_sz = DR_STE_ACTION_TRIPLE_SZ;
791 }
792 dr_ste_v1_set_encap(last_ste, action,
793 attr->reformat.id,
794 attr->reformat.size);
795 action_sz -= DR_STE_ACTION_DOUBLE_SZ;
796 action += DR_STE_ACTION_DOUBLE_SZ;
797 allow_modify_hdr = false;
798 } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
799 u8 *d_action;
800
801 if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
802 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
803 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
804 action_sz = DR_STE_ACTION_TRIPLE_SZ;
805 }
806
807 d_action = action + DR_STE_ACTION_SINGLE_SZ;
808
809 dr_ste_v1_set_encap_l3(last_ste,
810 action, d_action,
811 attr->reformat.id,
812 attr->reformat.size);
813 action_sz -= DR_STE_ACTION_TRIPLE_SZ;
814 allow_modify_hdr = false;
815 } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
816 /* Modify header, decap, and encap must use different STEs */
817 if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
818 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
819 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
820 action_sz = DR_STE_ACTION_TRIPLE_SZ;
821 }
822 dr_ste_v1_set_insert_hdr(last_ste, action,
823 attr->reformat.id,
824 attr->reformat.param_0,
825 attr->reformat.param_1,
826 attr->reformat.size);
827 action_sz -= DR_STE_ACTION_DOUBLE_SZ;
828 action += DR_STE_ACTION_DOUBLE_SZ;
829 allow_modify_hdr = false;
830 } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
831 if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
832 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
833 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
834 action_sz = DR_STE_ACTION_TRIPLE_SZ;
835 allow_modify_hdr = true;
836 allow_ctr = true;
837 }
838 dr_ste_v1_set_remove_hdr(last_ste, action,
839 attr->reformat.param_0,
840 attr->reformat.param_1,
841 attr->reformat.size);
842 action_sz -= DR_STE_ACTION_SINGLE_SZ;
843 action += DR_STE_ACTION_SINGLE_SZ;
844 }
845
846 if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
847 if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
848 dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
849 action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
850 action_sz = DR_STE_ACTION_TRIPLE_SZ;
851 }
852 dr_ste_v1_set_aso_flow_meter(action,
853 attr->aso_flow_meter.obj_id,
854 attr->aso_flow_meter.offset,
855 attr->aso_flow_meter.dest_reg_id,
856 attr->aso_flow_meter.init_color);
857 action_sz -= DR_STE_ACTION_DOUBLE_SZ;
858 action += DR_STE_ACTION_DOUBLE_SZ;
859 }
860
861 dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
862 dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
863 }
864
/* Build a modify-header "set" double action: overwrite @length bits,
 * starting at bit @shifter, of the destination dword selected by
 * @hw_field with the inline value @data.
 */
void dr_ste_v1_set_action_set(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	/* STE v1 bit offsets are relative to the QW base, not the DW */
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
878
/* Build a modify-header "add" double action: add @data to the
 * @length-bit field that starts at bit @shifter of the destination
 * dword selected by @hw_field.
 */
void dr_ste_v1_set_action_add(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	/* STE v1 bit offsets are relative to the QW base, not the DW */
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;

	/* Each MLX5_SET below targets a distinct bitfield of the action,
	 * so the order of the writes is immaterial.
	 */
	MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
}
892
/* Build a modify-header "copy" double action: copy @dst_len bits from
 * source dword @src_hw_field (starting at bit @src_shifter) into the
 * destination dword @dst_hw_field (starting at bit @dst_shifter).
 */
void dr_ste_v1_set_action_copy(u8 *d_action,
			       u8 dst_hw_field,
			       u8 dst_shifter,
			       u8 dst_len,
			       u8 src_hw_field,
			       u8 src_shifter)
{
	/* Both offsets are relative to the QW base in STE v1 */
	dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
909
910 #define DR_STE_DECAP_L3_ACTION_NUM 8
911 #define DR_STE_L2_HDR_MAX_SZ 20
912
/* Build the HW action list that decaps an L3 tunnel: remove the outer
 * L2/L3 headers and re-insert the new L2 header @data (@data_sz bytes,
 * 14 or 18) at the start of the packet.
 *
 * @data:               new L2 header to insert
 * @data_sz:            size of @data in bytes
 * @hw_action:          output buffer for the generated actions
 * @hw_action_sz:       size of @hw_action in bytes
 * @used_hw_action_num: out - number of actions written
 *
 * Returns 0 on success, -EINVAL if @hw_action is too small.
 */
int dr_ste_v1_set_action_decap_l3_list(void *data,
				       u32 data_sz,
				       u8 *hw_action,
				       u32 hw_action_sz,
				       u16 *used_hw_action_num)
{
	u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
	void *data_ptr = padded_data;
	u16 used_actions = 0;
	u32 inline_data_sz;
	u32 i;

	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
		return -EINVAL;

	inline_data_sz =
		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);

	/* Add an alignment padding so the header ends on an inline-chunk
	 * boundary; the leading pad bytes are stripped at the end.
	 */
	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);

	/* Remove L2L3 outer headers */
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
	hw_action += DR_STE_ACTION_DOUBLE_SZ;
	used_actions++; /* Remove and NOP are a single double action */

	/* Point to the last dword of the header */
	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;

	/* Add the new header using inline action 4Byte at a time, the header
	 * is added in reversed order to the beginning of the packet to avoid
	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
	 * two bytes are padded and later removed.
	 */
	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
		void *addr_inline;

		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
		/* The hardware expects here offset to words (2 bytes) */
		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);

		/* Copy bytes one by one to avoid endianness problem */
		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
					   hw_action, inline_data);
		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
		hw_action += DR_STE_ACTION_DOUBLE_SZ;
		used_actions++;
	}

	/* Remove first 2 extra bytes */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
	/* The hardware expects here size in words (2 bytes) */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
	used_actions++;

	*used_hw_action_num = used_actions;

	return 0;
}
980
/* Populate the STE bit mask for the L2 src+dst MAC lookup from the
 * user's match mask. NOTE(review): DR_STE_SET_TAG appears to consume
 * (zero) the spec field it copies - confirm against dr_ste.h.
 */
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
						    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
	/* Any ip_version mask means the l3_type field participates fully */
	DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);

	/* cvlan/svlan masks both map onto the same qualifier field; mark
	 * the consumed flag so it is not matched again by a later builder.
	 */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
1005
/* Fill the L2 src+dst MAC STE tag from the match values.
 * Returns 0 on success, -EINVAL for an unsupported ip_version.
 */
static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);

	/* Translate the numeric IP version into the HW l3_type encoding;
	 * clear it so no later builder tries to match it again.
	 */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Only v4/v6 are representable in this lookup */
		return -EINVAL;
	}

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
1041
dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1042 void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
1043 struct mlx5dr_match_param *mask)
1044 {
1045 dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1046
1047 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
1048 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1049 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
1050 }
1051
/* Fill the IPv6 destination-address STE tag (all 128 bits, 32 at a time) */
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
1065
dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1066 void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
1067 struct mlx5dr_match_param *mask)
1068 {
1069 dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
1070
1071 sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
1072 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1073 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
1074 }
1075
/* Fill the IPv6 source-address STE tag (all 128 bits, 32 at a time) */
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
1089
dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1090 void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
1091 struct mlx5dr_match_param *mask)
1092 {
1093 dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
1094
1095 sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
1096 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1097 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
1098 }
1099
/* Fill the IPv4 5-tuple STE tag: addresses, L4 ports, protocol and
 * assorted IP header bits. TCP and UDP ports share the same tag
 * fields; at most one of the pairs is expected to be set in the spec.
 */
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		/* Helper packs all TCP flag bits into the tag at once */
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1124
dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1125 void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
1126 struct mlx5dr_match_param *mask)
1127 {
1128 dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
1129
1130 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
1131 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1132 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
1133 }
1134
/* Common bit-mask builder for the L2 src and L2 dst lookups: first and
 * second VLAN, fragmentation, ethertype and L3 type. The second-VLAN
 * fields come from the misc params and differ for inner vs outer.
 */
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
						       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* Any ip_version mask means the l3_type field participates fully */
	DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);

	/* cvlan/svlan share one qualifier field; consume both flags */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
1184
/* Common tag builder for the L2 src and L2 dst lookups; mirrors
 * dr_ste_v1_build_eth_l2_src_or_dst_bit_mask for the match values.
 * Returns 0 on success, -EINVAL for an unsupported ip_version.
 */
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
						 bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);

	/* Translate ip_version into the HW l3_type encoding and consume it */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		return -EINVAL;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1242
/* L2-source bit mask: SMAC plus the shared src/dst L2 fields */
static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);

	/* Remaining L2 fields are common to the src and dst lookups */
	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1253
/* L2-source tag: SMAC plus the shared src/dst L2 fields */
static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1265
dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1266 void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
1267 struct mlx5dr_match_param *mask)
1268 {
1269 dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
1270
1271 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
1272 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1273 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
1274 }
1275
/* L2-destination bit mask: DMAC plus the shared src/dst L2 fields */
static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* Remaining L2 fields are common to the src and dst lookups */
	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1286
/* L2-destination tag: DMAC plus the shared src/dst L2 fields */
static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1298
dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1299 void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
1300 struct mlx5dr_match_param *mask)
1301 {
1302 dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1303
1304 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
1305 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1306 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
1307 }
1308
/* Bit mask for the L2-tunneling lookup: DMAC, first VLAN, ethertype,
 * L3 type and the VXLAN VNI (carried in the misc params).
 */
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* The VNI occupies the upper 24 bits of the tunneling-id field */
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* cvlan/svlan share one qualifier field; consume both flags */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1336
/* Tag for the L2-tunneling lookup; mirrors the bit-mask builder.
 * Returns 0 on success, -EINVAL for an unsupported ip_version.
 */
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* The VNI occupies the upper 24 bits of the tunneling-id field */
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Translate ip_version into the HW l3_type encoding and consume it */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		return -EINVAL;
	}

	return 0;
}
1378
dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1379 void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
1380 struct mlx5dr_match_param *mask)
1381 {
1382 dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
1383
1384 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
1385 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1386 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
1387 }
1388
/* Fill the IPv4-misc STE tag: TTL and IHL header fields */
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);

	return 0;
}
1400
dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1401 void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
1402 struct mlx5dr_match_param *mask)
1403 {
1404 dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
1405
1406 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
1407 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1408 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
1409 }
1410
/* Fill the IPv6 L3/L4 STE tag: L4 ports, protocol, DSCP/ECN, hop limit,
 * flow label and TCP flags. TCP and UDP ports share the same tag fields.
 */
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* The flow label lives in the misc params, split inner/outer */
	if (sb->inner)
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1440
dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1441 void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1442 struct mlx5dr_match_param *mask)
1443 {
1444 dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1445
1446 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
1447 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1448 sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
1449 }
1450
/* Fill the MPLS STE tag from the inner or outer first-MPLS fields */
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	/* DR_STE_SET_MPLS expands to the label/exp/s_bos/ttl field copies */
	if (sb->inner)
		DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);

	return 0;
}
1464
dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1465 void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
1466 struct mlx5dr_match_param *mask)
1467 {
1468 dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
1469
1470 sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
1471 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1472 sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
1473 }
1474
/* Fill the GRE tunnel STE tag: protocol, key halves and C/K/S flags */
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
	DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1491
dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1492 void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1493 struct mlx5dr_match_param *mask)
1494 {
1495 dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1496
1497 sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
1498 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1499 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
1500 }
1501
/* Fill the tunneled-MPLS STE tag: choose the MPLS-over-GRE fields when
 * any of them is masked, otherwise fall back to MPLS-over-UDP.
 */
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1536
dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1537 void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
1538 struct mlx5dr_match_param *mask)
1539 {
1540 dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
1541
1542 sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
1543 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1544 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
1545 }
1546
/* Assemble the 32-bit MPLS label-stack entry for MPLS-over-UDP and
 * store it big-endian at the dword owned by the configured flex
 * parser. Each field is consumed from @value as it is folded in.
 */
static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u32 mpls_hdr = 0;
	u8 *dst;

	mpls_hdr |= misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_udp_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_udp_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	dst = dr_ste_calc_flex_parser_offset(tag,
					     sb->caps->flex_parser_id_mpls_over_udp);
	*(__be32 *)dst = cpu_to_be32(mpls_hdr);

	return 0;
}
1571
dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1572 void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
1573 struct mlx5dr_match_param *mask)
1574 {
1575 dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
1576
1577 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1578 * flex parsers_{0-3}/{4-7} respectively.
1579 */
1580 sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
1581 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1582 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1583
1584 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1585 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
1586 }
1587
/* Assemble the 32-bit MPLS label-stack entry for MPLS-over-GRE and
 * store it big-endian at the dword owned by the configured flex
 * parser. Each field is consumed from @value as it is folded in.
 */
static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u32 mpls_hdr = 0;
	u8 *dst;

	mpls_hdr |= misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_gre_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_gre_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	dst = dr_ste_calc_flex_parser_offset(tag,
					     sb->caps->flex_parser_id_mpls_over_gre);
	*(__be32 *)dst = cpu_to_be32(mpls_hdr);

	return 0;
}
1612
dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1613 void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
1614 struct mlx5dr_match_param *mask)
1615 {
1616 dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
1617
1618 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1619 * flex parsers_{0-3}/{4-7} respectively.
1620 */
1621 sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
1622 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1623 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1624
1625 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1626 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
1627 }
1628
/* Build the ICMP tag. ICMPv4 and ICMPv6 share a single tag layout,
 * so select the source fields by protocol, copy them into the tag,
 * then consume them from the match parameters. Always returns 0.
 */
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u32 *hdr_data;
	u8 *type;
	u8 *code;

	if (DR_MASK_IS_ICMPV4_SET(misc3)) {
		hdr_data = &misc3->icmpv4_header_data;
		type = &misc3->icmpv4_type;
		code = &misc3->icmpv4_code;
	} else {
		hdr_data = &misc3->icmpv6_header_data;
		type = &misc3->icmpv6_type;
		code = &misc3->icmpv6_code;
	}

	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *hdr_data);
	MLX5_SET(ste_icmp_v1, tag, icmp_type, *type);
	MLX5_SET(ste_icmp_v1, tag, icmp_code, *code);

	*hdr_data = 0;
	*type = 0;
	*code = 0;

	return 0;
}
1659
dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1660 void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
1661 struct mlx5dr_match_param *mask)
1662 {
1663 dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
1664
1665 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1666 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1667 sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
1668 }
1669
/* Match metadata_reg_a via the general purpose lookup field. */
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc2, metadata_reg_a);
	return 0;
}
1681
dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1682 void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1683 struct mlx5dr_match_param *mask)
1684 {
1685 dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
1686
1687 sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
1688 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1689 sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
1690 }
1691
/* Copy the TCP sequence/ack numbers into the tag, taking either the
 * inner or outer header fields depending on the builder. Always
 * returns 0.
 */
static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (!sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
	}

	return 0;
}
1708
dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1709 void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1710 struct mlx5dr_match_param *mask)
1711 {
1712 dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1713
1714 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1715 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1716 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
1717 }
1718
1719 static int
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1720 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
1721 struct mlx5dr_ste_build *sb,
1722 u8 *tag)
1723 {
1724 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1725
1726 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1727 outer_vxlan_gpe_flags, misc3,
1728 outer_vxlan_gpe_flags);
1729 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1730 outer_vxlan_gpe_next_protocol, misc3,
1731 outer_vxlan_gpe_next_protocol);
1732 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1733 outer_vxlan_gpe_vni, misc3,
1734 outer_vxlan_gpe_vni);
1735
1736 return 0;
1737 }
1738
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1739 void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1740 struct mlx5dr_match_param *mask)
1741 {
1742 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1743
1744 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1745 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1746 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
1747 }
1748
1749 static int
dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1750 dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
1751 struct mlx5dr_ste_build *sb,
1752 u8 *tag)
1753 {
1754 struct mlx5dr_match_misc *misc = &value->misc;
1755
1756 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1757 geneve_protocol_type, misc, geneve_protocol_type);
1758 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1759 geneve_oam, misc, geneve_oam);
1760 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1761 geneve_opt_len, misc, geneve_opt_len);
1762 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1763 geneve_vni, misc, geneve_vni);
1764
1765 return 0;
1766 }
1767
dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1768 void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1769 struct mlx5dr_match_param *mask)
1770 {
1771 dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1772
1773 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1774 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1775 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
1776 }
1777
dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,uint8_t * tag)1778 static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
1779 struct mlx5dr_ste_build *sb,
1780 uint8_t *tag)
1781 {
1782 struct mlx5dr_match_misc5 *misc5 = &value->misc5;
1783
1784 DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
1785 DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
1786
1787 return 0;
1788 }
1789
dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1790 void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
1791 struct mlx5dr_match_param *mask)
1792 {
1793 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1794 dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
1795 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1796 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
1797 }
1798
/* Match steering registers 0-1 against metadata_reg_c_0..3.
 * Each register is split into a high and a low 32-bit half.
 */
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *spec = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, spec, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, spec, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, spec, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, spec, metadata_reg_c_3);

	return 0;
}
1812
dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1813 void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
1814 struct mlx5dr_match_param *mask)
1815 {
1816 dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
1817
1818 sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
1819 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1820 sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
1821 }
1822
/* Match steering registers 2-3 against metadata_reg_c_4..7.
 * Each register is split into a high and a low 32-bit half.
 */
static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *spec = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, spec, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, spec, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, spec, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, spec, metadata_reg_c_7);

	return 0;
}
1836
dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1837 void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
1838 struct mlx5dr_match_param *mask)
1839 {
1840 dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
1841
1842 sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
1843 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1844 sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
1845 }
1846
/* Build the bit mask for the source GVMI/QP lookup. The
 * eswitch_owner_vhca_id is consumed here; it is resolved to a
 * domain at tag-build time rather than matched directly.
 */
static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
						  u8 *bit_mask)
{
	struct mlx5dr_match_misc *mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, mask, source_sqn);
	mask->source_eswitch_owner_vhca_id = 0;
}
1856
/* Build the source GVMI/QP tag.
 *
 * Resolves the mask's source_port to the GVMI of the owning vport,
 * optionally selecting the peer domain via eswitch_owner_vhca_id.
 * Consumes the fields it matches from @value.
 *
 * Returns 0 on success, -EINVAL if the vhca_id does not belong to
 * this domain or its peer, or if the vport is disabled/unknown.
 */
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;

	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			vport_dmn = dmn->peer_dmn;
		else
			return -EINVAL;

		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	/* Done unless the mask actually matches on source_gvmi */
	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
		return 0;

	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
	if (!vport_cap) {
		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
			   misc->source_port);
		return -EINVAL;
	}

	if (vport_cap->vport_gvmi)
		MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);

	misc->source_port = 0;
	return 0;
}
1900
dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1901 void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
1902 struct mlx5dr_match_param *mask)
1903 {
1904 dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
1905
1906 sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
1907 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1908 sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
1909 }
1910
/* Write one flex parser sample value (big-endian) into the tag
 * dword owned by its parser id, then consume the id/value pair.
 * Out-of-range ids and parsers already claimed by an earlier field
 * of the same rule are skipped.
 */
static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
				      u32 *misc4_field_value,
				      bool *parser_is_used,
				      u8 *tag)
{
	u32 id = *misc4_field_id;
	u8 *dst;

	if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
		return;

	parser_is_used[id] = true;
	dst = dr_ste_calc_flex_parser_offset(tag, id);
	*(__be32 *)dst = cpu_to_be32(*misc4_field_value);

	*misc4_field_id = 0;
	*misc4_field_value = 0;
}
1929
/* Place all four misc4 prog-sample id/value pairs into the tag.
 * (NOTE(review): "felx" is a pre-existing typo in the function name,
 * kept because sibling init functions reference it.)
 */
static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
	struct mlx5dr_match_misc4 *misc4 = &value->misc4;

	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_0,
				  &misc4->prog_sample_field_value_0,
				  parser_is_used, tag);
	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_1,
				  &misc4->prog_sample_field_value_1,
				  parser_is_used, tag);
	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_2,
				  &misc4->prog_sample_field_value_2,
				  parser_is_used, tag);
	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_3,
				  &misc4->prog_sample_field_value_3,
				  parser_is_used, tag);

	return 0;
}
1955
dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1956 void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1957 struct mlx5dr_match_param *mask)
1958 {
1959 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1960 dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1961 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1962 sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1963 }
1964
dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1965 void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1966 struct mlx5dr_match_param *mask)
1967 {
1968 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
1969 dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1970 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1971 sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1972 }
1973
1974 static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1975 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
1976 struct mlx5dr_ste_build *sb,
1977 u8 *tag)
1978 {
1979 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1980 u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
1981 u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1982
1983 MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
1984 misc3->geneve_tlv_option_0_data);
1985 misc3->geneve_tlv_option_0_data = 0;
1986
1987 return 0;
1988 }
1989
1990 void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1991 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
1992 struct mlx5dr_match_param *mask)
1993 {
1994 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
1995
1996 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1997 * flex parsers_{0-3}/{4-7} respectively.
1998 */
1999 sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
2000 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
2001 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2002
2003 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2004 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
2005 }
2006
2007 static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,uint8_t * tag)2008 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
2009 struct mlx5dr_ste_build *sb,
2010 uint8_t *tag)
2011 {
2012 u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
2013 struct mlx5dr_match_misc *misc = &value->misc;
2014
2015 if (misc->geneve_tlv_option_0_exist) {
2016 MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
2017 misc->geneve_tlv_option_0_exist = 0;
2018 }
2019
2020 return 0;
2021 }
2022
2023 void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2024 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
2025 struct mlx5dr_match_param *mask)
2026 {
2027 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
2028 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
2029 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2030 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
2031 }
2032
/* Copy the matched GTP-U header fields into the tag, consuming
 * them from the match parameters. Always returns 0.
 */
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
						    struct mlx5dr_ste_build *sb,
						    u8 *tag)
{
	struct mlx5dr_match_misc3 *spec = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, spec, gtpu_msg_flags);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, spec, gtpu_msg_type);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, spec, gtpu_teid);

	return 0;
}
2045
dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2046 void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
2047 struct mlx5dr_match_param *mask)
2048 {
2049 dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
2050
2051 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
2052 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2053 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
2054 }
2055
2056 static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)2057 dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
2058 struct mlx5dr_ste_build *sb,
2059 u8 *tag)
2060 {
2061 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
2062 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
2063 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
2064 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
2065 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
2066 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
2067 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
2068 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
2069 return 0;
2070 }
2071
2072 void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2073 dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
2074 struct mlx5dr_match_param *mask)
2075 {
2076 dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
2077
2078 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2079 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2080 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
2081 }
2082
2083 static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)2084 dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
2085 struct mlx5dr_ste_build *sb,
2086 u8 *tag)
2087 {
2088 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
2089 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
2090 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
2091 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
2092 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
2093 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
2094 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
2095 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
2096 return 0;
2097 }
2098
2099 void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2100 dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
2101 struct mlx5dr_match_param *mask)
2102 {
2103 dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
2104
2105 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
2106 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2107 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
2108 }
2109
/* STEv1 context: the ops table (builders, STE field accessors,
 * action writers and send hooks) exposed to the generic SW steering
 * code through mlx5dr_ste_get_ctx_v1().
 */
static struct mlx5dr_ste_ctx ste_ctx_v1 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v1_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v1_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v1_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v1_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v1_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v1_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v1_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v1_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v1_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v1_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v1_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v1_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v1_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v1_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v1_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v1_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
	.build_register_0_init		= &dr_ste_v1_build_register_0_init,
	.build_register_1_init		= &dr_ste_v1_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v1_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v1_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v1_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
	.build_tnl_header_0_1_init	= &dr_ste_v1_build_tnl_header_0_1_init,
	.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v1_init,
	.set_next_lu_type		= &dr_ste_v1_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v1_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v1_set_miss_addr,
	.get_miss_addr			= &dr_ste_v1_get_miss_addr,
	.set_hit_addr			= &dr_ste_v1_set_hit_addr,
	.set_byte_mask			= &dr_ste_v1_set_byte_mask,
	.get_byte_mask			= &dr_ste_v1_get_byte_mask,
	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_TX_POP |
					  DR_STE_CTX_ACTION_CAP_RX_PUSH |
					  DR_STE_CTX_ACTION_CAP_RX_ENCAP |
					  DR_STE_CTX_ACTION_CAP_POP_MDFY,
	.set_actions_rx			= &dr_ste_v1_set_actions_rx,
	.set_actions_tx			= &dr_ste_v1_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v1_action_modify_field_arr,
	.set_action_set			= &dr_ste_v1_set_action_set,
	.set_action_add			= &dr_ste_v1_set_action_add,
	.set_action_copy		= &dr_ste_v1_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
	/* Send */
	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
};
2168
/* Return the (static, shared) STEv1 context ops table. */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
{
	return &ste_ctx_v1;
}
2173