1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7
/* IEEE 802.1ad (QinQ) service-VLAN TPID */
#define SVLAN_ETHERTYPE 0x88a8
/* When set in qp_list_pointer, the remaining bits are a flow tag */
#define DR_STE_ENABLE_FLOW_TAG BIT(31)
10
/* Hardware STE entry types used by STE format v0 */
enum dr_ste_v0_entry_type {
	DR_STE_TYPE_TX = 1,		/* sx_transmit STE layout */
	DR_STE_TYPE_RX = 2,		/* rx_steering_mult STE layout */
	DR_STE_TYPE_MODIFY_PKT = 6,	/* modify_packet STE layout (header rewrite) */
};
16
/* Values for the RX STE tunneling_action field */
enum dr_ste_v0_action_tunl {
	DR_STE_TUNL_ACTION_NONE = 0,
	DR_STE_TUNL_ACTION_ENABLE = 1,
	DR_STE_TUNL_ACTION_DECAP = 2,		/* L2 tunnel decap */
	DR_STE_TUNL_ACTION_L3_DECAP = 3,	/* L3 tunnel decap + header rewrite */
	DR_STE_TUNL_ACTION_POP_VLAN = 4,
};
24
/* Values for the TX STE action_type field */
enum dr_ste_v0_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
	DR_STE_ACTION_TYPE_ENCAP_L3 = 3,	/* reformat L2-to-tunnel-L3 */
	DR_STE_ACTION_TYPE_ENCAP = 4,		/* reformat L2-to-tunnel-L2 */
};
30
/* Opcodes for header-rewrite (modify header) actions */
enum dr_ste_v0_action_mdfy_op {
	DR_STE_ACTION_MDFY_OP_COPY = 0x1,
	DR_STE_ACTION_MDFY_OP_SET = 0x2,
	DR_STE_ACTION_MDFY_OP_ADD = 0x3,
};
36
/* Pick the inner (_I), RX/dual (_D) or outer (_O) variant of a lookup type.
 * Inner takes precedence over rx; otherwise rx selects the _D variant.
 */
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
		   (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
			  DR_STE_V0_LU_TYPE_##lookup_type##_O)
41
/* Hardware lookup-type codes for STE v0. The _O/_I/_D suffixes denote
 * outer-header, inner-header and RX(dual) variants of the same lookup.
 * Values are defined by the device and must not be changed.
 */
enum {
	DR_STE_V0_LU_TYPE_NOP				= 0x00,
	DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP		= 0x05,
	DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I		= 0x0a,
	DR_STE_V0_LU_TYPE_ETHL2_DST_O			= 0x06,
	DR_STE_V0_LU_TYPE_ETHL2_DST_I			= 0x07,
	DR_STE_V0_LU_TYPE_ETHL2_DST_D			= 0x1b,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_O			= 0x08,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_I			= 0x09,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_D			= 0x1c,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O		= 0x36,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I		= 0x37,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D		= 0x38,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O		= 0x0d,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I		= 0x0e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D		= 0x1e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O		= 0x0f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I		= 0x10,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D		= 0x1f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x11,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x12,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D		= 0x20,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x29,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x2a,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D		= 0x2b,
	DR_STE_V0_LU_TYPE_ETHL4_O			= 0x13,
	DR_STE_V0_LU_TYPE_ETHL4_I			= 0x14,
	DR_STE_V0_LU_TYPE_ETHL4_D			= 0x21,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_O			= 0x2c,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_I			= 0x2d,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_D			= 0x2e,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_O			= 0x15,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_I			= 0x24,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_D			= 0x25,
	DR_STE_V0_LU_TYPE_GRE				= 0x16,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_0			= 0x22,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_1			= 0x23,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x19,
	DR_STE_V0_LU_TYPE_GENERAL_PURPOSE		= 0x18,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0		= 0x2f,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1		= 0x30,
	DR_STE_V0_LU_TYPE_TUNNEL_HEADER			= 0x34,
	DR_STE_V0_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
86
/* Destination/source field codes for v0 header-rewrite actions.
 * Each code names a 64-bit hardware rewrite register covering a slice
 * of the packet (L2/L3/L4 headers, tunnel L2, steering registers,
 * metadata, flex parser outputs).
 */
enum {
	DR_STE_V0_ACTION_MDFY_FLD_L2_0		= 0,
	DR_STE_V0_ACTION_MDFY_FLD_L2_1		= 1,
	DR_STE_V0_ACTION_MDFY_FLD_L2_2		= 2,
	DR_STE_V0_ACTION_MDFY_FLD_L3_0		= 3,
	DR_STE_V0_ACTION_MDFY_FLD_L3_1		= 4,
	DR_STE_V0_ACTION_MDFY_FLD_L3_2		= 5,
	DR_STE_V0_ACTION_MDFY_FLD_L3_3		= 6,
	DR_STE_V0_ACTION_MDFY_FLD_L3_4		= 7,
	DR_STE_V0_ACTION_MDFY_FLD_L4_0		= 8,
	DR_STE_V0_ACTION_MDFY_FLD_L4_1		= 9,
	DR_STE_V0_ACTION_MDFY_FLD_MPLS		= 10,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0	= 11,
	DR_STE_V0_ACTION_MDFY_FLD_REG_0		= 12,
	DR_STE_V0_ACTION_MDFY_FLD_REG_1		= 13,
	DR_STE_V0_ACTION_MDFY_FLD_REG_2		= 14,
	DR_STE_V0_ACTION_MDFY_FLD_REG_3		= 15,
	DR_STE_V0_ACTION_MDFY_FLD_L4_2		= 16,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_0	= 17,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_1	= 18,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_2	= 19,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_3	= 20,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1	= 21,
	DR_STE_V0_ACTION_MDFY_FLD_METADATA	= 22,
	DR_STE_V0_ACTION_MDFY_FLD_RESERVED	= 23,
};
113
/* Map each MLX5_ACTION_IN_FIELD_* modify-header field to its v0 hardware
 * rewrite register (hw_field) and the bit range [start, end] it occupies
 * inside that 64-bit register. Entries that are only valid for a specific
 * L3/L4 protocol carry l3_type/l4_type so the caller can reject mismatched
 * rewrites (e.g. TTL vs hop-limit, TCP vs UDP ports).
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	/* Same register slice as IPv4 TTL, but only valid for IPv6 */
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
	},
};
235
/* Write the STE entry type (enum dr_ste_v0_entry_type) into the STE header */
static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
240
/* Read back the STE entry type (enum dr_ste_v0_entry_type) */
static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
245
/* Program the miss address into the STE. The address is stored in 64-byte
 * granularity (hence the >> 6), split across the 39_32 and 31_6 fields.
 */
static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	/* Miss address for TX and RX STEs located in the same offsets */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
254
/* Reassemble the 64-byte-granular miss address written by
 * dr_ste_v0_set_miss_addr() and convert it back to a byte address.
 */
static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
{
	u64 index =
		((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
		 ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);

	return index << 6;
}
263
/* Set the STE byte mask (which tag bytes participate in the lookup) */
static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
}
268
/* Read back the STE byte mask */
static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, byte_mask);
}
273
/* Set this STE's own lookup type (stored in entry_sub_type on v0) */
static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
}
278
/* Set the lookup type of the next table in the chain */
static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
}
283
/* Read back the next-table lookup type */
static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
}
288
/* Set the GVMI (vport identifier) of the hit (next table) destination;
 * it is encoded in bits 63_48 of the next table base.
 */
static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
293
/* Program the hit (next table) ICM address. The address is stored in
 * 32-byte granularity (>> 5) with the hash-table size folded into the
 * low bits of the combined base/size fields.
 */
static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
}
301
/* Initialize a fresh STE: entry type, its own lookup type, a don't-care
 * next lookup type, and the GVMI in all three places that carry it.
 */
static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type,
				enum dr_ste_v0_entry_type entry_type, u16 gvmi)
{
	dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
	dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set GVMI once, this is the same for RX/TX
	 * bits 63_48 of next table base / miss address encode the next GVMI
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
316
/* Initialize a matcher STE, selecting the RX or TX entry type */
static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
			   bool is_rx, u16 gvmi)
{
	if (is_rx)
		dr_ste_v0_init_full(hw_ste_p, lu_type, DR_STE_TYPE_RX, gvmi);
	else
		dr_ste_v0_init_full(hw_ste_p, lu_type, DR_STE_TYPE_TX, gvmi);
}
325
/* Attach a flow tag to an RX STE; qp_list_pointer doubles as the flow
 * tag when DR_STE_ENABLE_FLOW_TAG (bit 31) is set.
 */
static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
331
/* Bind a 24-bit flow counter to the STE, split across two fields */
static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	/* This can be used for both rx_steering_mult and for sx_transmit */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
338
/* Set the TX STE go_back bit (needed by HW when combining reformat
 * with push VLAN).
 */
static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
343
/* Program a TX push-VLAN action; vlan_hdr is the raw 4-byte VLAN header.
 * @go_back: set when a reformat follows in the same chain (HW limitation).
 */
static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
				       bool go_back)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to HW limitation we need to set this bit, otherwise reformat +
	 * push vlan will not work.
	 */
	if (go_back)
		dr_ste_v0_set_go_back_bit(hw_ste_p);
}
356
/* Program a TX encap (packet reformat) action.
 * @reformat_id: FW reformat context id; @size: reformat data size in bytes;
 * @encap_l3: use the L2-to-tunnel-L3 variant instead of L2-to-tunnel-L2.
 */
static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
				   int size, bool encap_l3)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}
366
/* Program an RX L2 tunnel decap; fail_on_error drops malformed packets */
static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
373
/* Program an RX pop-VLAN action (one VLAN header per STE) */
static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_POP_VLAN);
}
379
/* Program an RX L3 tunnel decap. @vlan tells HW whether the rewritten
 * L2 header carries a VLAN (stored in action_description of the
 * modify-packet layout this STE also uses).
 */
static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
387
/* Point a modify-packet STE at a header-rewrite action list.
 * @num_of_actions: number of rewrite actions; @re_write_index: ICM index
 * of the pre-written action list.
 */
static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
		 num_of_actions);
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
		 re_write_index);
}
396
/* Append a fresh STE to the action-STE array: advance *last_ste by one
 * STE, bump the caller's added-STE counter, and initialize the new STE
 * with a don't-care lookup type.
 */
static void dr_ste_v0_arr_init_next(u8 **last_ste,
				    u32 *added_stes,
				    enum dr_ste_v0_entry_type entry_type,
				    u16 gvmi)
{
	*last_ste += DR_STE_SIZE;
	++(*added_stes);
	dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
			    entry_type, gvmi);
}
407
/* Build the TX action STE chain for a rule.
 * Order is load-bearing: modify-header first (outer headers only), then
 * push VLAN, then encap. Actions that cannot share one STE layout append
 * a new STE via dr_ste_v0_arr_init_next() and report it in *added_stes.
 * Finishes by programming the hit GVMI and final hit address.
 */
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u32 actions_caps,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
		action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];

	/* We want to make sure the modify header comes before L2
	 * encapsulation. The reason for that is that we support
	 * modify headers for outer headers only
	 */
	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			/* Each VLAN needs its own STE; the first may only
			 * reuse the current STE if no modify-header used it.
			 */
			if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							DR_STE_TYPE_TX,
							attr->gvmi);

			dr_ste_v0_set_tx_push_vlan(last_ste,
						   attr->vlans.headers[i],
						   encap);
		}
	}

	if (encap) {
		/* Modify header and encapsulation require a different STEs.
		 * Since modify header STE format doesn't support encapsulation
		 * tunneling_action.
		 */
		if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
		    action_type_set[DR_ACTION_TYP_PUSH_VLAN])
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_TX,
						attr->gvmi);

		dr_ste_v0_set_tx_encap(last_ste,
				       attr->reformat.id,
				       attr->reformat.size,
				       action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
		/* Whenever prio_tag_required enabled, we can be sure that the
		 * previous table (ACL) already push vlan to our packet,
		 * And due to HW limitation we need to set this bit, otherwise
		 * push vlan + reformat will not work.
		 */
		if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
			dr_ste_v0_set_go_back_bit(last_ste);
	}

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
477
/* Build the RX action STE chain for a rule.
 * Order is load-bearing: counter, tunnel decap (L2 or L3+rewrite),
 * pop VLAN, modify header, flow tag. A new STE is appended (via
 * dr_ste_v0_arr_init_next(), reported in *added_stes) whenever the next
 * action conflicts with the current STE's entry type. Finishes by
 * programming the hit GVMI and final hit address.
 */
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u32 actions_caps,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
		/* L3 decap rewrites the L2 header, so it needs the
		 * modify-packet entry type.
		 */
		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->decap_actions,
					      attr->decap_index);
	}

	if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
		dr_ste_v0_set_rx_decap(last_ste);

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			/* One pop per STE; the first pop needs a fresh STE
			 * if a decap already occupies the current one.
			 */
			if (i ||
			    action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
			    action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							DR_STE_TYPE_RX,
							attr->gvmi);

			dr_ste_v0_set_rx_pop_vlan(last_ste);
		}
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
		/* A previous action (L3 decap) may already use the
		 * modify-packet STE; if so, chain a new one.
		 */
		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_MODIFY_PKT,
						attr->gvmi);
		else
			dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);

		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_TAG]) {
		/* Flow tag lives in the RX layout, not modify-packet */
		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_RX,
						attr->gvmi);

		dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
	}

	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
543
/* Encode a single header-rewrite SET action: write @length bits of @data
 * into @hw_field at bit offset @shifter. A full 32-bit write is encoded
 * by the hardware as length 0.
 */
static void dr_ste_v0_set_action_set(u8 *hw_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
	if (length == 32)
		length = 0;

	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
}
557
/* Encode a single header-rewrite ADD action: add @data to @length bits of
 * @hw_field at bit offset @shifter. A full 32-bit operand is encoded by
 * the hardware as length 0.
 */
static void dr_ste_v0_set_action_add(u8 *hw_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
	if (length == 32)
		length = 0;

	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
}
571
/* Encode a single header-rewrite COPY action: copy @dst_len bits from
 * @src_hw_field (at @src_shifter) into @dst_hw_field (at @dst_shifter).
 */
static void dr_ste_v0_set_action_copy(u8 *hw_action,
				      u8 dst_hw_field,
				      u8 dst_shifter,
				      u8 dst_len,
				      u8 src_hw_field,
				      u8 src_shifter)
{
	MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
	MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
}
586
/* Minimum SET actions needed to rebuild the L2 header after L3 decap
 * (dmac hi, smac hi, dmac lo, ethertype, smac lo); +1 if a VLAN follows.
 */
#define DR_STE_DECAP_L3_MIN_ACTION_NUM	5

/* Build the list of hw SET actions that reconstruct the inner L2 header
 * after an L3 tunnel decap.
 * @data/@data_sz: the L2 header to restore (mlx5_ifc_l2_hdr_bits layout);
 *                 a size other than HDR_LEN_L2 implies a VLAN is present.
 * @hw_action/@hw_action_sz: output buffer for the encoded actions.
 * @used_hw_action_num: out, number of actions written.
 * Returns 0 on success, -ENOMEM if the buffer cannot hold them.
 */
static int
dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
				   u8 *hw_action, u32 hw_action_sz,
				   u16 *used_hw_action_num)
{
	struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
	u32 hw_action_num;
	int required_actions;
	u32 hdr_fld_4b;
	u16 hdr_fld_2b;
	u16 vlan_type;
	bool vlan;

	vlan = (data_sz != HDR_LEN_L2);
	hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
	required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;

	if (hw_action_num < required_actions)
		return -ENOMEM;

	/* dmac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0); /* 0 encodes a full 32-bit write */
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 16);
	hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
	/* repack the header's 47_32/31_0 split into a 47_16 word */
	hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
		      MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* dmac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* ethertype + (optional) vlan */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 32);
	if (!vlan) {
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
	} else {
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
		/* write vlan qualifier + vlan header together (18 bits) */
		hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
	}
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	if (vlan) {
		/* restore the VLAN ethertype after the vlan header */
		MLX5_SET(dr_action_hw_set, hw_action,
			 opcode, DR_STE_ACTION_MDFY_OP_SET);
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
		MLX5_SET(dr_action_hw_set, hw_action,
			 inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_length, 16);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_left_shifter, 0);
	}

	*used_hw_action_num = required_actions;

	return 0;
}
702
/* Build the ETHL2_SRC_DST bit mask from the SW match mask.
 * NOTE: consumes (zeroes) the mask fields it translates so they are not
 * matched again by later builders.
 */
static void
dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
					bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* HW splits SMAC as 47_32/31_0 while SW uses 47_16/15_0 */
	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;
		mask->smac_15_0 = 0;
	}

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	/* Either VLAN qualifier mask enables the full qualifier field */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
734
735 static int
dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)736 dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
737 struct mlx5dr_ste_build *sb,
738 u8 *tag)
739 {
740 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
741
742 DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
743 DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
744
745 if (spec->smac_47_16 || spec->smac_15_0) {
746 MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
747 spec->smac_47_16 >> 16);
748 MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
749 spec->smac_47_16 << 16 | spec->smac_15_0);
750 spec->smac_47_16 = 0;
751 spec->smac_15_0 = 0;
752 }
753
754 if (spec->ip_version) {
755 if (spec->ip_version == IP_VERSION_IPV4) {
756 MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
757 spec->ip_version = 0;
758 } else if (spec->ip_version == IP_VERSION_IPV6) {
759 MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
760 spec->ip_version = 0;
761 } else {
762 return -EINVAL;
763 }
764 }
765
766 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
767 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
768 DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
769
770 if (spec->cvlan_tag) {
771 MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
772 spec->cvlan_tag = 0;
773 } else if (spec->svlan_tag) {
774 MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
775 spec->svlan_tag = 0;
776 }
777 return 0;
778 }
779
/* Initialize an ETHL2_SRC_DST ste_build: compute the bit mask, derive the
 * lookup type and byte mask, and install the tag builder callback.
 */
static void
dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
}
790
/* Build the ETHL3_IPV6_DST tag: copy all four 32-bit words of the IPv6
 * destination address from the SW match value (fields are consumed).
 */
static int
dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
805
/* Initialize an ETHL3_IPV6_DST ste_build: the tag builder doubles as the
 * bit-mask builder (run here on the mask), then derive lookup/byte mask.
 */
static void
dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
}
816
/* Build the ETHL3_IPV6_SRC tag: copy all four 32-bit words of the IPv6
 * source address from the SW match value (fields are consumed).
 */
static int
dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
831
832 static void
dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)833 dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
834 struct mlx5dr_match_param *mask)
835 {
836 dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
837
838 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
839 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
840 sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
841 }
842
/* Build the STE tag for an IPv4 5-tuple match: addresses, L4 ports,
 * IP protocol, plus fragment bit, DSCP and ECN, and optionally the
 * TCP flags. tcp_flags is cleared in @value once consumed so later
 * builders do not match on it again. Returns 0.
 */
static int
dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	/* Use the inner or outer header spec per this builder's setup. */
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
	/* TCP and UDP ports share the same destination/source port
	 * fields in the tag.
	 */
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		/* Fold all TCP flag bits into the tag and mark them consumed. */
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
868
869 static void
dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)870 dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
871 struct mlx5dr_match_param *mask)
872 {
873 dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
874
875 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
876 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
877 sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
878 }
879
/* Build the bit mask for the L2 fields shared by the ETHL2 source and
 * destination builders: first and second VLAN headers, fragment bit,
 * ethertype and L3 type. Mask fields fully consumed here (the VLAN
 * tag qualifiers) are cleared in @value so subsequent builders do not
 * try to match them again.
 */
static void
dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
					   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
	/* l3_type is derived from ip_version by the tag builder, so the
	 * whole field is masked whenever ip_version is being matched.
	 */
	DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		/* Either VLAN tag type requires matching the full
		 * first_vlan_qualifier field.
		 */
		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
930
/* Build the tag for the L2 fields shared by the ETHL2 source and
 * destination builders. Consumed spec fields (ip_version, VLAN tag
 * type bits) are cleared in @value once encoded.
 * Returns 0 on success, -EINVAL if ip_version is neither 4 nor 6.
 */
static int
dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
				      bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	if (spec->ip_version) {
		/* Translate ip_version into the hardware l3_type encoding. */
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			/* Any other IP version cannot be encoded. */
			return -EINVAL;
		}
	}

	/* First VLAN header: C-VLAN takes precedence over S-VLAN. */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
991
992 static void
dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param * value,bool inner,u8 * bit_mask)993 dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
994 bool inner, u8 *bit_mask)
995 {
996 struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
997
998 DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
999 DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
1000
1001 dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
1002 }
1003
1004 static int
dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1005 dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
1006 struct mlx5dr_ste_build *sb,
1007 u8 *tag)
1008 {
1009 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1010
1011 DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
1012 DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
1013
1014 return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
1015 }
1016
1017 static void
dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1018 dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
1019 struct mlx5dr_match_param *mask)
1020 {
1021 dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
1022 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
1023 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1024 sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
1025 }
1026
1027 static void
dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * bit_mask)1028 dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
1029 struct mlx5dr_ste_build *sb,
1030 u8 *bit_mask)
1031 {
1032 struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
1033
1034 DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
1035 DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
1036
1037 dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
1038 }
1039
1040 static int
dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1041 dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
1042 struct mlx5dr_ste_build *sb,
1043 u8 *tag)
1044 {
1045 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1046
1047 DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
1048 DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
1049
1050 return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
1051 }
1052
1053 static void
dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1054 dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
1055 struct mlx5dr_match_param *mask)
1056 {
1057 dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
1058
1059 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
1060 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1061 sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
1062 }
1063
/* Build the bit mask for the L2 tunneling lookup: DMAC, first VLAN,
 * fragment bit, ethertype, L3 type and the VXLAN network id.
 * Consumed mask fields (vxlan_vni, VLAN tag types) are cleared in
 * @value so they are not matched by subsequent builders.
 */
static void
dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
				    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	/* l3_type is derived from ip_version, so mask the whole field. */
	DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* VNI is placed in the upper bits of the network id field,
		 * hence the shift by 8.
		 */
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		/* Either VLAN tag type requires matching the full
		 * first_vlan_qualifier field.
		 */
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1092
/* Build the STE tag for the L2 tunneling lookup. Consumed spec fields
 * (vxlan_vni, VLAN tag type bits, ip_version) are cleared in @value
 * once encoded.
 * Returns 0 on success, -EINVAL if ip_version is neither 4 nor 6.
 */
static int
dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* VNI occupies the upper bits of the network id field,
		 * hence the shift by 8; mirrors the bit-mask builder.
		 */
		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* First VLAN header: C-VLAN takes precedence over S-VLAN. */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version) {
		/* Translate ip_version into the hardware l3_type encoding. */
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			/* Any other IP version cannot be encoded. */
			return -EINVAL;
		}
	}

	return 0;
}
1137
1138 static void
dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1139 dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
1140 struct mlx5dr_match_param *mask)
1141 {
1142 dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
1143
1144 sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
1145 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1146 sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
1147 }
1148
1149 static int
dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1150 dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
1151 struct mlx5dr_ste_build *sb,
1152 u8 *tag)
1153 {
1154 struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1155
1156 DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
1157 DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl);
1158
1159 return 0;
1160 }
1161
1162 static void
dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1163 dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
1164 struct mlx5dr_match_param *mask)
1165 {
1166 dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
1167
1168 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
1169 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1170 sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
1171 }
1172
/* Build the STE tag for the combined L3/L4 lookup: L4 ports, protocol,
 * fragment bit, DSCP/ECN, hop limit, IPv6 flow label and optionally
 * the TCP flags. tcp_flags is cleared in @value once consumed.
 * Returns 0.
 */
static int
dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
				   struct mlx5dr_ste_build *sb,
				   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	/* TCP and UDP ports share the same dst/src port tag fields. */
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* The flow label lives in the misc params, split by header copy. */
	if (sb->inner)
		DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		/* Fold all TCP flag bits into the tag and mark them consumed. */
		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1203
1204 static void
dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1205 dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1206 struct mlx5dr_match_param *mask)
1207 {
1208 dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1209
1210 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
1211 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1212 sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
1213 }
1214
1215 static int
dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1216 dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
1217 struct mlx5dr_ste_build *sb,
1218 u8 *tag)
1219 {
1220 struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1221
1222 if (sb->inner)
1223 DR_STE_SET_MPLS(mpls, misc2, inner, tag);
1224 else
1225 DR_STE_SET_MPLS(mpls, misc2, outer, tag);
1226
1227 return 0;
1228 }
1229
1230 static void
dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1231 dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
1232 struct mlx5dr_match_param *mask)
1233 {
1234 dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
1235
1236 sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
1237 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1238 sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
1239 }
1240
1241 static int
dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1242 dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
1243 struct mlx5dr_ste_build *sb,
1244 u8 *tag)
1245 {
1246 struct mlx5dr_match_misc *misc = &value->misc;
1247
1248 DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
1249
1250 DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
1251 DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
1252 DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
1253
1254 DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
1255
1256 DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
1257
1258 return 0;
1259 }
1260
1261 static void
dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1262 dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1263 struct mlx5dr_match_param *mask)
1264 {
1265 dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1266
1267 sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
1268 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1269 sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
1270 }
1271
/* Build the STE tag for a tunneled MPLS match. Packs label, EXP,
 * S(BoS) and TTL into one MPLS header word stored in flex parser 3,
 * taking the MPLS-over-GRE fields when those are set in the mask and
 * the MPLS-over-UDP fields otherwise. Consumed fields are cleared in
 * @value. Returns 0.
 */
static int
dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
			     struct mlx5dr_ste_build *sb,
			     u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
	u32 mpls_hdr;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
		mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
		misc_2->outer_first_mpls_over_gre_label = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
		misc_2->outer_first_mpls_over_gre_exp = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
		misc_2->outer_first_mpls_over_gre_s_bos = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
		misc_2->outer_first_mpls_over_gre_ttl = 0;
	} else {
		mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
		misc_2->outer_first_mpls_over_udp_label = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
		misc_2->outer_first_mpls_over_udp_exp = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
		misc_2->outer_first_mpls_over_udp_s_bos = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
		misc_2->outer_first_mpls_over_udp_ttl = 0;
	}

	/* The assembled MPLS word is matched via flex parser 3. */
	MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
	return 0;
}
1303
1304 static void
dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1305 dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
1306 struct mlx5dr_match_param *mask)
1307 {
1308 dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
1309
1310 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1311 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1312 sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
1313 }
1314
/* Build the STE tag for an MPLS-over-UDP match. Packs label, EXP,
 * S(BoS) and TTL into one big-endian MPLS header word and writes it
 * at the offset of the flex parser assigned by FW caps for
 * MPLS-over-UDP. Consumed fields are cleared in @value. Returns 0.
 */
static int
dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
				      struct mlx5dr_ste_build *sb,
				      u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_udp_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_udp_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	/* Locate this parser's dword within the tag and store big-endian. */
	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1340
1341 static void
dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1342 dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
1343 struct mlx5dr_match_param *mask)
1344 {
1345 dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
1346 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1347 * flex parsers_{0-3}/{4-7} respectively.
1348 */
1349 sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
1350 DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
1351 DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1352
1353 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1354 sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
1355 }
1356
/* Build the STE tag for an MPLS-over-GRE match. Packs label, EXP,
 * S(BoS) and TTL into one big-endian MPLS header word and writes it
 * at the offset of the flex parser assigned by FW caps for
 * MPLS-over-GRE. Consumed fields are cleared in @value. Returns 0.
 */
static int
dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
				      struct mlx5dr_ste_build *sb,
				      u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_gre_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_gre_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	/* Locate this parser's dword within the tag and store big-endian. */
	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1382
1383 static void
dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1384 dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
1385 struct mlx5dr_match_param *mask)
1386 {
1387 dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
1388
1389 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1390 * flex parsers_{0-3}/{4-7} respectively.
1391 */
1392 sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
1393 DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
1394 DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1395
1396 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1397 sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
1398 }
1399
/* Bit offsets of the ICMP type and code within the first flex-parser
 * dword (the dword also built below as icmp_hdr).
 */
#define ICMP_TYPE_OFFSET_FIRST_DW 24
#define ICMP_CODE_OFFSET_FIRST_DW 16

/* Build the STE tag for an ICMPv4/ICMPv6 match. DW0 of the assigned
 * flex parser holds type and code; DW1 holds the header data.
 * Consumed fields are cleared in @value. Returns 0.
 */
static int
dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
			 struct mlx5dr_ste_build *sb,
			 u8 *tag)
{
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
	u32 *icmp_header_data;
	int dw0_location;
	int dw1_location;
	u8 *parser_ptr;
	u8 *icmp_type;
	u8 *icmp_code;
	bool is_ipv4;
	u32 icmp_hdr;

	/* Select the v4 or v6 field set and the FW-assigned parser ids. */
	is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
	if (is_ipv4) {
		icmp_header_data	= &misc_3->icmpv4_header_data;
		icmp_type		= &misc_3->icmpv4_type;
		icmp_code		= &misc_3->icmpv4_code;
		dw0_location		= sb->caps->flex_parser_id_icmp_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data	= &misc_3->icmpv6_header_data;
		icmp_type		= &misc_3->icmpv6_type;
		icmp_code		= &misc_3->icmpv6_code;
		dw0_location		= sb->caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmpv6_dw1;
	}

	/* DW0: type in bits 31-24, code in bits 23-16, stored big-endian. */
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
	icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
		   (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
	*(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
	*icmp_code = 0;
	*icmp_type = 0;

	/* DW1: the ICMP header data word, stored big-endian. */
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
	*(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
	*icmp_header_data = 0;

	return 0;
}
1446
1447 static void
dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1448 dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
1449 struct mlx5dr_match_param *mask)
1450 {
1451 u8 parser_id;
1452 bool is_ipv4;
1453
1454 dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
1455
1456 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1457 * flex parsers_{0-3}/{4-7} respectively.
1458 */
1459 is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
1460 parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
1461 sb->caps->flex_parser_id_icmpv6_dw0;
1462 sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
1463 DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
1464 DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1465 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1466 sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
1467 }
1468
1469 static int
dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1470 dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
1471 struct mlx5dr_ste_build *sb,
1472 u8 *tag)
1473 {
1474 struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
1475
1476 DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
1477 misc_2, metadata_reg_a);
1478
1479 return 0;
1480 }
1481
1482 static void
dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1483 dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1484 struct mlx5dr_match_param *mask)
1485 {
1486 dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
1487
1488 sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
1489 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1490 sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
1491 }
1492
1493 static int
dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1494 dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
1495 struct mlx5dr_ste_build *sb,
1496 u8 *tag)
1497 {
1498 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1499
1500 if (sb->inner) {
1501 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
1502 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
1503 } else {
1504 DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
1505 DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
1506 }
1507
1508 return 0;
1509 }
1510
1511 static void
dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1512 dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1513 struct mlx5dr_match_param *mask)
1514 {
1515 dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1516
1517 sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
1518 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1519 sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
1520 }
1521
1522 static int
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1523 dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
1524 struct mlx5dr_ste_build *sb,
1525 u8 *tag)
1526 {
1527 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1528
1529 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1530 outer_vxlan_gpe_flags, misc3,
1531 outer_vxlan_gpe_flags);
1532 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1533 outer_vxlan_gpe_next_protocol, misc3,
1534 outer_vxlan_gpe_next_protocol);
1535 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1536 outer_vxlan_gpe_vni, misc3,
1537 outer_vxlan_gpe_vni);
1538
1539 return 0;
1540 }
1541
1542 static void
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1543 dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1544 struct mlx5dr_match_param *mask)
1545 {
1546 dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1547 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1548 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1549 sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
1550 }
1551
1552 static int
dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1553 dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
1554 struct mlx5dr_ste_build *sb,
1555 u8 *tag)
1556 {
1557 struct mlx5dr_match_misc *misc = &value->misc;
1558
1559 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1560 geneve_protocol_type, misc, geneve_protocol_type);
1561 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1562 geneve_oam, misc, geneve_oam);
1563 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1564 geneve_opt_len, misc, geneve_opt_len);
1565 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1566 geneve_vni, misc, geneve_vni);
1567
1568 return 0;
1569 }
1570
1571 static void
dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1572 dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1573 struct mlx5dr_match_param *mask)
1574 {
1575 dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1576 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1577 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1578 sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
1579 }
1580
1581 static int
dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1582 dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
1583 struct mlx5dr_ste_build *sb,
1584 u8 *tag)
1585 {
1586 struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1587
1588 DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
1589 DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
1590 DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
1591 DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
1592
1593 return 0;
1594 }
1595
1596 static void
dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1597 dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
1598 struct mlx5dr_match_param *mask)
1599 {
1600 dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
1601
1602 sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
1603 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1604 sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
1605 }
1606
1607 static int
dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1608 dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
1609 struct mlx5dr_ste_build *sb,
1610 u8 *tag)
1611 {
1612 struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1613
1614 DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
1615 DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
1616 DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
1617 DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
1618
1619 return 0;
1620 }
1621
1622 static void
dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1623 dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
1624 struct mlx5dr_match_param *mask)
1625 {
1626 dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
1627
1628 sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
1629 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1630 sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
1631 }
1632
/* Build the bit mask for source GVMI/QP matching.
 * Source port and source SQN are matched all-or-nothing: the field mask
 * is set to all-ones whenever the corresponding match-param mask is set.
 */
static void
dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
				      u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
	/* The eswitch owner vhca_id is consumed by the tag builder (via
	 * sb->vhca_id_valid), not by the HW bit mask - clear it here to
	 * mark it handled.
	 */
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
1643
/* Build the source GVMI/QP tag.
 * Resolves the domain that owns the source vport (local or peer, based on
 * the eswitch owner vhca_id when valid) and translates the source port to
 * that domain's vport GVMI.
 * Note: consumed match-param fields (vhca_id, source_port) are zeroed as
 * they are translated into the tag.
 * Returns 0 on success, -EINVAL if the vhca_id or vport is unknown.
 */
static int
dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			vport_dmn = dmn->peer_dmn;
		else
			return -EINVAL;

		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		/* No vhca_id to match - resolve the vport on the local domain */
		vport_dmn = dmn;
	}

	/* Only translate source_port if the mask builder requested a
	 * match on source_gvmi.
	 */
	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (source_gvmi_set) {
		vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
							misc->source_port);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
				   misc->source_port);
			return -EINVAL;
		}

		if (vport_cap->vport_gvmi)
			MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

		misc->source_port = 0;
	}

	return 0;
}
1691
1692 static void
dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1693 dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
1694 struct mlx5dr_match_param *mask)
1695 {
1696 dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
1697
1698 sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
1699 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1700 sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
1701 }
1702
static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
				      u32 *misc4_field_value,
				      bool *parser_is_used,
				      u8 *tag)
{
	u32 parser_id = *misc4_field_id;
	u8 *parser_ptr;

	/* Skip out-of-range ids and parsers already claimed by an
	 * earlier misc4 field of this STE.
	 */
	if (parser_id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[parser_id])
		return;

	parser_is_used[parser_id] = true;

	/* Write the value, big-endian, at the parser's slot in the tag */
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);

	/* Mark the match-param fields as consumed */
	*misc4_field_id = 0;
	*misc4_field_value = 0;
}
1721
static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc4 *misc4 = &value->misc4;
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
	u32 *field_ids[] = {
		&misc4->prog_sample_field_id_0,
		&misc4->prog_sample_field_id_1,
		&misc4->prog_sample_field_id_2,
		&misc4->prog_sample_field_id_3,
	};
	u32 *field_values[] = {
		&misc4->prog_sample_field_value_0,
		&misc4->prog_sample_field_value_1,
		&misc4->prog_sample_field_value_2,
		&misc4->prog_sample_field_value_3,
	};
	int i;

	/* Place each programmed sample field in its flex parser slot,
	 * in field order 0..3, first claim wins per parser id.
	 */
	for (i = 0; i < ARRAY_SIZE(field_ids); i++)
		dr_ste_v0_set_flex_parser(field_ids[i], field_values[i],
					  parser_is_used, tag);

	return 0;
}
1747
dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1748 static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1749 struct mlx5dr_match_param *mask)
1750 {
1751 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1752 dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
1753 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1754 sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
1755 }
1756
dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1757 static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1758 struct mlx5dr_match_param *mask)
1759 {
1760 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
1761 dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
1762 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1763 sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
1764 }
1765
/* Build the tag for matching GENEVE TLV option 0 data.
 * The option data is carried by the flex parser whose id was assigned by
 * FW (caps->flex_parser_id_geneve_tlv_option_0); write the 32-bit option
 * data at that parser's offset within the tag.
 * NOTE(review): the ste_flex_parser_0/flex_parser_3 field is used purely
 * as a dword accessor at parser_ptr - presumably it maps to dword 0 of
 * the slot; confirm against the ste_flex_parser_0 layout.
 */
static int
dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);

	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
		 misc3->geneve_tlv_option_0_data);
	/* Mark the match-param field as consumed */
	misc3->geneve_tlv_option_0_data = 0;

	return 0;
}
1781
1782 static void
dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1783 dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
1784 struct mlx5dr_match_param *mask)
1785 {
1786 dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
1787
1788 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1789 * flex parsers_{0-3}/{4-7} respectively.
1790 */
1791 sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
1792 DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
1793 DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1794
1795 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1796 sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
1797 }
1798
static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
						    struct mlx5dr_ste_build *sb,
						    u8 *tag)
{
	struct mlx5dr_match_misc3 *m3 = &value->misc3;

	/* Copy GTP-U header fields (flags, message type, TEID) from the
	 * match param into the STE tag.
	 */
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
		       gtpu_msg_flags, m3, gtpu_msg_flags);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
		       gtpu_msg_type, m3, gtpu_msg_type);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
		       gtpu_teid, m3, gtpu_teid);

	return 0;
}
1817
dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1818 static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
1819 struct mlx5dr_match_param *mask)
1820 {
1821 dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
1822
1823 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1824 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1825 sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
1826 }
1827
/* Build the tag for GTP-U fields carried by flex parser set 0.
 * Each GTP-U field has a FW-assigned flex parser id (in caps); only the
 * fields whose parser id falls in set 0 (parsers 0-3) are written here -
 * the set-1 fields are handled by the _1_tag variant.
 */
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
1843
1844 static void
dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1845 dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1846 struct mlx5dr_match_param *mask)
1847 {
1848 dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
1849
1850 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1851 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1852 sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
1853 }
1854
/* Build the tag for GTP-U fields carried by flex parser set 1.
 * Mirror of the _0_tag variant: only the fields whose FW-assigned parser
 * id falls in set 1 (parsers 4-7) are written here.
 */
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
1870
1871 static void
dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1872 dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1873 struct mlx5dr_match_param *mask)
1874 {
1875 dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
1876
1877 sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
1878 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1879 sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
1880 }
1881
dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,uint8_t * tag)1882 static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
1883 struct mlx5dr_ste_build *sb,
1884 uint8_t *tag)
1885 {
1886 struct mlx5dr_match_misc5 *misc5 = &value->misc5;
1887
1888 DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
1889 DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
1890
1891 return 0;
1892 }
1893
dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1894 static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
1895 struct mlx5dr_match_param *mask)
1896 {
1897 sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
1898 dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
1899 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1900 sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
1901 }
1902
/* Ops table for STE format v0: matcher builders, STE field accessors and
 * action setters, exposed to the rest of the driver via
 * mlx5dr_ste_get_ctx_v0().
 */
static struct mlx5dr_ste_ctx ste_ctx_v0 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v0_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v0_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v0_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v0_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v0_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v0_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v0_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v0_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v0_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v0_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v0_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v0_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v0_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v0_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v0_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v0_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v0_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_register_0_init		= &dr_ste_v0_build_register_0_init,
	.build_register_1_init		= &dr_ste_v0_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v0_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v0_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v0_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
	.build_tnl_header_0_1_init	= &dr_ste_v0_build_tnl_header_0_1_init,
	.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v0_init,
	.set_next_lu_type		= &dr_ste_v0_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v0_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v0_set_miss_addr,
	.get_miss_addr			= &dr_ste_v0_get_miss_addr,
	.set_hit_addr			= &dr_ste_v0_set_hit_addr,
	.set_byte_mask			= &dr_ste_v0_set_byte_mask,
	.get_byte_mask			= &dr_ste_v0_get_byte_mask,

	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_NONE, /* v0 advertises no extra action caps */
	.set_actions_rx			= &dr_ste_v0_set_actions_rx,
	.set_actions_tx			= &dr_ste_v0_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v0_action_modify_field_arr,
	.set_action_set			= &dr_ste_v0_set_action_set,
	.set_action_add			= &dr_ste_v0_set_action_add,
	.set_action_copy		= &dr_ste_v0_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v0_set_action_decap_l3_list,
};
1956
/* Return the STE format v0 context ops table */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void)
{
	return &ste_ctx_v0;
}
1961