1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7 
/* Layout of a full-sized hardware STE: control words followed by the
 * match tag and the per-bit match mask.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
13 
dr_ste_crc32_calc(const void * input_data,size_t length)14 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15 {
16 	u32 crc = crc32(0, input_data, length);
17 
18 	return (__force u32)htonl(crc);
19 }
20 
mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps * caps)21 bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
22 {
23 	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
24 }
25 
mlx5dr_ste_calc_hash_index(u8 * hw_ste_p,struct mlx5dr_ste_htbl * htbl)26 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
27 {
28 	u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
29 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
30 	u8 masked[DR_STE_SIZE_TAG] = {};
31 	u32 crc32, index;
32 	u16 bit;
33 	int i;
34 
35 	/* Don't calculate CRC if the result is predicted */
36 	if (num_entries == 1 || htbl->byte_mask == 0)
37 		return 0;
38 
39 	/* Mask tag using byte mask, bit per byte */
40 	bit = 1 << (DR_STE_SIZE_TAG - 1);
41 	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
42 		if (htbl->byte_mask & bit)
43 			masked[i] = hw_ste->tag[i];
44 
45 		bit = bit >> 1;
46 	}
47 
48 	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
49 	index = crc32 & (num_entries - 1);
50 
51 	return index;
52 }
53 
mlx5dr_ste_conv_bit_to_byte_mask(u8 * bit_mask)54 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
55 {
56 	u16 byte_mask = 0;
57 	int i;
58 
59 	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
60 		byte_mask = byte_mask << 1;
61 		if (bit_mask[i] == 0xff)
62 			byte_mask |= 1;
63 	}
64 	return byte_mask;
65 }
66 
/* Return a pointer to the tag area inside a full-sized hw_ste */
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	return ((struct dr_hw_ste_format *)hw_ste_p)->tag;
}
73 
/* Copy the builder's bit mask into the mask area of a full-sized hw_ste */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste;

	hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
80 
dr_ste_set_always_hit(struct dr_hw_ste_format * hw_ste)81 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
82 {
83 	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
84 	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
85 }
86 
/* Make the STE unmatchable: a non-zero tag byte combined with a zeroed
 * mask byte can never be satisfied, so lookups always miss.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
92 
/* Program @miss_addr as the miss destination of @hw_ste_p through the
 * device-format-specific callback.
 */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
98 
/* Turn @hw_ste into an "always miss" entry that forwards to @miss_addr:
 * next lookup type is don't-care and the tag/mask pair is made
 * unmatchable.
 */
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    u8 *hw_ste, u64 miss_addr)
{
	ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}
106 
/* Program the hit destination (next table ICM address and size) on
 * @hw_ste through the device-format-specific callback.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
112 
mlx5dr_ste_get_icm_addr(struct mlx5dr_ste * ste)113 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
114 {
115 	u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
116 	u32 index = ste - ste->htbl->chunk->ste_arr;
117 
118 	return base_icm_addr + DR_STE_SIZE * index;
119 }
120 
mlx5dr_ste_get_mr_addr(struct mlx5dr_ste * ste)121 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
122 {
123 	u32 index = ste - ste->htbl->chunk->ste_arr;
124 
125 	return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
126 }
127 
mlx5dr_ste_get_hw_ste(struct mlx5dr_ste * ste)128 u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
129 {
130 	u64 index = ste - ste->htbl->chunk->ste_arr;
131 
132 	return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
133 }
134 
mlx5dr_ste_get_miss_list(struct mlx5dr_ste * ste)135 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
136 {
137 	u32 index = ste - ste->htbl->chunk->ste_arr;
138 
139 	return &ste->htbl->chunk->miss_list[index];
140 }
141 
/* Make @hw_ste an "always hit" entry that chains into @next_htbl:
 * program the next table's byte mask, lookup type and hit address,
 * then clear the tag/mask so every packet matches.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
				   u8 *hw_ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
	ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
			      mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));

	dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
}
155 
mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx * nic_matcher,u8 ste_location)156 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
157 				u8 ste_location)
158 {
159 	return ste_location == nic_matcher->num_of_builders;
160 }
161 
162 /* Replace relevant fields, except of:
163  * htbl - keep the origin htbl
164  * miss_list + list - already took the src from the list.
165  * icm_addr/mr_addr - depends on the hosting table.
166  *
167  * Before:
168  * | a | -> | b | -> | c | ->
169  *
170  * After:
171  * | a | -> | c | ->
 * While the data that was in b is copied to a.
173  */
dr_ste_replace(struct mlx5dr_ste * dst,struct mlx5dr_ste * src)174 static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
175 {
176 	memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
177 	       DR_STE_SIZE_REDUCED);
178 	dst->next_htbl = src->next_htbl;
179 	if (dst->next_htbl)
180 		dst->next_htbl->pointing_ste = dst;
181 
182 	dst->refcount = src->refcount;
183 }
184 
185 /* Free ste which is the head and the only one in miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	u64 miss_addr;

	/* The emptied slot is redirected to the matcher's end anchor */
	miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
	 */
	memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
	dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
	/* Mirror the change back into the cached reduced copy */
	memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
218 
219 /* Free ste which is the head but NOT the only one in miss_list:
220  * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
221  */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Copy the reduced hw_ste into a zeroed full-sized buffer, then
	 * restore the bit mask from this chain location's builder (the
	 * reduced image does not carry the mask area).
	 */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	/* Queue the full-sized STE for writing to HW */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
266 
267 /* Free ste that is located in the middle of the miss list:
268  * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
269  */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Bypass @ste: point the previous STE's miss address at @ste's
	 * own miss target.
	 */
	miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
	ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);

	/* Only the control words of prev_ste changed, so send just them */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  mlx5dr_ste_get_hw_ste(prev_ste),
						  ste_info, send_ste_list,
						  true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
296 
/* Release @ste from its matcher: unlink it from the miss list (handling
 * head / head-with-successors / middle positions differently), queue the
 * required HW updates, post them, and drop the hash-table reference.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	/* Table counters are tracked on the head STE's table */
	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			/* The successor's table was released instead of
			 * the origin table, so skip the final put.
			 */
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
362 
mlx5dr_ste_equal_tag(void * src,void * dst)363 bool mlx5dr_ste_equal_tag(void *src, void *dst)
364 {
365 	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
366 	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
367 
368 	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
369 }
370 
/* Point @hw_ste's hit address at @next_htbl's chunk */
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_hit_addr(hw_ste,
			      mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
			      mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
}
381 
/* Run the optional per-format fixup before an STE is written to HW */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (!ste_ctx->prepare_for_postsend)
		return;

	ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
388 
389 /* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
	u8 tmp_hw_ste[DR_STE_SIZE] = {0};

	/* Base initialization for this table's lookup type and direction */
	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);

	/* Use temp ste because dr_ste_always_miss_addr/hit_htbl
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
	 */
	memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
				       connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
					connect_info->miss_icm_addr);
	memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
}
415 
/* Build the per-table STE pattern and push it to every entry of @htbl */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 ste_pattern[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi,
				     nic_dmn->type, htbl, ste_pattern,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, ste_pattern,
						   update_hw_ste);
}
433 
/* Allocate the next hash table in the STE chain (unless @ste is the last
 * in the rule), write it to HW connected to the matcher's end anchor,
 * and link it from @cur_hw_ste / @ste.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT when
 * writing the new table to HW fails.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* The new table inherits the lookup type and byte mask
		 * already programmed on the current STE.
		 */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr =
			mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
484 
/* Allocate a hash table backed by an ICM chunk of @chunk_size and
 * initialize every STE slot and its miss list. Returns NULL on failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	u32 num_entries;
	int idx;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk) {
		kfree(htbl);
		return NULL;
	}

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->refcount = 0;

	num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
	for (idx = 0; idx < num_entries; idx++) {
		chunk->ste_arr[idx].htbl = htbl;
		chunk->ste_arr[idx].refcount = 0;
		INIT_LIST_HEAD(&chunk->ste_arr[idx].miss_list_node);
		INIT_LIST_HEAD(&chunk->miss_list[idx]);
	}

	return htbl;
}
523 
mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl * htbl)524 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
525 {
526 	if (htbl->refcount)
527 		return -EBUSY;
528 
529 	mlx5dr_icm_free_chunk(htbl->chunk);
530 	kfree(htbl);
531 	return 0;
532 }
533 
/* Encode the TX action set into @hw_ste_arr via the format-specific
 * callback; @added_stes is updated with any extra STEs consumed.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}
544 
/* Encode the RX action set into @hw_ste_arr via the format-specific
 * callback; @added_stes is updated with any extra STEs consumed.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}
555 
556 const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx * ste_ctx,u16 sw_field)557 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
558 {
559 	const struct mlx5dr_ste_action_modify_field *hw_field;
560 
561 	if (sw_field >= ste_ctx->modify_field_arr_sz)
562 		return NULL;
563 
564 	hw_field = &ste_ctx->modify_field_arr[sw_field];
565 	if (!hw_field->end && !hw_field->start)
566 		return NULL;
567 
568 	return hw_field;
569 }
570 
/* Encode a modify-header SET action into @hw_action */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
581 
/* Encode a modify-header ADD action into @hw_action */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
592 
/* Encode a modify-header COPY action (src field -> dst field) into
 * @hw_action.
 */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
605 
/* Build the HW action list for L3 decap of an inline header.
 * Returns -EINVAL for unsupported header sizes.
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz, hw_action,
						 hw_action_sz,
						 used_hw_action_num);
}
619 
dr_ste_build_pre_check_spec(struct mlx5dr_domain * dmn,struct mlx5dr_match_spec * spec)620 static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
621 				       struct mlx5dr_match_spec *spec)
622 {
623 	if (spec->ip_version) {
624 		if (spec->ip_version != 0xf) {
625 			mlx5dr_err(dmn,
626 				   "Partial ip_version mask with src/dst IP is not supported\n");
627 			return -EINVAL;
628 		}
629 	} else if (spec->ethertype != 0xffff &&
630 		   (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
631 		mlx5dr_err(dmn,
632 			   "Partial/no ethertype mask with src/dst IP is not supported\n");
633 		return -EINVAL;
634 	}
635 
636 	return 0;
637 }
638 
/* Validate the matcher mask before building STEs; a non-NULL @value
 * means a rule (not a mask) is being checked and needs no validation.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	struct mlx5dr_match_misc *misc;

	if (value)
		return 0;

	misc = &mask->misc;
	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (misc->source_port && misc->source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (misc->source_eswitch_owner_vhca_id &&
		    misc->source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
		return -EINVAL;

	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
		return -EINVAL;

	return 0;
}
671 
/* Build the chain of STEs for a rule into @ste_arr: one STE per builder,
 * each initialized, masked, tagged and linked to the next builder's
 * lookup type and byte mask.
 *
 * Returns 0 on success or a negative errno from the pre-check or a
 * builder's tag function.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		/* Fill the tag area from the rule values */
		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
715 
/* Read field @fld of @p and, when @clear is set, zero the field in
 * place; evaluates to the value read.
 */
#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})
723 
/* memcpy() @len bytes from @from to @to and, when @clear is set, zero
 * the source afterwards.
 */
#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})
731 
/* Unpack the fte_match_set_misc fields from the device mask layout
 * @mask into @spec; when @clr is set, consumed fields are zeroed in
 * @mask so leftovers can be detected by the caller.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_tlv_option_0_exist =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
783 
/* Unpack the fte_match_set_lyr_2_4 (L2-L4) fields from the device mask
 * layout @mask into @spec; when @clr is set, consumed fields are zeroed
 * in @mask. IPv6 addresses are read as four big-endian words and stored
 * as host-order 32-bit chunks.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
835 
/* Parse the misc2 section of a device-format match mask into @spec:
 * MPLS headers (outer/inner and MPLS-over-GRE/UDP) plus the metadata
 * registers.  @clr as in dr_ste_copy_mask_spec().
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	/* Metadata registers reg_c_7..reg_c_0 and reg_a */
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}
880 
/* Parse the misc3 section of a device-format match mask into @spec:
 * TCP seq/ack, VXLAN-GPE, ICMPv4/v6, GENEVE TLV option data and GTP-U
 * fields.  Note the ICMPv4 destination fields are fed from the generic
 * icmp_* mask fields while ICMPv6 has dedicated ones.
 * @clr as in dr_ste_copy_mask_spec().
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
910 
/* Parse the misc4 section of a device-format match mask into @spec:
 * the four programmable (flex parser) sample field id/value pairs.
 * @clr as in dr_ste_copy_mask_spec().
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
930 
/* Parse the misc5 section of a device-format match mask into @spec:
 * MACsec tag dwords and generic tunnel header dwords.
 * @clr as in dr_ste_copy_mask_spec().
 */
static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
	spec->macsec_tag_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
	spec->macsec_tag_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
	spec->macsec_tag_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
	spec->macsec_tag_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
	spec->tunnel_header_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
	spec->tunnel_header_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
	spec->tunnel_header_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
	spec->tunnel_header_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}
950 
/* Return a parse buffer for one section of the caller-supplied mask.
 *
 * When the caller's buffer fully covers the section at @param_location,
 * parse it in place.  Otherwise copy the available bytes into the
 * scratch buffer @tail_param (re-zeroed here so a shorter partial copy
 * never sees stale bytes from a previous section) and parse that, so
 * the missing tail reads as zeros.
 *
 * The explicit "match_sz > param_location" check fixes a size_t
 * underflow in the previous code: if a criteria bit was set while
 * match_sz was smaller than the section offset, "match_sz -
 * param_location" wrapped around and memcpy read far out of bounds.
 */
static void *dr_ste_copy_param_get_buf(u8 *data, size_t match_sz,
				       size_t param_location,
				       size_t param_sz,
				       u8 *tail_param, size_t tail_sz)
{
	size_t avail;

	if (match_sz >= param_location + param_sz)
		return data + param_location;

	avail = match_sz > param_location ? match_sz - param_location : 0;
	memset(tail_param, 0, tail_sz);
	memcpy(tail_param, data + param_location, avail);
	return tail_param;
}

/* Parse the raw device-format match mask @mask into the per-section
 * fields of @set_param.  Only sections whose criteria bit is set in
 * @match_criteria are parsed; sections are laid out back-to-back in the
 * buffer in the fixed order outer-spec, misc, inner-spec, misc2..misc5.
 * @clr: when true, the parsed fields are also cleared in the source
 * mask (see the dr_ste_copy_mask_* helpers above).
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask,
			   bool clr)
{
	/* Scratch for a partially-supplied section; all fte_match_set_*
	 * sections share this size.
	 */
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location = 0;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		buff = dr_ste_copy_param_get_buf(data, mask->match_sz,
						 param_location,
						 sizeof(struct mlx5dr_match_spec),
						 tail_param, sizeof(tail_param));
		dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		buff = dr_ste_copy_param_get_buf(data, mask->match_sz,
						 param_location,
						 sizeof(struct mlx5dr_match_misc),
						 tail_param, sizeof(tail_param));
		dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		buff = dr_ste_copy_param_get_buf(data, mask->match_sz,
						 param_location,
						 sizeof(struct mlx5dr_match_spec),
						 tail_param, sizeof(tail_param));
		dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		buff = dr_ste_copy_param_get_buf(data, mask->match_sz,
						 param_location,
						 sizeof(struct mlx5dr_match_misc2),
						 tail_param, sizeof(tail_param));
		dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		buff = dr_ste_copy_param_get_buf(data, mask->match_sz,
						 param_location,
						 sizeof(struct mlx5dr_match_misc3),
						 tail_param, sizeof(tail_param));
		dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc3);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		buff = dr_ste_copy_param_get_buf(data, mask->match_sz,
						 param_location,
						 sizeof(struct mlx5dr_match_misc4),
						 tail_param, sizeof(tail_param));
		dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc4);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		buff = dr_ste_copy_param_get_buf(data, mask->match_sz,
						 param_location,
						 sizeof(struct mlx5dr_match_misc5),
						 tail_param, sizeof(tail_param));
		dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
	}
}
1052 
/* Initialize an STE builder matching on L2 source + destination MACs. */
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
1062 
/* Initialize an STE builder matching on the IPv6 destination address. */
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
1072 
/* Initialize an STE builder matching on the IPv6 source address. */
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
1082 
/* Initialize an STE builder matching on the IPv4 5-tuple. */
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
1092 
/* Initialize an STE builder matching on L2 source fields. */
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}
1102 
/* Initialize an STE builder matching on L2 destination fields. */
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}
1112 
/* Initialize an STE builder matching on L2 tunnel fields. */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
1121 
/* Initialize an STE builder matching on miscellaneous IPv4 L3 fields. */
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
1131 
/* Initialize an STE builder matching on IPv6 L3/L4 fields. */
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
1141 
/* Tag-builder callback for the "always hit" STE: it matches nothing,
 * so there is no tag to fill in and it trivially succeeds.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1148 
/* Set up a catch-all builder: a don't-care lookup type with an empty
 * byte mask, so every packet matches this STE.
 */
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->byte_mask = 0;
	sb->ste_build_tag_func = dr_ste_build_empty_always_hit_tag;
	sb->rx = rx;
}
1156 
/* Initialize an STE builder matching on MPLS header fields. */
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_mpls_init(sb, mask);
}
1166 
/* Initialize an STE builder matching on GRE tunnel header fields. */
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gre_init(sb, mask);
}
1176 
/* Initialize an STE builder matching on MPLS-over-GRE tunnel fields.
 * @caps is stored for the init callback, which needs device caps.
 */
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	/* Was "return ste_ctx->...init(sb, mask);" — returning a void
	 * expression from a void function is a C constraint violation
	 * (accepted only as a compiler extension); also inconsistent
	 * with every sibling builder here.
	 */
	ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
}
1188 
/* Initialize an STE builder matching on MPLS-over-UDP tunnel fields.
 * @caps is stored for the init callback, which needs device caps.
 */
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	/* Was "return ste_ctx->...init(sb, mask);" — returning a void
	 * expression from a void function is a C constraint violation
	 * (accepted only as a compiler extension); also inconsistent
	 * with every sibling builder here.
	 */
	ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}
1200 
/* Initialize an STE builder matching on ICMP fields; caps are kept for
 * the init callback.
 */
void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   struct mlx5dr_cmd_caps *caps,
			   bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_icmp_init(sb, mask);
}
1212 
/* Initialize an STE builder for the general-purpose lookup. */
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_general_purpose_init(sb, mask);
}
1222 
/* Initialize an STE builder matching on miscellaneous L4 fields. */
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l4_misc_init(sb, mask);
}
1232 
/* Initialize an STE builder matching on VXLAN-GPE tunnel fields. */
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
1242 
/* Initialize an STE builder matching on GENEVE tunnel fields. */
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_init(sb, mask);
}
1252 
/* Initialize an STE builder matching on GENEVE TLV option data; caps
 * are kept for the init callback.
 */
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
					 struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask,
					 struct mlx5dr_cmd_caps *caps,
					 bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
1264 
/* Initialize an STE builder matching on GENEVE TLV option existence.
 * Silently a no-op on STE contexts that don't provide this callback.
 */
void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
					       struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       bool inner, bool rx)
{
	/* Optional per-context capability — bail out when unsupported */
	if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
		return;

	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
}
1279 
/* Initialize an STE builder matching on GTP-U tunnel fields. */
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_init(sb, mask);
}
1289 
/* Initialize an STE builder matching GTP-U via flex parser 0; caps are
 * kept for the init callback.
 */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
}
1301 
/* Initialize an STE builder matching GTP-U via flex parser 1; caps are
 * kept for the init callback.
 */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
}
1313 
/* Initialize an STE builder matching on metadata register set 0. */
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_0_init(sb, mask);
}
1323 
/* Initialize an STE builder matching on metadata register set 1. */
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_1_init(sb, mask);
}
1333 
/* Initialize an STE builder matching on source GVMI/QPN; also keeps a
 * reference to the owning domain for the init callback.
 */
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Latch vhca_id_valid first: the init callback below resets
	 * source_eswitch_owner_vhca_id in the mask.
	 */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	sb->inner = inner;
	sb->dmn = dmn;
	sb->rx = rx;
	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
1348 
/* Initialize an STE builder matching on flex parser set 0. */
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_0_init(sb, mask);
}
1358 
/* Initialize an STE builder matching on flex parser set 1. */
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_1_init(sb, mask);
}
1368 
/* Initialize an STE builder matching on tunnel header dwords 0-1. */
void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
1378 
/* Return the STE context matching the device steering format version,
 * or NULL when the version is unsupported.
 */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	switch (version) {
	case MLX5_STEERING_FORMAT_CONNECTX_5:
		return mlx5dr_ste_get_ctx_v0();
	case MLX5_STEERING_FORMAT_CONNECTX_6DX:
		return mlx5dr_ste_get_ctx_v1();
	case MLX5_STEERING_FORMAT_CONNECTX_7:
		return mlx5dr_ste_get_ctx_v2();
	default:
		return NULL;
	}
}
1390