// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

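/* Longest STE chain a single rule may need: match STEs plus action STEs. */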
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)

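/* Chain new_last_ste at the tail of the miss list: redirect the current
 * last STE's miss address to the new entry and queue a CTRL-sized write
 * for the modified STE on send_list.
 */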
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
	if (!ste_info_last)
		return -ENOMEM;

	mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(last_ste),
						  ste_info_last, send_list, true);

	return 0;
}

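/* Allocate a single-entry hash table that acts as a collision/action
 * holder; its lone STE misses to the matcher's end anchor.
 */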
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;
	u64 icm_addr;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->chunk->ste_arr;
	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}

static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In a collision entry, all members share the same miss_list_head */
	ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}

static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);

	/* Copy data to the STE: only the reduced size or the control part,
	 * since the last 16B (the mask) were already written to HW.
	 */
	if (ste_info->size == DR_STE_SIZE_CTRL)
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_CTRL);
	else
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_REDUCED);

	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);
	kfree(ste_info);

	return ret;
}

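/* Flush every queued ste_info write to HW. In reverse mode the list is
 * walked last-to-first, so that an STE is written before the STE that
 * points to it is updated.
 */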
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	if (list_empty(miss_list))
		return NULL;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
			return ste;
	}

	return NULL;
}

static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In a collision entry, all members share the same miss_list_head */
	new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous entry in the list */
	ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
					  mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed updating duplicate entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this STE
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link the old STE's rule to the new STE */
	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}

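/* Copy a single STE from the current hash table into new_htbl: rebuild its
 * control and tag at the new hash index, falling back to a collision entry
 * when the target slot is already occupied.
 */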
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	u64 icm_addr;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->chunk->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}

static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);

	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->chunk->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher,
						    nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl,
						    update_list);
		if (err)
			break;
	}

	return err;
}

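/* Grow cur_htbl into a table of new_size: allocate the new table, copy all
 * used entries into it, write the new table to HW, and reconnect whatever
 * pointed at cur_htbl (a previous STE or the matcher anchor) to the new one.
 */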
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to HW is done in the regular (forward) order of
	 * rehash_table_send_list, so that the original data is written
	 * before the miss addresses of collision entries, if any exist.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor; an anchor's size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On the matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to call mlx5dr_ste_set_hit_addr on the hw_ste
		 * here (48B long), since it only touches the first 32B
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->chunk->hw_ste_arr,
					mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
					mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));

		ste_to_update = &prev_htbl->chunk->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(ste_to_update),
						  ste_info, update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		kfree(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	kfree(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}

static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size new_size;

	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);

	if (new_size == cur_htbl->chunk->size)
		return NULL; /* Skip rehash, we're already at the max size */

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, new_size);
}

static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
					miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	kfree(ste_info);
	return NULL;
}

static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	struct mlx5dr_rule_action_member *tmp;

	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
		list_del(&action_mem->list);
		refcount_dec(&action_mem->action->refcount);
		kvfree(action_mem);
	}
}

static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}

void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force)
{
	/* Updating the rule member is usually done for the last STE, or
	 * during rule creation to recover from a mid-creation failure
	 * (for this purpose the force flag is used)
	 */
	if (ste->next_htbl && !force)
		return;

	/* Update is required since each rule keeps track of its last STE */
	ste->rule_rx_tx = nic_rule;
	nic_rule->last_rule_ste = ste;
}

static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
{
	struct mlx5dr_ste *first_ste;

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
				     struct mlx5dr_ste, miss_list_node);

	return first_ste->htbl->pointing_ste;
}

int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes)
{
	bool first = false;

	*num_of_stes = 0;

	if (!curr_ste)
		return -ENOENT;

	/* Iterate from last to first */
	while (!first) {
		first = curr_ste->ste_chain_location == 1;
		ste_arr[*num_of_stes] = curr_ste;
		*num_of_stes += 1;
		curr_ste = dr_rule_get_pointed_ste(curr_ste);
	}

	return 0;
}

static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
	int i;

	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
		return;

	while (i--)
		mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}

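/* Count the set bits in byte_mask by repeatedly clearing the lowest set bit. */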
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 bits = 0;

	while (byte_mask) {
		byte_mask = byte_mask & (byte_mask - 1);
		bits++;
	}

	return bits;
}

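/* A hash table is enlarged only if it is still below the SW ICM size limit
 * and the limit implied by its byte mask, and both the collision count and
 * the non-colliding valid-entry count have crossed the increase threshold.
 */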
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int threshold;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
		return false;

	if (!mlx5dr_ste_htbl_may_grow(htbl))
		return false;

	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
		return false;

	threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
	if (ctrl->num_of_collisions >= threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
		return true;

	return false;
}

static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz: the actions fit
	 *    in the existing STEs.
	 * 2. num_of_builders is less than new_hw_ste_arr_sz: new STEs were
	 *    added to support the actions.
	 */

	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		action_ste->htbl->pointing_ste = last_ste;
		last_ste->next_htbl = action_ste->htbl;
		last_ste = action_ste;

		/* When freeing an STE we walk its miss list, so add this STE to its own list */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
					  GFP_KERNEL);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point the current STE to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);

		mlx5dr_rule_set_last_member(nic_rule, action_ste, true);

		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	last_ste->next_htbl = NULL;

	return 0;

err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}

static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	u64 icm_addr;

	/* Take a reference on the table, only on the first time this STE is used */
	mlx5dr_htbl_get(cur_htbl);

	/* New entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);

	ste->ste_chain_location = ste_location;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	kfree(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}

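/* Place hw_ste in cur_htbl at its hash index: take the slot if it is free,
 * return the existing STE on an exact tag match, rehash the table once if
 * it is overloaded, or chain a collision entry onto the miss list.
 */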
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->chunk->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this STE is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If this is the last STE in the chain and it has the
			 * same tag, then all the previous STEs match as well,
			 * i.e. this rule is a duplicate.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize the hash table */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk->size);
				mlx5dr_htbl_put(cur_htbl);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}

static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 i;

	for (i = s_idx; i < e_idx; i++) {
		if (value[i] & ~mask[i]) {
			pr_info("Rule parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		s_idx = offsetof(struct mlx5dr_match_param, misc5);
		e_idx = min(s_idx + sizeof(param->misc5), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	/* Check if this nic rule was actually created, or was it skipped
	 * and only the other type of the RX/TX nic rule was created.
	 */
	if (!nic_rule->last_rule_ste)
		return 0;

	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
	dr_rule_clean_rule_members(rule, nic_rule);

	nic_rule->nic_matcher->rules--;
	if (!nic_rule->nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
						   nic_rule->nic_matcher);

	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);

	return 0;
}

static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	dr_rule_destroy_rule_nic(rule, &rule->tx);
	return 0;
}

static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	mlx5dr_dbg_rule_del(rule);

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		dr_rule_destroy_rule_nic(rule, &rule->rx);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		dr_rule_destroy_rule_nic(rule, &rule->tx);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		dr_rule_destroy_rule_fdb(rule);
		break;
	default:
		return -EINVAL;
	}

	dr_rule_remove_action_members(rule);
	kfree(rule);
	return 0;
}

static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
		return DR_RULE_IPV6;

	return DR_RULE_IPV4;
}

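/* On an FDB domain each rule is inserted twice (RX and TX). Skip the side
 * that can never be hit, based on the matched source port and on the
 * flow_source hint supplied by the caller.
 */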
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
			 enum mlx5dr_domain_nic_type nic_type,
			 struct mlx5dr_match_param *mask,
			 struct mlx5dr_match_param *value,
			 u32 flow_source)
{
	bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;

	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
		return false;

	if (mask->misc.source_port) {
		if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
			return true;

		if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
			return true;
	}

	if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
		return true;

	if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
		return true;

	return false;
}

static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
	if (!hw_ste_arr)
		return -ENOMEM;

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
	if (ret)
		goto free_hw_ste;

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the tag values inside the STE array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the actions values/addresses inside the STE array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto remove_from_nic_tbl;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs and build a dr_ste for each. The loop
	 * covers only the builders, whose count may be smaller than the
	 * number of STEs when actions live in additional STEs.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new STE entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		mlx5dr_ste_get(ste);
		mlx5dr_rule_set_last_member(nic_rule, ste, true);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed applying actions\n");
		goto free_rule;
	}

	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	nic_matcher->rules++;

	mlx5dr_domain_nic_unlock(nic_dmn);

	kfree(hw_ste_arr);

	return 0;

free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		kfree(ste_info);
	}

remove_from_nic_tbl:
	mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);

free_hw_ste:
	mlx5dr_domain_nic_unlock(nic_dmn);
	kfree(hw_ste_arr);
	return ret;
}

static int
dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_match_param copy_param = {};
	int ret;

	/* Copy match_param, since it is consumed during the first
	 * nic_rule insertion.
	 */
	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));

	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
				      num_actions, actions);
	if (ret)
		return ret;

	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
				      num_actions, actions);
	if (ret)
		goto destroy_rule_nic_rx;

	return 0;

destroy_rule_nic_rx:
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	return ret;
}

static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
		    struct mlx5dr_match_parameters *value,
		    size_t num_actions,
		    struct mlx5dr_action *actions[],
		    u32 flow_source)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_match_param param = {};
	struct mlx5dr_rule *rule;
	int ret;

	if (!dr_rule_verify(matcher, value, &param))
		return NULL;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->matcher = matcher;
	rule->flow_source = flow_source;
	INIT_LIST_HEAD(&rule->rule_actions_list);

	ret = dr_rule_add_action_members(rule, num_actions, actions);
	if (ret)
		goto free_rule;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		rule->rx.nic_matcher = &matcher->rx;
		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		rule->rx.nic_matcher = &matcher->rx;
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_fdb(rule, &param,
					      num_actions, actions);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto remove_action_members;

	INIT_LIST_HEAD(&rule->dbg_node);
	mlx5dr_dbg_rule_add(rule);
	return rule;

remove_action_members:
	dr_rule_remove_action_members(rule);
free_rule:
	kfree(rule);
	mlx5dr_err(dmn, "Failed creating rule\n");
	return NULL;
}

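/* Create a rule on the given matcher. A reference on the matcher is held
 * for the rule's lifetime and dropped if creation fails.
 */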
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_match_parameters *value,
				       size_t num_actions,
				       struct mlx5dr_action *actions[],
				       u32 flow_source)
{
	struct mlx5dr_rule *rule;

	refcount_inc(&matcher->refcount);

	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
	if (!rule)
		refcount_dec(&matcher->refcount);

	return rule;
}

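/* Destroy a rule and drop the matcher reference taken at creation. */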
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	ret = dr_rule_destroy_rule(rule);
	if (!ret)
		refcount_dec(&matcher->refcount);

	return ret;
}