1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <net/flow_dissector.h>
34 #include <net/flow_offload.h>
35 #include <net/sch_generic.h>
36 #include <net/pkt_cls.h>
37 #include <linux/mlx5/fs.h>
38 #include <linux/mlx5/device.h>
39 #include <linux/rhashtable.h>
40 #include <linux/refcount.h>
41 #include <linux/completion.h>
42 #include <net/arp.h>
43 #include <net/ipv6_stubs.h>
44 #include <net/bareudp.h>
45 #include <net/bonding.h>
46 #include "en.h"
47 #include "en/tc/post_act.h"
48 #include "en_rep.h"
49 #include "en/rep/tc.h"
50 #include "en/rep/neigh.h"
51 #include "en_tc.h"
52 #include "eswitch.h"
53 #include "fs_core.h"
54 #include "en/port.h"
55 #include "en/tc_tun.h"
56 #include "en/mapping.h"
57 #include "en/tc_ct.h"
58 #include "en/mod_hdr.h"
59 #include "en/tc_tun_encap.h"
60 #include "en/tc/sample.h"
61 #include "en/tc/act/act.h"
62 #include "en/tc/post_meter.h"
63 #include "lib/devcom.h"
64 #include "lib/geneve.h"
65 #include "lib/fs_chains.h"
66 #include "diag/en_tc_tracepoint.h"
67 #include <asm/div64.h>
68 #include "lag/lag.h"
69 #include "lag/mp.h"
70 
71 #define MLX5E_TC_TABLE_NUM_GROUPS 4
72 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
73 
74 struct mlx5e_tc_table {
75 	/* Protects the dynamic assignment of the t parameter
76 	 * which is the nic tc root table.
77 	 */
78 	struct mutex			t_lock;
79 	struct mlx5e_priv		*priv;
80 	struct mlx5_flow_table		*t;
81 	struct mlx5_flow_table		*miss_t;
82 	struct mlx5_fs_chains           *chains;
83 	struct mlx5e_post_act		*post_act;
84 
85 	struct rhashtable               ht;
86 
87 	struct mod_hdr_tbl mod_hdr;
88 	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
89 	DECLARE_HASHTABLE(hairpin_tbl, 8);
90 
91 	struct notifier_block     netdevice_nb;
92 	struct netdev_net_notifier	netdevice_nn;
93 
94 	struct mlx5_tc_ct_priv         *ct;
95 	struct mapping_ctx             *mapping;
96 };
97 
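/* Per-attribute mapping to the metadata register (field), bit offset and
 * length used when writing the value with a modify-header action, plus the
 * match offset in the fte_match_param used to match the value back.
 */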
98 struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
99 	[CHAIN_TO_REG] = {
100 		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
101 		.moffset = 0,
102 		.mlen = 16,
103 	},
104 	[VPORT_TO_REG] = {
105 		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
106 		.moffset = 16,
107 		.mlen = 16,
108 	},
109 	[TUNNEL_TO_REG] = {
110 		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
111 		.moffset = 8,
112 		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
113 		.soffset = MLX5_BYTE_OFF(fte_match_param,
114 					 misc_parameters_2.metadata_reg_c_1),
115 	},
116 	[ZONE_TO_REG] = zone_to_reg_ct,
117 	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
118 	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
119 	[MARK_TO_REG] = mark_to_reg_ct,
120 	[LABELS_TO_REG] = labels_to_reg_ct,
121 	[FTEID_TO_REG] = fteid_to_reg_ct,
122 	/* For NIC rules we store the restore metadata directly
123 	 * into reg_b that is passed to SW since we don't
124 	 * jump between steering domains.
125 	 */
126 	[NIC_CHAIN_TO_REG] = {
127 		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
128 		.moffset = 0,
129 		.mlen = 16,
130 	},
131 	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
132 	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
133 };
134 
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
136 {
137 	struct mlx5e_tc_table *tc;
138 
139 	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
140 	return tc ? tc : ERR_PTR(-ENOMEM);
141 }
142 
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
144 {
145 	kvfree(tc);
146 }
147 
struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
149 {
150 	return tc->chains;
151 }
152 
/* To avoid a false lock dependency warning, set the tc_ht lock class to be
 * different from the lock class of the rhashtables used elsewhere. When the
 * last flow of a group is deleted and the group itself is then removed,
 * del_sw_flow_group() calls rhashtable_destroy() on fg->ftes_hash, which
 * takes that table's ht->mutex, a different mutex than the tc_ht one here.
 */
159 static struct lock_class_key tc_ht_lock_key;
160 static struct lock_class_key tc_ht_wq_key;
161 
162 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
163 static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
164 
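/* Merge a value/mask pair into the metadata register match of @spec: only the
 * bits covered by the @type mapping are overwritten, any match bits already
 * set in the same register are preserved.
 */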
165 void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
167 			    enum mlx5e_tc_attr_to_reg type,
168 			    u32 val,
169 			    u32 mask)
170 {
171 	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
172 	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
173 	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
174 	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
175 	u32 max_mask = GENMASK(match_len - 1, 0);
176 	__be32 curr_mask_be, curr_val_be;
177 	u32 curr_mask, curr_val;
178 
179 	fmask = headers_c + soffset;
180 	fval = headers_v + soffset;
181 
182 	memcpy(&curr_mask_be, fmask, 4);
183 	memcpy(&curr_val_be, fval, 4);
184 
185 	curr_mask = be32_to_cpu(curr_mask_be);
186 	curr_val = be32_to_cpu(curr_val_be);
187 
188 	//move to correct offset
189 	WARN_ON(mask > max_mask);
190 	mask <<= moffset;
191 	val <<= moffset;
192 	max_mask <<= moffset;
193 
194 	//zero val and mask
195 	curr_mask &= ~max_mask;
196 	curr_val &= ~max_mask;
197 
198 	//add current to mask
199 	curr_mask |= mask;
200 	curr_val |= val;
201 
202 	//back to be32 and write
203 	curr_mask_be = cpu_to_be32(curr_mask);
204 	curr_val_be = cpu_to_be32(curr_val);
205 
206 	memcpy(fmask, &curr_mask_be, 4);
207 	memcpy(fval, &curr_val_be, 4);
208 
209 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
210 }
211 
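/* Read back the value/mask currently matched on the metadata register mapped
 * to @type.
 */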
212 void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
214 				enum mlx5e_tc_attr_to_reg type,
215 				u32 *val,
216 				u32 *mask)
217 {
218 	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
219 	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
220 	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
221 	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
222 	u32 max_mask = GENMASK(match_len - 1, 0);
223 	__be32 curr_mask_be, curr_val_be;
224 	u32 curr_mask, curr_val;
225 
226 	fmask = headers_c + soffset;
227 	fval = headers_v + soffset;
228 
229 	memcpy(&curr_mask_be, fmask, 4);
230 	memcpy(&curr_val_be, fval, 4);
231 
232 	curr_mask = be32_to_cpu(curr_mask_be);
233 	curr_val = be32_to_cpu(curr_val_be);
234 
235 	*mask = (curr_mask >> moffset) & max_mask;
236 	*val = (curr_val >> moffset) & max_mask;
237 }
238 
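/* Append a SET modify-header action that writes @data into the register mapped
 * to @type. Returns the index of the new action within @mod_hdr_acts (so it
 * can be rewritten later) or a negative errno on allocation failure.
 */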
239 int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
241 				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
242 				     enum mlx5_flow_namespace_type ns,
243 				     enum mlx5e_tc_attr_to_reg type,
244 				     u32 data)
245 {
246 	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
247 	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
248 	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
249 	char *modact;
250 	int err;
251 
252 	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
253 	if (IS_ERR(modact))
254 		return PTR_ERR(modact);
255 
	/* The firmware length field is 5 bits wide and 0 means 32 bits */
257 	if (mlen == 32)
258 		mlen = 0;
259 
260 	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
261 	MLX5_SET(set_action_in, modact, field, mfield);
262 	MLX5_SET(set_action_in, modact, offset, moffset);
263 	MLX5_SET(set_action_in, modact, length, mlen);
264 	MLX5_SET(set_action_in, modact, data, data);
265 	err = mod_hdr_acts->num_actions;
266 	mod_hdr_acts->num_actions++;
267 
268 	return err;
269 }
270 
271 struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
273 {
274 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
275 	struct mlx5_rep_uplink_priv *uplink_priv;
276 	struct mlx5e_rep_priv *uplink_rpriv;
277 
278 	if (is_mdev_switchdev_mode(priv->mdev)) {
279 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
280 		uplink_priv = &uplink_rpriv->uplink_priv;
281 
282 		return uplink_priv->int_port_priv;
283 	}
284 
285 	return NULL;
286 }
287 
288 struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
290 {
291 	struct mlx5_eswitch *esw = dev->priv.eswitch;
292 	struct mlx5_rep_uplink_priv *uplink_priv;
293 	struct mlx5e_rep_priv *uplink_rpriv;
294 	struct mlx5e_priv *priv;
295 
296 	if (is_mdev_switchdev_mode(dev)) {
297 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
298 		uplink_priv = &uplink_rpriv->uplink_priv;
299 		priv = netdev_priv(uplink_rpriv->netdev);
300 		if (!uplink_priv->flow_meters)
301 			uplink_priv->flow_meters =
302 				mlx5e_flow_meters_init(priv,
303 						       MLX5_FLOW_NAMESPACE_FDB,
304 						       uplink_priv->post_act);
305 		if (!IS_ERR(uplink_priv->flow_meters))
306 			return uplink_priv->flow_meters;
307 	}
308 
309 	return NULL;
310 }
311 
312 static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
314 {
315 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
316 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
317 	struct mlx5_rep_uplink_priv *uplink_priv;
318 	struct mlx5e_rep_priv *uplink_rpriv;
319 
320 	if (is_mdev_switchdev_mode(priv->mdev)) {
321 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
322 		uplink_priv = &uplink_rpriv->uplink_priv;
323 
324 		return uplink_priv->ct_priv;
325 	}
326 
327 	return tc->ct;
328 }
329 
330 static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
332 {
333 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
334 	struct mlx5_rep_uplink_priv *uplink_priv;
335 	struct mlx5e_rep_priv *uplink_rpriv;
336 
337 	if (is_mdev_switchdev_mode(priv->mdev)) {
338 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
339 		uplink_priv = &uplink_rpriv->uplink_priv;
340 
341 		return uplink_priv->tc_psample;
342 	}
343 
344 	return NULL;
345 }
346 
347 static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
349 {
350 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
351 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
352 	struct mlx5_rep_uplink_priv *uplink_priv;
353 	struct mlx5e_rep_priv *uplink_rpriv;
354 
355 	if (is_mdev_switchdev_mode(priv->mdev)) {
356 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
357 		uplink_priv = &uplink_rpriv->uplink_priv;
358 
359 		return uplink_priv->post_act;
360 	}
361 
362 	return tc->post_act;
363 }
364 
365 struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
367 		    struct mlx5_flow_spec *spec,
368 		    struct mlx5_flow_attr *attr)
369 {
370 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
371 
372 	if (is_mdev_switchdev_mode(priv->mdev))
373 		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
374 
375 	return	mlx5e_add_offloaded_nic_rule(priv, spec, attr);
376 }
377 
378 void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
380 		    struct mlx5_flow_handle *rule,
381 		    struct mlx5_flow_attr *attr)
382 {
383 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
384 
385 	if (is_mdev_switchdev_mode(priv->mdev)) {
386 		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
387 		return;
388 	}
389 
390 	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
391 }
392 
393 static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
395 {
396 	return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
397 		(attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
398 }
399 
400 static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
402 			struct mlx5_flow_attr *attr)
403 {
404 	struct mlx5e_post_act *post_act = get_post_action(priv);
405 	struct mlx5e_post_meter_priv *post_meter;
406 	enum mlx5_flow_namespace_type ns_type;
407 	struct mlx5e_flow_meter_handle *meter;
408 
409 	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
410 	if (IS_ERR(meter)) {
411 		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
412 		return PTR_ERR(meter);
413 	}
414 
415 	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
416 	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
417 					   meter->red_counter);
418 	if (IS_ERR(post_meter)) {
419 		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
420 		goto err_meter_init;
421 	}
422 
423 	attr->meter_attr.meter = meter;
424 	attr->meter_attr.post_meter = post_meter;
425 	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
426 	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
427 
428 	return 0;
429 
430 err_meter_init:
431 	mlx5e_tc_meter_put(meter);
432 	return PTR_ERR(post_meter);
433 }
434 
435 static void
mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
437 {
438 	mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
439 	mlx5e_tc_meter_put(attr->meter_attr.meter);
440 }
441 
442 struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
444 		      struct mlx5_flow_spec *spec,
445 		      struct mlx5_flow_attr *attr)
446 {
447 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
448 	int err;
449 
450 	if (attr->flags & MLX5_ATTR_FLAG_CT) {
451 		struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
452 			&attr->parse_attr->mod_hdr_acts;
453 
454 		return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
455 					       spec, attr,
456 					       mod_hdr_acts);
457 	}
458 
459 	if (!is_mdev_switchdev_mode(priv->mdev))
460 		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
461 
462 	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
463 		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
464 
465 	if (is_flow_meter_action(attr)) {
466 		err = mlx5e_tc_add_flow_meter(priv, attr);
467 		if (err)
468 			return ERR_PTR(err);
469 	}
470 
471 	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
472 }
473 
474 void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
476 			struct mlx5_flow_handle *rule,
477 			struct mlx5_flow_attr *attr)
478 {
479 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
480 
481 	if (attr->flags & MLX5_ATTR_FLAG_CT) {
482 		mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
483 		return;
484 	}
485 
486 	if (!is_mdev_switchdev_mode(priv->mdev)) {
487 		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
488 		return;
489 	}
490 
491 	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
492 		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
493 		return;
494 	}
495 
496 	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
497 
498 	if (attr->meter_attr.meter)
499 		mlx5e_tc_del_flow_meter(attr);
500 }
501 
502 int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
504 			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
505 			  enum mlx5_flow_namespace_type ns,
506 			  enum mlx5e_tc_attr_to_reg type,
507 			  u32 data)
508 {
509 	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);
510 
511 	return ret < 0 ? ret : 0;
512 }
513 
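/* Rewrite, in place, the SET action previously returned by
 * mlx5e_tc_match_to_reg_set_and_get_id() (identified by @act_id) with new data.
 */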
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
515 					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
516 					  enum mlx5e_tc_attr_to_reg type,
517 					  int act_id, u32 data)
518 {
519 	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
520 	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
521 	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
522 	char *modact;
523 
524 	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);
525 
	/* The firmware length field is 5 bits wide and 0 means 32 bits */
527 	if (mlen == 32)
528 		mlen = 0;
529 
530 	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
531 	MLX5_SET(set_action_in, modact, field, mfield);
532 	MLX5_SET(set_action_in, modact, offset, moffset);
533 	MLX5_SET(set_action_in, modact, length, mlen);
534 	MLX5_SET(set_action_in, modact, data, data);
535 }
536 
537 struct mlx5e_hairpin {
538 	struct mlx5_hairpin *pair;
539 
540 	struct mlx5_core_dev *func_mdev;
541 	struct mlx5e_priv *func_priv;
542 	u32 tdn;
543 	struct mlx5e_tir direct_tir;
544 
545 	int num_channels;
546 	struct mlx5e_rqt indir_rqt;
547 	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
548 	struct mlx5_ttc_table *ttc;
549 };
550 
551 struct mlx5e_hairpin_entry {
	/* a node of the hash table which keeps all the hairpin entries */
553 	struct hlist_node hairpin_hlist;
554 
555 	/* protects flows list */
556 	spinlock_t flows_lock;
557 	/* flows sharing the same hairpin */
558 	struct list_head flows;
	/* hpe's that were not fully initialized when the dead peer update event
	 * function traversed them.
	 */
562 	struct list_head dead_peer_wait_list;
563 
564 	u16 peer_vhca_id;
565 	u8 prio;
566 	struct mlx5e_hairpin *hp;
567 	refcount_t refcnt;
568 	struct completion res_ready;
569 };
570 
571 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
572 			      struct mlx5e_tc_flow *flow);
573 
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
575 {
576 	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
577 		return ERR_PTR(-EINVAL);
578 	return flow;
579 }
580 
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
582 {
583 	if (refcount_dec_and_test(&flow->refcnt)) {
584 		mlx5e_tc_del_flow(priv, flow);
585 		kfree_rcu(flow, rcu_head);
586 	}
587 }
588 
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
590 {
591 	return flow_flag_test(flow, ESWITCH);
592 }
593 
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
595 {
596 	return flow_flag_test(flow, FT);
597 }
598 
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
600 {
601 	return flow_flag_test(flow, OFFLOADED);
602 }
603 
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
605 {
606 	return mlx5e_is_eswitch_flow(flow) ?
607 		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
608 }
609 
610 static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
612 {
613 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
614 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
615 
616 	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
617 		&esw->offloads.mod_hdr :
618 		&tc->mod_hdr;
619 }
620 
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
622 				struct mlx5e_tc_flow *flow,
623 				struct mlx5e_tc_flow_parse_attr *parse_attr)
624 {
625 	struct mlx5_modify_hdr *modify_hdr;
626 	struct mlx5e_mod_hdr_handle *mh;
627 
628 	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
629 				  mlx5e_get_flow_namespace(flow),
630 				  &parse_attr->mod_hdr_acts);
631 	if (IS_ERR(mh))
632 		return PTR_ERR(mh);
633 
634 	modify_hdr = mlx5e_mod_hdr_get(mh);
635 	flow->attr->modify_hdr = modify_hdr;
636 	flow->mh = mh;
637 
638 	return 0;
639 }
640 
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
642 				 struct mlx5e_tc_flow *flow)
643 {
644 	/* flow wasn't fully initialized */
645 	if (!flow->mh)
646 		return;
647 
648 	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
649 			     flow->mh);
650 	flow->mh = NULL;
651 }
652 
653 static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
655 {
656 	struct mlx5_core_dev *mdev;
657 	struct net_device *netdev;
658 	struct mlx5e_priv *priv;
659 
660 	netdev = dev_get_by_index(net, ifindex);
661 	if (!netdev)
662 		return ERR_PTR(-ENODEV);
663 
664 	priv = netdev_priv(netdev);
665 	mdev = priv->mdev;
666 	dev_put(netdev);
667 
668 	/* Mirred tc action holds a refcount on the ifindex net_device (see
669 	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
670 	 * after dev_put(netdev), while we're in the context of adding a tc flow.
671 	 *
672 	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
673 	 * stored in a hairpin object, which exists until all flows, that refer to it, get
674 	 * removed.
675 	 *
676 	 * On the other hand, after a hairpin object has been created, the peer net_device may
677 	 * be removed/unbound while there are still some hairpin flows that are using it. This
678 	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
679 	 * NETDEV_UNREGISTER event of the peer net_device.
680 	 */
681 	return mdev;
682 }
683 
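/* Allocate a transport domain and build the hairpin's direct TIR, which
 * points at the first hairpin RQ.
 */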
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
685 {
686 	struct mlx5e_tir_builder *builder;
687 	int err;
688 
689 	builder = mlx5e_tir_builder_alloc(false);
690 	if (!builder)
691 		return -ENOMEM;
692 
693 	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
694 	if (err)
695 		goto out;
696 
697 	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
698 	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
699 	if (err)
700 		goto create_tir_err;
701 
702 out:
703 	mlx5e_tir_builder_free(builder);
704 	return err;
705 
706 create_tir_err:
707 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
708 
709 	goto out;
710 }
711 
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
713 {
714 	mlx5e_tir_destroy(&hp->direct_tir);
715 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
716 }
717 
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
719 {
720 	struct mlx5e_priv *priv = hp->func_priv;
721 	struct mlx5_core_dev *mdev = priv->mdev;
722 	struct mlx5e_rss_params_indir *indir;
723 	int err;
724 
725 	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
726 	if (!indir)
727 		return -ENOMEM;
728 
729 	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
730 	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
731 				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
732 				   indir);
733 
734 	kvfree(indir);
735 	return err;
736 }
737 
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
739 {
740 	struct mlx5e_priv *priv = hp->func_priv;
741 	struct mlx5e_rss_params_hash rss_hash;
742 	enum mlx5_traffic_types tt, max_tt;
743 	struct mlx5e_tir_builder *builder;
744 	int err = 0;
745 
746 	builder = mlx5e_tir_builder_alloc(false);
747 	if (!builder)
748 		return -ENOMEM;
749 
750 	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);
751 
752 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
753 		struct mlx5e_rss_params_traffic_type rss_tt;
754 
755 		rss_tt = mlx5e_rss_get_default_tt_config(tt);
756 
757 		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
758 					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
759 					    false);
760 		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);
761 
762 		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
763 		if (err) {
764 			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
765 			goto err_destroy_tirs;
766 		}
767 
768 		mlx5e_tir_builder_clear(builder);
769 	}
770 
771 out:
772 	mlx5e_tir_builder_free(builder);
773 	return err;
774 
775 err_destroy_tirs:
776 	max_tt = tt;
777 	for (tt = 0; tt < max_tt; tt++)
778 		mlx5e_tir_destroy(&hp->indir_tir[tt]);
779 
780 	goto out;
781 }
782 
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
784 {
785 	int tt;
786 
787 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
788 		mlx5e_tir_destroy(&hp->indir_tir[tt]);
789 }
790 
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
792 					 struct ttc_params *ttc_params)
793 {
794 	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
795 	int tt;
796 
797 	memset(ttc_params, 0, sizeof(*ttc_params));
798 
799 	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
800 						 MLX5_FLOW_NAMESPACE_KERNEL);
801 	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
802 		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
803 		ttc_params->dests[tt].tir_num =
804 			tt == MLX5_TT_ANY ?
805 				mlx5e_tir_get_tirn(&hp->direct_tir) :
806 				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
807 	}
808 
809 	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
810 	ft_attr->prio = MLX5E_TC_PRIO;
811 }
812 
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
814 {
815 	struct mlx5e_priv *priv = hp->func_priv;
816 	struct ttc_params ttc_params;
817 	struct mlx5_ttc_table *ttc;
818 	int err;
819 
820 	err = mlx5e_hairpin_create_indirect_rqt(hp);
821 	if (err)
822 		return err;
823 
824 	err = mlx5e_hairpin_create_indirect_tirs(hp);
825 	if (err)
826 		goto err_create_indirect_tirs;
827 
828 	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
829 	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
830 	if (IS_ERR(hp->ttc)) {
831 		err = PTR_ERR(hp->ttc);
832 		goto err_create_ttc_table;
833 	}
834 
835 	ttc = mlx5e_fs_get_ttc(priv->fs, false);
836 	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
837 		   hp->num_channels,
838 		   mlx5_get_ttc_flow_table(ttc)->id);
839 
840 	return 0;
841 
842 err_create_ttc_table:
843 	mlx5e_hairpin_destroy_indirect_tirs(hp);
844 err_create_indirect_tirs:
845 	mlx5e_rqt_destroy(&hp->indir_rqt);
846 
847 	return err;
848 }
849 
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
851 {
852 	mlx5_destroy_ttc_table(hp->ttc);
853 	mlx5e_hairpin_destroy_indirect_tirs(hp);
854 	mlx5e_rqt_destroy(&hp->indir_rqt);
855 }
856 
857 static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
859 		     int peer_ifindex)
860 {
861 	struct mlx5_core_dev *func_mdev, *peer_mdev;
862 	struct mlx5e_hairpin *hp;
863 	struct mlx5_hairpin *pair;
864 	int err;
865 
866 	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
867 	if (!hp)
868 		return ERR_PTR(-ENOMEM);
869 
870 	func_mdev = priv->mdev;
871 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
872 	if (IS_ERR(peer_mdev)) {
873 		err = PTR_ERR(peer_mdev);
874 		goto create_pair_err;
875 	}
876 
877 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
878 	if (IS_ERR(pair)) {
879 		err = PTR_ERR(pair);
880 		goto create_pair_err;
881 	}
882 	hp->pair = pair;
883 	hp->func_mdev = func_mdev;
884 	hp->func_priv = priv;
885 	hp->num_channels = params->num_channels;
886 
887 	err = mlx5e_hairpin_create_transport(hp);
888 	if (err)
889 		goto create_transport_err;
890 
891 	if (hp->num_channels > 1) {
892 		err = mlx5e_hairpin_rss_init(hp);
893 		if (err)
894 			goto rss_init_err;
895 	}
896 
897 	return hp;
898 
899 rss_init_err:
900 	mlx5e_hairpin_destroy_transport(hp);
901 create_transport_err:
902 	mlx5_core_hairpin_destroy(hp->pair);
903 create_pair_err:
904 	kfree(hp);
905 	return ERR_PTR(err);
906 }
907 
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
909 {
910 	if (hp->num_channels > 1)
911 		mlx5e_hairpin_rss_cleanup(hp);
912 	mlx5e_hairpin_destroy_transport(hp);
913 	mlx5_core_hairpin_destroy(hp->pair);
914 	kvfree(hp);
915 }
916 
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
918 {
919 	return (peer_vhca_id << 16 | prio);
920 }
921 
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
923 						     u16 peer_vhca_id, u8 prio)
924 {
925 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
926 	struct mlx5e_hairpin_entry *hpe;
927 	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
928 
929 	hash_for_each_possible(tc->hairpin_tbl, hpe,
930 			       hairpin_hlist, hash_key) {
931 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
932 			refcount_inc(&hpe->refcnt);
933 			return hpe;
934 		}
935 	}
936 
937 	return NULL;
938 }
939 
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
941 			      struct mlx5e_hairpin_entry *hpe)
942 {
943 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
944 	/* no more hairpin flows for us, release the hairpin pair */
945 	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
946 		return;
947 	hash_del(&hpe->hairpin_hlist);
948 	mutex_unlock(&tc->hairpin_tbl_lock);
949 
950 	if (!IS_ERR_OR_NULL(hpe->hp)) {
951 		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
952 			   dev_name(hpe->hp->pair->peer_mdev->device));
953 
954 		mlx5e_hairpin_destroy(hpe->hp);
955 	}
956 
957 	WARN_ON(!list_empty(&hpe->flows));
958 	kfree(hpe);
959 }
960 
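/* Hairpin pairs are shared by flows with the same peer vhca id and VLAN PCP
 * match. Flows that do not match on a VLAN priority all share the
 * UNKNOWN_MATCH_PRIO bucket; partially masked priority matches cannot be
 * mapped to a single bucket and are rejected.
 */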
961 #define UNKNOWN_MATCH_PRIO 8
962 
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
964 				  struct mlx5_flow_spec *spec, u8 *match_prio,
965 				  struct netlink_ext_ack *extack)
966 {
967 	void *headers_c, *headers_v;
968 	u8 prio_val, prio_mask = 0;
969 	bool vlan_present;
970 
971 #ifdef CONFIG_MLX5_CORE_EN_DCB
972 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
973 		NL_SET_ERR_MSG_MOD(extack,
974 				   "only PCP trust state supported for hairpin");
975 		return -EOPNOTSUPP;
976 	}
977 #endif
978 	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
979 	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
980 
981 	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
982 	if (vlan_present) {
983 		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
984 		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
985 	}
986 
987 	if (!vlan_present || !prio_mask) {
988 		prio_val = UNKNOWN_MATCH_PRIO;
989 	} else if (prio_mask != 0x7) {
990 		NL_SET_ERR_MSG_MOD(extack,
991 				   "masked priority match not supported for hairpin");
992 		return -EOPNOTSUPP;
993 	}
994 
995 	*match_prio = prio_val;
996 	return 0;
997 }
998 
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
1000 				  struct mlx5e_tc_flow *flow,
1001 				  struct mlx5e_tc_flow_parse_attr *parse_attr,
1002 				  struct netlink_ext_ack *extack)
1003 {
1004 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
1005 	int peer_ifindex = parse_attr->mirred_ifindex[0];
1006 	struct mlx5_hairpin_params params;
1007 	struct mlx5_core_dev *peer_mdev;
1008 	struct mlx5e_hairpin_entry *hpe;
1009 	struct mlx5e_hairpin *hp;
1010 	u64 link_speed64;
1011 	u32 link_speed;
1012 	u8 match_prio;
1013 	u16 peer_id;
1014 	int err;
1015 
1016 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
1017 	if (IS_ERR(peer_mdev)) {
1018 		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
1019 		return PTR_ERR(peer_mdev);
1020 	}
1021 
1022 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
1023 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
1024 		return -EOPNOTSUPP;
1025 	}
1026 
1027 	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
1028 	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
1029 				     extack);
1030 	if (err)
1031 		return err;
1032 
1033 	mutex_lock(&tc->hairpin_tbl_lock);
1034 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
1035 	if (hpe) {
1036 		mutex_unlock(&tc->hairpin_tbl_lock);
1037 		wait_for_completion(&hpe->res_ready);
1038 
1039 		if (IS_ERR(hpe->hp)) {
1040 			err = -EREMOTEIO;
1041 			goto out_err;
1042 		}
1043 		goto attach_flow;
1044 	}
1045 
1046 	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
1047 	if (!hpe) {
1048 		mutex_unlock(&tc->hairpin_tbl_lock);
1049 		return -ENOMEM;
1050 	}
1051 
1052 	spin_lock_init(&hpe->flows_lock);
1053 	INIT_LIST_HEAD(&hpe->flows);
1054 	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
1055 	hpe->peer_vhca_id = peer_id;
1056 	hpe->prio = match_prio;
1057 	refcount_set(&hpe->refcnt, 1);
1058 	init_completion(&hpe->res_ready);
1059 
1060 	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
1061 		 hash_hairpin_info(peer_id, match_prio));
1062 	mutex_unlock(&tc->hairpin_tbl_lock);
1063 
1064 	params.log_data_size = 16;
1065 	params.log_data_size = min_t(u8, params.log_data_size,
1066 				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
1067 	params.log_data_size = max_t(u8, params.log_data_size,
1068 				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
1069 
1070 	params.log_num_packets = params.log_data_size -
1071 				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
1072 	params.log_num_packets = min_t(u8, params.log_num_packets,
1073 				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
1074 
1075 	params.q_counter = priv->q_counter;
	/* set one hairpin channel per 50Gbps share of the link speed */
1077 	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
1078 	link_speed = max_t(u32, link_speed, 50000);
1079 	link_speed64 = link_speed;
1080 	do_div(link_speed64, 50000);
1081 	params.num_channels = link_speed64;
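	/* e.g. a 100Gbps link gets two hairpin channels, while links at or
	 * below 50Gbps get a single channel
	 */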
1082 
1083 	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
1084 	hpe->hp = hp;
1085 	complete_all(&hpe->res_ready);
1086 	if (IS_ERR(hp)) {
1087 		err = PTR_ERR(hp);
1088 		goto out_err;
1089 	}
1090 
1091 	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
1092 		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
1093 		   dev_name(hp->pair->peer_mdev->device),
1094 		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
1095 
1096 attach_flow:
1097 	if (hpe->hp->num_channels > 1) {
1098 		flow_flag_set(flow, HAIRPIN_RSS);
1099 		flow->attr->nic_attr->hairpin_ft =
1100 			mlx5_get_ttc_flow_table(hpe->hp->ttc);
1101 	} else {
1102 		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
1103 	}
1104 
1105 	flow->hpe = hpe;
1106 	spin_lock(&hpe->flows_lock);
1107 	list_add(&flow->hairpin, &hpe->flows);
1108 	spin_unlock(&hpe->flows_lock);
1109 
1110 	return 0;
1111 
1112 out_err:
1113 	mlx5e_hairpin_put(priv, hpe);
1114 	return err;
1115 }
1116 
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
1118 				   struct mlx5e_tc_flow *flow)
1119 {
1120 	/* flow wasn't fully initialized */
1121 	if (!flow->hpe)
1122 		return;
1123 
1124 	spin_lock(&flow->hpe->flows_lock);
1125 	list_del(&flow->hairpin);
1126 	spin_unlock(&flow->hpe->flows_lock);
1127 
1128 	mlx5e_hairpin_put(priv, flow->hpe);
1129 	flow->hpe = NULL;
1130 }
1131 
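/* Build the destination list (explicit dest table, hairpin TTC table or TIR,
 * goto-chain table or the vlan table by default, plus an optional counter),
 * lazily create the NIC TC root table on first use, and insert the rule into
 * the flow table of the flow's chain/prio.
 */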
1132 struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
1134 			     struct mlx5_flow_spec *spec,
1135 			     struct mlx5_flow_attr *attr)
1136 {
1137 	struct mlx5_flow_context *flow_context = &spec->flow_context;
1138 	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
1139 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
1140 	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
1141 	struct mlx5_flow_destination dest[2] = {};
1142 	struct mlx5_fs_chains *nic_chains;
1143 	struct mlx5_flow_act flow_act = {
1144 		.action = attr->action,
1145 		.flags    = FLOW_ACT_NO_APPEND,
1146 	};
1147 	struct mlx5_flow_handle *rule;
1148 	struct mlx5_flow_table *ft;
1149 	int dest_ix = 0;
1150 
1151 	nic_chains = mlx5e_nic_chains(tc);
1152 	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
1153 	flow_context->flow_tag = nic_attr->flow_tag;
1154 
1155 	if (attr->dest_ft) {
1156 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1157 		dest[dest_ix].ft = attr->dest_ft;
1158 		dest_ix++;
1159 	} else if (nic_attr->hairpin_ft) {
1160 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1161 		dest[dest_ix].ft = nic_attr->hairpin_ft;
1162 		dest_ix++;
1163 	} else if (nic_attr->hairpin_tirn) {
1164 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1165 		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
1166 		dest_ix++;
1167 	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
1168 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1169 		if (attr->dest_chain) {
1170 			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
1171 								 attr->dest_chain, 1,
1172 								 MLX5E_TC_FT_LEVEL);
1173 			if (IS_ERR(dest[dest_ix].ft))
1174 				return ERR_CAST(dest[dest_ix].ft);
1175 		} else {
1176 			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
1177 		}
1178 		dest_ix++;
1179 	}
1180 
1181 	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1182 	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
1183 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
1184 
1185 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1186 		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1187 		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
1188 		dest_ix++;
1189 	}
1190 
1191 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1192 		flow_act.modify_hdr = attr->modify_hdr;
1193 
1194 	mutex_lock(&tc->t_lock);
1195 	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
1197 		tc->t =
1198 			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
1199 
1200 		if (IS_ERR(tc->t)) {
1201 			mutex_unlock(&tc->t_lock);
1202 			netdev_err(priv->netdev,
1203 				   "Failed to create tc offload table\n");
1204 			rule = ERR_CAST(tc->t);
1205 			goto err_ft_get;
1206 		}
1207 	}
1208 	mutex_unlock(&tc->t_lock);
1209 
1210 	if (attr->chain || attr->prio)
1211 		ft = mlx5_chains_get_table(nic_chains,
1212 					   attr->chain, attr->prio,
1213 					   MLX5E_TC_FT_LEVEL);
1214 	else
1215 		ft = attr->ft;
1216 
1217 	if (IS_ERR(ft)) {
1218 		rule = ERR_CAST(ft);
1219 		goto err_ft_get;
1220 	}
1221 
1222 	if (attr->outer_match_level != MLX5_MATCH_NONE)
1223 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1224 
1225 	rule = mlx5_add_flow_rules(ft, spec,
1226 				   &flow_act, dest, dest_ix);
1227 	if (IS_ERR(rule))
1228 		goto err_rule;
1229 
1230 	return rule;
1231 
1232 err_rule:
1233 	if (attr->chain || attr->prio)
1234 		mlx5_chains_put_table(nic_chains,
1235 				      attr->chain, attr->prio,
1236 				      MLX5E_TC_FT_LEVEL);
1237 err_ft_get:
1238 	if (attr->dest_chain)
1239 		mlx5_chains_put_table(nic_chains,
1240 				      attr->dest_chain, 1,
1241 				      MLX5E_TC_FT_LEVEL);
1242 
1243 	return ERR_CAST(rule);
1244 }
1245 
1246 static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
1248 			struct mlx5_flow_attr *attr)
1249 
1250 {
1251 	struct mlx5_fc *counter;
1252 
1253 	counter = mlx5_fc_create(counter_dev, true);
1254 	if (IS_ERR(counter))
1255 		return PTR_ERR(counter);
1256 
1257 	attr->counter = counter;
1258 	return 0;
1259 }
1260 
1261 static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
1263 		      struct mlx5e_tc_flow *flow,
1264 		      struct netlink_ext_ack *extack)
1265 {
1266 	struct mlx5e_tc_flow_parse_attr *parse_attr;
1267 	struct mlx5_flow_attr *attr = flow->attr;
1268 	struct mlx5_core_dev *dev = priv->mdev;
1269 	int err;
1270 
1271 	parse_attr = attr->parse_attr;
1272 
1273 	if (flow_flag_test(flow, HAIRPIN)) {
1274 		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
1275 		if (err)
1276 			return err;
1277 	}
1278 
1279 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1280 		err = alloc_flow_attr_counter(dev, attr);
1281 		if (err)
1282 			return err;
1283 	}
1284 
1285 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1286 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
1287 		if (err)
1288 			return err;
1289 	}
1290 
1291 	if (attr->flags & MLX5_ATTR_FLAG_CT)
1292 		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
1293 							attr, &parse_attr->mod_hdr_acts);
1294 	else
1295 		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
1296 							     attr);
1297 
1298 	return PTR_ERR_OR_ZERO(flow->rule[0]);
1299 }
1300 
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
1302 				  struct mlx5_flow_handle *rule,
1303 				  struct mlx5_flow_attr *attr)
1304 {
1305 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
1306 	struct mlx5_fs_chains *nic_chains;
1307 
1308 	nic_chains = mlx5e_nic_chains(tc);
1309 	mlx5_del_flow_rules(rule);
1310 
1311 	if (attr->chain || attr->prio)
1312 		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
1313 				      MLX5E_TC_FT_LEVEL);
1314 
1315 	if (attr->dest_chain)
1316 		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
1317 				      MLX5E_TC_FT_LEVEL);
1318 }
1319 
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
1321 				  struct mlx5e_tc_flow *flow)
1322 {
1323 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
1324 	struct mlx5_flow_attr *attr = flow->attr;
1325 
1326 	flow_flag_clear(flow, OFFLOADED);
1327 
1328 	if (attr->flags & MLX5_ATTR_FLAG_CT)
1329 		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
1330 	else if (!IS_ERR_OR_NULL(flow->rule[0]))
1331 		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
1332 
1333 	/* Remove root table if no rules are left to avoid
1334 	 * extra steering hops.
1335 	 */
1336 	mutex_lock(&tc->t_lock);
1337 	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
1338 	    !IS_ERR_OR_NULL(tc->t)) {
1339 		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
1340 		tc->t = NULL;
1341 	}
1342 	mutex_unlock(&tc->t_lock);
1343 
1344 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1345 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
1346 		mlx5e_detach_mod_hdr(priv, flow);
1347 	}
1348 
1349 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1350 		mlx5_fc_destroy(priv->mdev, attr->counter);
1351 
1352 	if (flow_flag_test(flow, HAIRPIN))
1353 		mlx5e_hairpin_flow_del(priv, flow);
1354 
1355 	free_flow_post_acts(flow);
1356 
1357 	kvfree(attr->parse_attr);
1358 	kfree(flow->attr);
1359 }
1360 
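/* Offload an eswitch (FDB) flow. Slow-path flows go straight to the eswitch
 * offloaded-rule API; otherwise the rule is added through
 * mlx5e_tc_rule_offload() and, when the actions are split across tables
 * (split_count), an extra forward rule is stored in rule[1].
 */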
1361 struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
1363 			   struct mlx5e_tc_flow *flow,
1364 			   struct mlx5_flow_spec *spec,
1365 			   struct mlx5_flow_attr *attr)
1366 {
1367 	struct mlx5_flow_handle *rule;
1368 
1369 	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
1370 		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
1371 
1372 	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
1373 
1374 	if (IS_ERR(rule))
1375 		return rule;
1376 
1377 	if (attr->esw_attr->split_count) {
1378 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
1379 		if (IS_ERR(flow->rule[1]))
1380 			goto err_rule1;
1381 	}
1382 
1383 	return rule;
1384 
1385 err_rule1:
1386 	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
1387 	return flow->rule[1];
1388 }
1389 
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
1391 				  struct mlx5e_tc_flow *flow,
1392 				  struct mlx5_flow_attr *attr)
1393 {
1394 	flow_flag_clear(flow, OFFLOADED);
1395 
1396 	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
1397 		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
1398 
1399 	if (attr->esw_attr->split_count)
1400 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
1401 
1402 	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
1403 }
1404 
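/* Send the flow to the slow path: forward packets to the eswitch slow-path
 * table so software can handle them. If the firmware supports modify-header
 * together with forwarding, also store the flow's chain mapping in a metadata
 * register so the chain can be restored on the miss path.
 */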
1405 struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
1407 			      struct mlx5e_tc_flow *flow,
1408 			      struct mlx5_flow_spec *spec)
1409 {
1410 	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
1411 	struct mlx5e_mod_hdr_handle *mh = NULL;
1412 	struct mlx5_flow_attr *slow_attr;
1413 	struct mlx5_flow_handle *rule;
1414 	bool fwd_and_modify_cap;
1415 	u32 chain_mapping = 0;
1416 	int err;
1417 
1418 	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
1419 	if (!slow_attr)
1420 		return ERR_PTR(-ENOMEM);
1421 
1422 	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1423 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1424 	slow_attr->esw_attr->split_count = 0;
1425 	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
1426 
1427 	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
1428 	if (!fwd_and_modify_cap)
1429 		goto skip_restore;
1430 
1431 	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
1432 	if (err)
1433 		goto err_get_chain;
1434 
1435 	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
1436 					CHAIN_TO_REG, chain_mapping);
1437 	if (err)
1438 		goto err_reg_set;
1439 
1440 	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
1441 				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
1442 	if (IS_ERR(mh)) {
1443 		err = PTR_ERR(mh);
1444 		goto err_attach;
1445 	}
1446 
1447 	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1448 	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);
1449 
1450 skip_restore:
1451 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
1452 	if (IS_ERR(rule)) {
1453 		err = PTR_ERR(rule);
1454 		goto err_offload;
1455 	}
1456 
1457 	flow->slow_mh = mh;
1458 	flow->chain_mapping = chain_mapping;
1459 	flow_flag_set(flow, SLOW);
1460 
1461 	mlx5e_mod_hdr_dealloc(&mod_acts);
1462 	kfree(slow_attr);
1463 
1464 	return rule;
1465 
1466 err_offload:
1467 	if (fwd_and_modify_cap)
1468 		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
1469 err_attach:
1470 err_reg_set:
1471 	if (fwd_and_modify_cap)
1472 		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
1473 err_get_chain:
1474 	mlx5e_mod_hdr_dealloc(&mod_acts);
1475 	kfree(slow_attr);
1476 	return ERR_PTR(err);
1477 }
1478 
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
1480 				       struct mlx5e_tc_flow *flow)
1481 {
1482 	struct mlx5_flow_attr *slow_attr;
1483 
1484 	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
1485 	if (!slow_attr) {
1486 		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
1487 		return;
1488 	}
1489 
1490 	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1491 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1492 	slow_attr->esw_attr->split_count = 0;
1493 	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
1494 	if (flow->slow_mh) {
1495 		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1496 		slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
1497 	}
1498 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
1499 	if (flow->slow_mh) {
1500 		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
1501 		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
1502 		flow->chain_mapping = 0;
1503 		flow->slow_mh = NULL;
1504 	}
1505 	flow_flag_clear(flow, SLOW);
1506 	kfree(slow_attr);
1507 }
1508 
1509 /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1510  * function.
1511  */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
1513 			     struct list_head *unready_flows)
1514 {
1515 	flow_flag_set(flow, NOT_READY);
1516 	list_add_tail(&flow->unready, unready_flows);
1517 }
1518 
1519 /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1520  * function.
1521  */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
1523 {
1524 	list_del(&flow->unready);
1525 	flow_flag_clear(flow, NOT_READY);
1526 }
1527 
static void add_unready_flow(struct mlx5e_tc_flow *flow)
1529 {
1530 	struct mlx5_rep_uplink_priv *uplink_priv;
1531 	struct mlx5e_rep_priv *rpriv;
1532 	struct mlx5_eswitch *esw;
1533 
1534 	esw = flow->priv->mdev->priv.eswitch;
1535 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1536 	uplink_priv = &rpriv->uplink_priv;
1537 
1538 	mutex_lock(&uplink_priv->unready_flows_lock);
1539 	unready_flow_add(flow, &uplink_priv->unready_flows);
1540 	mutex_unlock(&uplink_priv->unready_flows_lock);
1541 }
1542 
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1544 {
1545 	struct mlx5_rep_uplink_priv *uplink_priv;
1546 	struct mlx5e_rep_priv *rpriv;
1547 	struct mlx5_eswitch *esw;
1548 
1549 	esw = flow->priv->mdev->priv.eswitch;
1550 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1551 	uplink_priv = &rpriv->uplink_priv;
1552 
1553 	mutex_lock(&uplink_priv->unready_flows_lock);
1554 	unready_flow_del(flow);
1555 	mutex_unlock(&uplink_priv->unready_flows_lock);
1556 }
1557 
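/* A tunnel is a "VF tunnel" when the tunnel endpoint device belongs to the PF
 * while the routing device is a VF or SF on the same hardware.
 */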
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
1559 {
1560 	struct mlx5_core_dev *out_mdev, *route_mdev;
1561 	struct mlx5e_priv *out_priv, *route_priv;
1562 
1563 	out_priv = netdev_priv(out_dev);
1564 	out_mdev = out_priv->mdev;
1565 	route_priv = netdev_priv(route_dev);
1566 	route_mdev = route_priv->mdev;
1567 
1568 	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
1569 		return false;
1570 
1571 	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
1572 	    route_mdev->coredev_type != MLX5_COREDEV_SF)
1573 		return false;
1574 
1575 	return mlx5e_same_hw_devs(out_priv, route_priv);
1576 }
1577 
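/* Resolve the eswitch vport number behind @route_dev from its vhca id. Under
 * LAG the device may belong to the peer eswitch, so fall back to the devcom
 * peer when the local lookup returns -ENOENT.
 */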
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
1579 {
1580 	struct mlx5e_priv *out_priv, *route_priv;
1581 	struct mlx5_devcom *devcom = NULL;
1582 	struct mlx5_core_dev *route_mdev;
1583 	struct mlx5_eswitch *esw;
1584 	u16 vhca_id;
1585 	int err;
1586 
1587 	out_priv = netdev_priv(out_dev);
1588 	esw = out_priv->mdev->priv.eswitch;
1589 	route_priv = netdev_priv(route_dev);
1590 	route_mdev = route_priv->mdev;
1591 
1592 	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
1593 	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In the LAG case we may get devices from different eswitch instances.
		 * If we failed to get the vport number, it most likely means we are on
		 * the wrong eswitch.
		 */
1598 		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
1599 		if (err != -ENOENT)
1600 			return err;
1601 
1602 		devcom = out_priv->mdev->priv.devcom;
1603 		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1604 		if (!esw)
1605 			return -ENODEV;
1606 	}
1607 
1608 	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
1609 	if (devcom)
1610 		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1611 	return err;
1612 }
1613 
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
1615 			      struct mlx5e_tc_flow *flow,
1616 			      struct mlx5_flow_attr *attr)
1617 {
1618 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
1619 	struct mlx5_modify_hdr *mod_hdr;
1620 
1621 	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
1622 					   mlx5e_get_flow_namespace(flow),
1623 					   mod_hdr_acts->num_actions,
1624 					   mod_hdr_acts->actions);
1625 	if (IS_ERR(mod_hdr))
1626 		return PTR_ERR(mod_hdr);
1627 
1628 	WARN_ON(attr->modify_hdr);
1629 	attr->modify_hdr = mod_hdr;
1630 
1631 	return 0;
1632 }
1633 
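/* For eswitch flows, attach an encap context for every mirred destination that
 * requires encapsulation, record the destination rep/mdev, and report through
 * @vf_tun whether any destination is reached via a VF tunnel (which is not
 * supported together with mirroring).
 */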
1634 static int
set_encap_dests(struct mlx5e_priv *priv,
1636 		struct mlx5e_tc_flow *flow,
1637 		struct mlx5_flow_attr *attr,
1638 		struct netlink_ext_ack *extack,
1639 		bool *vf_tun)
1640 {
1641 	struct mlx5e_tc_flow_parse_attr *parse_attr;
1642 	struct mlx5_esw_flow_attr *esw_attr;
1643 	struct net_device *encap_dev = NULL;
1644 	struct mlx5e_rep_priv *rpriv;
1645 	struct mlx5e_priv *out_priv;
1646 	int out_index;
1647 	int err = 0;
1648 
1649 	if (!mlx5e_is_eswitch_flow(flow))
1650 		return 0;
1651 
1652 	parse_attr = attr->parse_attr;
1653 	esw_attr = attr->esw_attr;
1654 	*vf_tun = false;
1655 
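	/* Walk all forward destinations that were marked for encapsulation,
	 * attach an encap entry for each and record the rep/mdev of the
	 * device the encapsulated traffic egresses through.
	 */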
1656 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
1657 		struct net_device *out_dev;
1658 		int mirred_ifindex;
1659 
1660 		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1661 			continue;
1662 
1663 		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
1664 		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
1665 		if (!out_dev) {
1666 			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
1667 			err = -ENODEV;
1668 			goto out;
1669 		}
1670 		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
1671 					 extack, &encap_dev);
1672 		dev_put(out_dev);
1673 		if (err)
1674 			goto out;
1675 
1676 		if (esw_attr->dests[out_index].flags &
1677 		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
1678 		    !esw_attr->dest_int_port)
1679 			*vf_tun = true;
1680 
1681 		out_priv = netdev_priv(encap_dev);
1682 		rpriv = out_priv->ppriv;
1683 		esw_attr->dests[out_index].rep = rpriv->rep;
1684 		esw_attr->dests[out_index].mdev = out_priv->mdev;
1685 	}
1686 
1687 	if (*vf_tun && esw_attr->out_count > 1) {
1688 		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
1689 		err = -EOPNOTSUPP;
1690 		goto out;
1691 	}
1692 
1693 out:
1694 	return err;
1695 }
1696 
1697 static void
1698 clean_encap_dests(struct mlx5e_priv *priv,
1699 		  struct mlx5e_tc_flow *flow,
1700 		  struct mlx5_flow_attr *attr,
1701 		  bool *vf_tun)
1702 {
1703 	struct mlx5_esw_flow_attr *esw_attr;
1704 	int out_index;
1705 
1706 	if (!mlx5e_is_eswitch_flow(flow))
1707 		return;
1708 
1709 	esw_attr = attr->esw_attr;
1710 	*vf_tun = false;
1711 
1712 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
1713 		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1714 			continue;
1715 
1716 		if (esw_attr->dests[out_index].flags &
1717 		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
1718 		    !esw_attr->dest_int_port)
1719 			*vf_tun = true;
1720 
1721 		mlx5e_detach_encap(priv, flow, attr, out_index);
1722 		kfree(attr->parse_attr->tun_info[out_index]);
1723 	}
1724 }
1725 
1726 static int
1727 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1728 		      struct mlx5e_tc_flow *flow,
1729 		      struct netlink_ext_ack *extack)
1730 {
1731 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1732 	struct mlx5e_tc_flow_parse_attr *parse_attr;
1733 	struct mlx5_flow_attr *attr = flow->attr;
1734 	struct mlx5_esw_flow_attr *esw_attr;
1735 	u32 max_prio, max_chain;
1736 	bool vf_tun;
1737 	int err = 0;
1738 
1739 	parse_attr = attr->parse_attr;
1740 	esw_attr = attr->esw_attr;
1741 
1742 	/* We check chain range only for tc flows.
1743 	 * For ft flows, we checked attr->chain was originally 0 and set it to
1744 	 * FDB_FT_CHAIN which is outside tc range.
1745 	 * See mlx5e_rep_setup_ft_cb().
1746 	 */
1747 	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
1748 	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
1749 		NL_SET_ERR_MSG_MOD(extack,
1750 				   "Requested chain is out of supported range");
1751 		err = -EOPNOTSUPP;
1752 		goto err_out;
1753 	}
1754 
1755 	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
1756 	if (attr->prio > max_prio) {
1757 		NL_SET_ERR_MSG_MOD(extack,
1758 				   "Requested priority is out of supported range");
1759 		err = -EOPNOTSUPP;
1760 		goto err_out;
1761 	}
1762 
1763 	if (flow_flag_test(flow, TUN_RX)) {
1764 		err = mlx5e_attach_decap_route(priv, flow);
1765 		if (err)
1766 			goto err_out;
1767 
1768 		if (!attr->chain && esw_attr->int_port &&
1769 		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
1770 			/* If the decap route device is an internal port, change the
1771 			 * source vport value in reg_c0 back to the uplink, in case
1772 			 * the rule performs a goto to chain > 0. On a miss in a
1773 			 * chain > 0 we want the metadata regs to hold the chain id,
1774 			 * so SW will resume handling of this packet from the proper
1775 			 * chain.
1776 			 */
1777 			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
1778 									esw_attr->in_rep->vport);
1779 
1780 			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
1781 							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
1782 							metadata);
1783 			if (err)
1784 				goto err_out;
1785 
1786 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1787 		}
1788 	}
1789 
1790 	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
1791 		err = mlx5e_attach_decap(priv, flow, extack);
1792 		if (err)
1793 			goto err_out;
1794 	}
1795 
1796 	if (netif_is_ovs_master(parse_attr->filter_dev)) {
1797 		struct mlx5e_tc_int_port *int_port;
1798 
1799 		if (attr->chain) {
1800 			NL_SET_ERR_MSG_MOD(extack,
1801 					   "Internal port rule is only supported on chain 0");
1802 			err = -EOPNOTSUPP;
1803 			goto err_out;
1804 		}
1805 
1806 		if (attr->dest_chain) {
1807 			NL_SET_ERR_MSG_MOD(extack,
1808 					   "Internal port rule offload doesn't support goto action");
1809 			err = -EOPNOTSUPP;
1810 			goto err_out;
1811 		}
1812 
1813 		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
1814 						 parse_attr->filter_dev->ifindex,
1815 						 flow_flag_test(flow, EGRESS) ?
1816 						 MLX5E_TC_INT_PORT_EGRESS :
1817 						 MLX5E_TC_INT_PORT_INGRESS);
1818 		if (IS_ERR(int_port)) {
1819 			err = PTR_ERR(int_port);
1820 			goto err_out;
1821 		}
1822 
1823 		esw_attr->int_port = int_port;
1824 	}
1825 
1826 	err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
1827 	if (err)
1828 		goto err_out;
1829 
1830 	err = mlx5_eswitch_add_vlan_action(esw, attr);
1831 	if (err)
1832 		goto err_out;
1833 
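	/* vf_tun flows get their own modify-header object via
	 * mlx5e_tc_add_flow_mod_hdr(); other flows attach one through
	 * mlx5e_attach_mod_hdr().
	 */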
1834 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1835 		if (vf_tun) {
1836 			err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
1837 			if (err)
1838 				goto err_out;
1839 		} else {
1840 			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
1841 			if (err)
1842 				goto err_out;
1843 		}
1844 	}
1845 
1846 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1847 		err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
1848 		if (err)
1849 			goto err_out;
1850 	}
1851 
1852 	/* we get here if one of the following takes place:
1853 	 * (1) there's no error
1854 	 * (2) there's an encap action and we don't have a valid neigh
1855 	 */
1856 	if (flow_flag_test(flow, SLOW))
1857 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
1858 	else
1859 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
1860 
1861 	if (IS_ERR(flow->rule[0])) {
1862 		err = PTR_ERR(flow->rule[0]);
1863 		goto err_out;
1864 	}
1865 	flow_flag_set(flow, OFFLOADED);
1866 
1867 	return 0;
1868 
1869 err_out:
1870 	flow_flag_set(flow, FAILED);
1871 	return err;
1872 }
1873 
1874 static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
1875 {
1876 	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
1877 	void *headers_v = MLX5_ADDR_OF(fte_match_param,
1878 				       spec->match_value,
1879 				       misc_parameters_3);
1880 	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
1881 					     headers_v,
1882 					     geneve_tlv_option_0_data);
1883 
1884 	return !!geneve_tlv_opt_0_data;
1885 }
1886 
1887 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1888 				  struct mlx5e_tc_flow *flow)
1889 {
1890 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1891 	struct mlx5_flow_attr *attr = flow->attr;
1892 	struct mlx5_esw_flow_attr *esw_attr;
1893 	bool vf_tun;
1894 
1895 	esw_attr = attr->esw_attr;
1896 	mlx5e_put_flow_tunnel_id(flow);
1897 
1898 	if (flow_flag_test(flow, NOT_READY))
1899 		remove_unready_flow(flow);
1900 
1901 	if (mlx5e_is_offloaded_flow(flow)) {
1902 		if (flow_flag_test(flow, SLOW))
1903 			mlx5e_tc_unoffload_from_slow_path(esw, flow);
1904 		else
1905 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
1906 	}
1907 	complete_all(&flow->del_hw_done);
1908 
1909 	if (mlx5_flow_has_geneve_opt(flow))
1910 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
1911 
1912 	mlx5_eswitch_del_vlan_action(esw, attr);
1913 
1914 	if (flow->decap_route)
1915 		mlx5e_detach_decap_route(priv, flow);
1916 
1917 	clean_encap_dests(priv, flow, attr, &vf_tun);
1918 
1919 	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
1920 
1921 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1922 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
1923 		if (vf_tun && attr->modify_hdr)
1924 			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
1925 		else
1926 			mlx5e_detach_mod_hdr(priv, flow);
1927 	}
1928 
1929 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1930 		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
1931 
1932 	if (esw_attr->int_port)
1933 		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
1934 
1935 	if (esw_attr->dest_int_port)
1936 		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);
1937 
1938 	if (flow_flag_test(flow, L3_TO_L2_DECAP))
1939 		mlx5e_detach_decap(priv, flow);
1940 
1941 	free_flow_post_acts(flow);
1942 
1943 	if (flow->attr->lag.count)
1944 		mlx5_lag_del_mpesw_rule(esw->dev);
1945 
1946 	kvfree(attr->esw_attr->rx_tun_attr);
1947 	kvfree(attr->parse_attr);
1948 	kfree(flow->attr);
1949 }
1950 
1951 struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1952 {
1953 	struct mlx5_flow_attr *attr;
1954 
1955 	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
1956 	return attr->counter;
1957 }
1958 
1959 /* Iterate over tmp_list of flows attached to flow_list head. */
1960 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
1961 {
1962 	struct mlx5e_tc_flow *flow, *tmp;
1963 
1964 	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
1965 		mlx5e_flow_put(priv, flow);
1966 }
1967 
1968 static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
1969 {
1970 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
1971 
1972 	if (!flow_flag_test(flow, ESWITCH) ||
1973 	    !flow_flag_test(flow, DUP))
1974 		return;
1975 
1976 	mutex_lock(&esw->offloads.peer_mutex);
1977 	list_del(&flow->peer);
1978 	mutex_unlock(&esw->offloads.peer_mutex);
1979 
1980 	flow_flag_clear(flow, DUP);
1981 
1982 	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
1983 		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1984 		kfree(flow->peer_flow);
1985 	}
1986 
1987 	flow->peer_flow = NULL;
1988 }
1989 
1990 static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
1991 {
1992 	struct mlx5_core_dev *dev = flow->priv->mdev;
1993 	struct mlx5_devcom *devcom = dev->priv.devcom;
1994 	struct mlx5_eswitch *peer_esw;
1995 
1996 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1997 	if (!peer_esw)
1998 		return;
1999 
2000 	__mlx5e_tc_del_fdb_peer_flow(flow);
2001 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2002 }
2003 
2004 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
2005 			      struct mlx5e_tc_flow *flow)
2006 {
2007 	if (mlx5e_is_eswitch_flow(flow)) {
2008 		mlx5e_tc_del_fdb_peer_flow(flow);
2009 		mlx5e_tc_del_fdb_flow(priv, flow);
2010 	} else {
2011 		mlx5e_tc_del_nic_flow(priv, flow);
2012 	}
2013 }
2014 
2015 static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
2016 {
2017 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2018 	struct flow_action *flow_action = &rule->action;
2019 	const struct flow_action_entry *act;
2020 	int i;
2021 
2022 	if (chain)
2023 		return false;
2024 
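	/* On chain 0, a goto or sample action means the tunnel match must be
	 * encoded into a mapping id so it can be restored after the jump.
	 */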
2025 	flow_action_for_each(i, act, flow_action) {
2026 		switch (act->id) {
2027 		case FLOW_ACTION_GOTO:
2028 			return true;
2029 		case FLOW_ACTION_SAMPLE:
2030 			return true;
2031 		default:
2032 			continue;
2033 		}
2034 	}
2035 
2036 	return false;
2037 }
2038 
2039 static int
2040 enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
2041 				    struct flow_dissector_key_enc_opts *opts,
2042 				    struct netlink_ext_ack *extack,
2043 				    bool *dont_care)
2044 {
2045 	struct geneve_opt *opt;
2046 	int off = 0;
2047 
2048 	*dont_care = true;
2049 
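	/* Scan all geneve options: *dont_care stays true only while every
	 * option class, type and data are fully wildcarded. A partially
	 * masked option is rejected below.
	 */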
2050 	while (opts->len > off) {
2051 		opt = (struct geneve_opt *)&opts->data[off];
2052 
2053 		if (!(*dont_care) || opt->opt_class || opt->type ||
2054 		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
2055 			*dont_care = false;
2056 
2057 			if (opt->opt_class != htons(U16_MAX) ||
2058 			    opt->type != U8_MAX) {
2059 				NL_SET_ERR_MSG_MOD(extack,
2060 						   "Partial match of tunnel options in chain > 0 isn't supported");
2061 				netdev_warn(priv->netdev,
2062 					    "Partial match of tunnel options in chain > 0 isn't supported");
2063 				return -EOPNOTSUPP;
2064 			}
2065 		}
2066 
2067 		off += sizeof(struct geneve_opt) + opt->length * 4;
2068 	}
2069 
2070 	return 0;
2071 }
2072 
2073 #define COPY_DISSECTOR(rule, diss_key, dst)\
2074 ({ \
2075 	struct flow_rule *__rule = (rule);\
2076 	typeof(dst) __dst = dst;\
2077 \
2078 	memcpy(__dst,\
2079 	       skb_flow_dissector_target(__rule->match.dissector,\
2080 					 diss_key,\
2081 					 __rule->match.key),\
2082 	       sizeof(*__dst));\
2083 })
2084 
2085 static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
2086 				    struct mlx5e_tc_flow *flow,
2087 				    struct flow_cls_offload *f,
2088 				    struct net_device *filter_dev)
2089 {
2090 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2091 	struct netlink_ext_ack *extack = f->common.extack;
2092 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
2093 	struct flow_match_enc_opts enc_opts_match;
2094 	struct tunnel_match_enc_opts tun_enc_opts;
2095 	struct mlx5_rep_uplink_priv *uplink_priv;
2096 	struct mlx5_flow_attr *attr = flow->attr;
2097 	struct mlx5e_rep_priv *uplink_rpriv;
2098 	struct tunnel_match_key tunnel_key;
2099 	bool enc_opts_is_dont_care = true;
2100 	u32 tun_id, enc_opts_id = 0;
2101 	struct mlx5_eswitch *esw;
2102 	u32 value, mask;
2103 	int err;
2104 
2105 	esw = priv->mdev->priv.eswitch;
2106 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2107 	uplink_priv = &uplink_rpriv->uplink_priv;
2108 
2109 	memset(&tunnel_key, 0, sizeof(tunnel_key));
2110 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2111 		       &tunnel_key.enc_control);
2112 	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
2113 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
2114 			       &tunnel_key.enc_ipv4);
2115 	else
2116 		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
2117 			       &tunnel_key.enc_ipv6);
2118 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
2119 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
2120 		       &tunnel_key.enc_tp);
2121 	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
2122 		       &tunnel_key.enc_key_id);
2123 	tunnel_key.filter_ifindex = filter_dev->ifindex;
2124 
2125 	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
2126 	if (err)
2127 		return err;
2128 
2129 	flow_rule_match_enc_opts(rule, &enc_opts_match);
2130 	err = enc_opts_is_dont_care_or_full_match(priv,
2131 						  enc_opts_match.mask,
2132 						  extack,
2133 						  &enc_opts_is_dont_care);
2134 	if (err)
2135 		goto err_enc_opts;
2136 
2137 	if (!enc_opts_is_dont_care) {
2138 		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
2139 		memcpy(&tun_enc_opts.key, enc_opts_match.key,
2140 		       sizeof(*enc_opts_match.key));
2141 		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
2142 		       sizeof(*enc_opts_match.mask));
2143 
2144 		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
2145 				  &tun_enc_opts, &enc_opts_id);
2146 		if (err)
2147 			goto err_enc_opts;
2148 	}
2149 
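	/* Pack both mapping ids into one register value: the tunnel id goes
	 * in the bits above ENC_OPTS_BITS, the enc opts id in the low
	 * ENC_OPTS_BITS bits. When no enc opts id was allocated, mask the
	 * low bits out of the match.
	 */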
2150 	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
2151 	mask = enc_opts_id ? TUNNEL_ID_MASK :
2152 			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
2153 
2154 	if (attr->chain) {
2155 		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
2156 					    TUNNEL_TO_REG, value, mask);
2157 	} else {
2158 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
2159 		err = mlx5e_tc_match_to_reg_set(priv->mdev,
2160 						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
2161 						TUNNEL_TO_REG, value);
2162 		if (err)
2163 			goto err_set;
2164 
2165 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2166 	}
2167 
2168 	flow->attr->tunnel_id = value;
2169 	return 0;
2170 
2171 err_set:
2172 	if (enc_opts_id)
2173 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2174 			       enc_opts_id);
2175 err_enc_opts:
2176 	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2177 	return err;
2178 }
2179 
2180 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
2181 {
2182 	u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
2183 	u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
2184 	struct mlx5_rep_uplink_priv *uplink_priv;
2185 	struct mlx5e_rep_priv *uplink_rpriv;
2186 	struct mlx5_eswitch *esw;
2187 
2188 	esw = flow->priv->mdev->priv.eswitch;
2189 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2190 	uplink_priv = &uplink_rpriv->uplink_priv;
2191 
2192 	if (tun_id)
2193 		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2194 	if (enc_opts_id)
2195 		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2196 			       enc_opts_id);
2197 }
2198 
2199 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
2200 			    struct flow_match_basic *match, bool outer,
2201 			    void *headers_c, void *headers_v)
2202 {
2203 	bool ip_version_cap;
2204 
2205 	ip_version_cap = outer ?
2206 		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2207 					  ft_field_support.outer_ip_version) :
2208 		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2209 					  ft_field_support.inner_ip_version);
2210 
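	/* Prefer matching on ip_version over the raw ethertype when the
	 * device supports it and the filter fully specifies an IPv4/IPv6
	 * protocol.
	 */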
2211 	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
2212 	    (match->key->n_proto == htons(ETH_P_IP) ||
2213 	     match->key->n_proto == htons(ETH_P_IPV6))) {
2214 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
2215 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
2216 			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
2217 	} else {
2218 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
2219 			 ntohs(match->mask->n_proto));
2220 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
2221 			 ntohs(match->key->n_proto));
2222 	}
2223 }
2224 
2225 u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
2226 {
2227 	void *headers_v;
2228 	u16 ethertype;
2229 	u8 ip_version;
2230 
2231 	if (outer)
2232 		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2233 	else
2234 		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
2235 
2236 	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
2237 	/* If ip_version was not matched on directly, derive it from the ethertype */
2238 	if (!ip_version) {
2239 		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2240 		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
2241 			ip_version = 4;
2242 		else if (ethertype == ETH_P_IPV6)
2243 			ip_version = 6;
2244 	}
2245 	return ip_version;
2246 }
2247 
2248 /* The tunnel device follows RFC 6040 (see include/net/inet_ecn.h) and
2249  * changes the inner ip_ecn depending on the inner and outer ip_ecn as follows:
2250  *      +---------+----------------------------------------+
2251  *      |Arriving |         Arriving Outer Header          |
2252  *      |   Inner +---------+---------+---------+----------+
2253  *      |  Header | Not-ECT | ECT(0)  | ECT(1)  |   CE     |
2254  *      +---------+---------+---------+---------+----------+
2255  *      | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop>   |
2256  *      |  ECT(0) |  ECT(0) | ECT(0)  | ECT(1)  |   CE*    |
2257  *      |  ECT(1) |  ECT(1) | ECT(1)  | ECT(1)* |   CE*    |
2258  *      |    CE   |   CE    |  CE     | CE      |   CE     |
2259  *      +---------+---------+---------+---------+----------+
2260  *
2261  * Tc matches on the inner headers after decapsulation on the tunnel device,
2262  * but hw offload matches the inner ip_ecn value before the hardware decap action.
2263  *
2264  * Cells marked with '*' are changed from the original inner packet ip_ecn value
2265  * during decap, so matching those values on inner ip_ecn before decap will fail.
2266  *
2267  * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
2268  * except for outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
2269  * and as such we can drop the inner ip_ecn = CE match.
2270  */
2271 
2272 static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
2273 				      struct flow_cls_offload *f,
2274 				      bool *match_inner_ecn)
2275 {
2276 	u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
2277 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2278 	struct netlink_ext_ack *extack = f->common.extack;
2279 	struct flow_match_ip match;
2280 
2281 	*match_inner_ecn = true;
2282 
2283 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
2284 		flow_rule_match_enc_ip(rule, &match);
2285 		outer_ecn_key = match.key->tos & INET_ECN_MASK;
2286 		outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
2287 	}
2288 
2289 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2290 		flow_rule_match_ip(rule, &match);
2291 		inner_ecn_key = match.key->tos & INET_ECN_MASK;
2292 		inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
2293 	}
2294 
2295 	if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
2296 		NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
2297 		netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
2298 		return -EOPNOTSUPP;
2299 	}
2300 
2301 	if (!outer_ecn_mask) {
2302 		if (!inner_ecn_mask)
2303 			return 0;
2304 
2305 		NL_SET_ERR_MSG_MOD(extack,
2306 				   "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2307 		netdev_warn(priv->netdev,
2308 			    "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2309 		return -EOPNOTSUPP;
2310 	}
2311 
2312 	if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
2313 		NL_SET_ERR_MSG_MOD(extack,
2314 				   "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2315 		netdev_warn(priv->netdev,
2316 			    "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2317 		return -EOPNOTSUPP;
2318 	}
2319 
2320 	if (!inner_ecn_mask)
2321 		return 0;
2322 
2323 	/* Both inner and outer have full mask on ecn */
2324 
2325 	if (outer_ecn_key == INET_ECN_ECT_1) {
2326 		/* inner ecn might change by DECAP action */
2327 
2328 		NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
2329 		netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
2330 		return -EOPNOTSUPP;
2331 	}
2332 
2333 	if (outer_ecn_key != INET_ECN_CE)
2334 		return 0;
2335 
2336 	if (inner_ecn_key != INET_ECN_CE) {
2337 		/* Can't happen in software, as packet ecn will be changed to CE after decap */
2338 		NL_SET_ERR_MSG_MOD(extack,
2339 				   "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2340 		netdev_warn(priv->netdev,
2341 			    "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2342 		return -EOPNOTSUPP;
2343 	}
2344 
2345 	/* outer ecn = CE and inner ecn = CE: as decap will change the inner ecn
2346 	 * to CE in any case, drop the match on inner ecn
2347 	 */
2348 	*match_inner_ecn = false;
2349 
2350 	return 0;
2351 }
2352 
2353 static int parse_tunnel_attr(struct mlx5e_priv *priv,
2354 			     struct mlx5e_tc_flow *flow,
2355 			     struct mlx5_flow_spec *spec,
2356 			     struct flow_cls_offload *f,
2357 			     struct net_device *filter_dev,
2358 			     u8 *match_level,
2359 			     bool *match_inner)
2360 {
2361 	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
2362 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2363 	struct netlink_ext_ack *extack = f->common.extack;
2364 	bool needs_mapping, sets_mapping;
2365 	int err;
2366 
2367 	if (!mlx5e_is_eswitch_flow(flow)) {
2368 		NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
2369 		return -EOPNOTSUPP;
2370 	}
2371 
2372 	needs_mapping = !!flow->attr->chain;
2373 	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
2374 	*match_inner = !needs_mapping;
2375 
2376 	if ((needs_mapping || sets_mapping) &&
2377 	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
2378 		NL_SET_ERR_MSG_MOD(extack,
2379 				   "Chains on tunnel devices isn't supported without register loopback support");
2380 		netdev_warn(priv->netdev,
2381 			    "Chains on tunnel devices isn't supported without register loopback support");
2382 		return -EOPNOTSUPP;
2383 	}
2384 
2385 	if (!flow->attr->chain) {
2386 		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
2387 					 match_level);
2388 		if (err) {
2389 			NL_SET_ERR_MSG_MOD(extack,
2390 					   "Failed to parse tunnel attributes");
2391 			netdev_warn(priv->netdev,
2392 				    "Failed to parse tunnel attributes");
2393 			return err;
2394 		}
2395 
2396 		/* With mpls over udp we decapsulate using packet reformat
2397 		 * object
2398 		 */
2399 		if (!netif_is_bareudp(filter_dev))
2400 			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2401 		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2402 		if (err)
2403 			return err;
2404 	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2405 		struct mlx5_flow_spec *tmp_spec;
2406 
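		/* On chain > 0 the tunnel headers are not added to the rule's
		 * match; parse them into a scratch spec only to derive the rx
		 * tunnel attributes.
		 */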
2407 		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
2408 		if (!tmp_spec) {
2409 			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
2410 			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
2411 			return -ENOMEM;
2412 		}
2413 		memcpy(tmp_spec, spec, sizeof(*tmp_spec));
2414 
2415 		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
2416 		if (err) {
2417 			kvfree(tmp_spec);
2418 			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
2419 			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
2420 			return err;
2421 		}
2422 		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
2423 		kvfree(tmp_spec);
2424 		if (err)
2425 			return err;
2426 	}
2427 
2428 	if (!needs_mapping && !sets_mapping)
2429 		return 0;
2430 
2431 	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2432 }
2433 
2434 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2435 {
2436 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2437 			    inner_headers);
2438 }
2439 
2440 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2441 {
2442 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2443 			    inner_headers);
2444 }
2445 
2446 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2447 {
2448 	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2449 			    outer_headers);
2450 }
2451 
2452 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2453 {
2454 	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2455 			    outer_headers);
2456 }
2457 
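/* For flows that perform decap, matching is done on the inner (post-decap)
 * headers; otherwise on the outer headers.
 */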
2458 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
2459 {
2460 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2461 		get_match_inner_headers_value(spec) :
2462 		get_match_outer_headers_value(spec);
2463 }
2464 
2465 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
2466 {
2467 	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2468 		get_match_inner_headers_criteria(spec) :
2469 		get_match_outer_headers_criteria(spec);
2470 }
2471 
2472 static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2473 				   struct flow_cls_offload *f)
2474 {
2475 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2476 	struct netlink_ext_ack *extack = f->common.extack;
2477 	struct net_device *ingress_dev;
2478 	struct flow_match_meta match;
2479 
2480 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2481 		return 0;
2482 
2483 	flow_rule_match_meta(rule, &match);
2484 	if (!match.mask->ingress_ifindex)
2485 		return 0;
2486 
2487 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2488 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2489 		return -EOPNOTSUPP;
2490 	}
2491 
2492 	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2493 					 match.key->ingress_ifindex);
2494 	if (!ingress_dev) {
2495 		NL_SET_ERR_MSG_MOD(extack,
2496 				   "Can't find the ingress port to match on");
2497 		return -ENOENT;
2498 	}
2499 
2500 	if (ingress_dev != filter_dev) {
2501 		NL_SET_ERR_MSG_MOD(extack,
2502 				   "Can't match on the ingress filter port");
2503 		return -EOPNOTSUPP;
2504 	}
2505 
2506 	return 0;
2507 }
2508 
2509 static bool skip_key_basic(struct net_device *filter_dev,
2510 			   struct flow_cls_offload *f)
2511 {
2512 	/* When doing mpls over udp decap, the user needs to provide
2513 	 * MPLS_UC as the protocol in order to be able to match on mpls
2514 	 * label fields.  However, the actual ethertype is IP so we want to
2515 	 * avoid matching on this, otherwise we'll fail the match.
2516 	 */
2517 	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2518 		return true;
2519 
2520 	return false;
2521 }
2522 
2523 static int __parse_cls_flower(struct mlx5e_priv *priv,
2524 			      struct mlx5e_tc_flow *flow,
2525 			      struct mlx5_flow_spec *spec,
2526 			      struct flow_cls_offload *f,
2527 			      struct net_device *filter_dev,
2528 			      u8 *inner_match_level, u8 *outer_match_level)
2529 {
2530 	struct netlink_ext_ack *extack = f->common.extack;
2531 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2532 				       outer_headers);
2533 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2534 				       outer_headers);
2535 	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2536 				    misc_parameters);
2537 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2538 				    misc_parameters);
2539 	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2540 				    misc_parameters_3);
2541 	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2542 				    misc_parameters_3);
2543 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2544 	struct flow_dissector *dissector = rule->match.dissector;
2545 	enum fs_flow_table_type fs_type;
2546 	bool match_inner_ecn = true;
2547 	u16 addr_type = 0;
2548 	u8 ip_proto = 0;
2549 	u8 *match_level;
2550 	int err;
2551 
2552 	fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2553 	match_level = outer_match_level;
2554 
2555 	if (dissector->used_keys &
2556 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
2557 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2558 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
2559 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2560 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
2561 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2562 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2563 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2564 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
2565 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2566 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2567 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2568 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
2569 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2570 	      BIT(FLOW_DISSECTOR_KEY_TCP) |
2571 	      BIT(FLOW_DISSECTOR_KEY_IP)  |
2572 	      BIT(FLOW_DISSECTOR_KEY_CT) |
2573 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2574 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2575 	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
2576 	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2577 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2578 		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2579 			   dissector->used_keys);
2580 		return -EOPNOTSUPP;
2581 	}
2582 
2583 	if (mlx5e_get_tc_tun(filter_dev)) {
2584 		bool match_inner = false;
2585 
2586 		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2587 					outer_match_level, &match_inner);
2588 		if (err)
2589 			return err;
2590 
2591 		if (match_inner) {
2592 			/* header pointers should point to the inner headers
2593 			 * if the packet was decapsulated already.
2594 			 * outer headers are set by parse_tunnel_attr.
2595 			 */
2596 			match_level = inner_match_level;
2597 			headers_c = get_match_inner_headers_criteria(spec);
2598 			headers_v = get_match_inner_headers_value(spec);
2599 		}
2600 
2601 		err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
2602 		if (err)
2603 			return err;
2604 	}
2605 
2606 	err = mlx5e_flower_parse_meta(filter_dev, f);
2607 	if (err)
2608 		return err;
2609 
2610 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2611 	    !skip_key_basic(filter_dev, f)) {
2612 		struct flow_match_basic match;
2613 
2614 		flow_rule_match_basic(rule, &match);
2615 		mlx5e_tc_set_ethertype(priv->mdev, &match,
2616 				       match_level == outer_match_level,
2617 				       headers_c, headers_v);
2618 
2619 		if (match.mask->n_proto)
2620 			*match_level = MLX5_MATCH_L2;
2621 	}
2622 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2623 	    is_vlan_dev(filter_dev)) {
2624 		struct flow_dissector_key_vlan filter_dev_mask;
2625 		struct flow_dissector_key_vlan filter_dev_key;
2626 		struct flow_match_vlan match;
2627 
2628 		if (is_vlan_dev(filter_dev)) {
2629 			match.key = &filter_dev_key;
2630 			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2631 			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2632 			match.key->vlan_priority = 0;
2633 			match.mask = &filter_dev_mask;
2634 			memset(match.mask, 0xff, sizeof(*match.mask));
2635 			match.mask->vlan_priority = 0;
2636 		} else {
2637 			flow_rule_match_vlan(rule, &match);
2638 		}
2639 		if (match.mask->vlan_id ||
2640 		    match.mask->vlan_priority ||
2641 		    match.mask->vlan_tpid) {
2642 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2643 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2644 					 svlan_tag, 1);
2645 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2646 					 svlan_tag, 1);
2647 			} else {
2648 				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2649 					 cvlan_tag, 1);
2650 				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2651 					 cvlan_tag, 1);
2652 			}
2653 
2654 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2655 				 match.mask->vlan_id);
2656 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2657 				 match.key->vlan_id);
2658 
2659 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2660 				 match.mask->vlan_priority);
2661 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2662 				 match.key->vlan_priority);
2663 
2664 			*match_level = MLX5_MATCH_L2;
2665 
2666 			if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
2667 			    match.mask->vlan_eth_type &&
2668 			    MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
2669 						    ft_field_support.outer_second_vid,
2670 						    fs_type)) {
2671 				MLX5_SET(fte_match_set_misc, misc_c,
2672 					 outer_second_cvlan_tag, 1);
2673 				spec->match_criteria_enable |=
2674 					MLX5_MATCH_MISC_PARAMETERS;
2675 			}
2676 		}
2677 	} else if (*match_level != MLX5_MATCH_NONE) {
2678 		/* cvlan_tag enabled in the match criteria and
2679 		 * disabled in the match value means neither an S
2680 		 * nor a C tag is present (untagged on both)
2681 		 */
2682 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2683 		*match_level = MLX5_MATCH_L2;
2684 	}
2685 
2686 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2687 		struct flow_match_vlan match;
2688 
2689 		flow_rule_match_cvlan(rule, &match);
2690 		if (match.mask->vlan_id ||
2691 		    match.mask->vlan_priority ||
2692 		    match.mask->vlan_tpid) {
2693 			if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2694 						     fs_type)) {
2695 				NL_SET_ERR_MSG_MOD(extack,
2696 						   "Matching on CVLAN is not supported");
2697 				return -EOPNOTSUPP;
2698 			}
2699 
2700 			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2701 				MLX5_SET(fte_match_set_misc, misc_c,
2702 					 outer_second_svlan_tag, 1);
2703 				MLX5_SET(fte_match_set_misc, misc_v,
2704 					 outer_second_svlan_tag, 1);
2705 			} else {
2706 				MLX5_SET(fte_match_set_misc, misc_c,
2707 					 outer_second_cvlan_tag, 1);
2708 				MLX5_SET(fte_match_set_misc, misc_v,
2709 					 outer_second_cvlan_tag, 1);
2710 			}
2711 
2712 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2713 				 match.mask->vlan_id);
2714 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2715 				 match.key->vlan_id);
2716 			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2717 				 match.mask->vlan_priority);
2718 			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2719 				 match.key->vlan_priority);
2720 
2721 			*match_level = MLX5_MATCH_L2;
2722 			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2723 		}
2724 	}
2725 
2726 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2727 		struct flow_match_eth_addrs match;
2728 
2729 		flow_rule_match_eth_addrs(rule, &match);
2730 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2731 					     dmac_47_16),
2732 				match.mask->dst);
2733 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2734 					     dmac_47_16),
2735 				match.key->dst);
2736 
2737 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2738 					     smac_47_16),
2739 				match.mask->src);
2740 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2741 					     smac_47_16),
2742 				match.key->src);
2743 
2744 		if (!is_zero_ether_addr(match.mask->src) ||
2745 		    !is_zero_ether_addr(match.mask->dst))
2746 			*match_level = MLX5_MATCH_L2;
2747 	}
2748 
2749 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2750 		struct flow_match_control match;
2751 
2752 		flow_rule_match_control(rule, &match);
2753 		addr_type = match.key->addr_type;
2754 
2755 		/* the HW doesn't support frag first/later */
2756 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
2757 			NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
2758 			return -EOPNOTSUPP;
2759 		}
2760 
2761 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2762 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2763 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2764 				 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2765 
2766 			/* the HW doesn't need L3 inline to match on frag=no */
2767 			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2768 				*match_level = MLX5_MATCH_L2;
2769 	/* ***  L2 attributes parsing up to here *** */
2770 			else
2771 				*match_level = MLX5_MATCH_L3;
2772 		}
2773 	}
2774 
2775 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2776 		struct flow_match_basic match;
2777 
2778 		flow_rule_match_basic(rule, &match);
2779 		ip_proto = match.key->ip_proto;
2780 
2781 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2782 			 match.mask->ip_proto);
2783 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2784 			 match.key->ip_proto);
2785 
2786 		if (match.mask->ip_proto)
2787 			*match_level = MLX5_MATCH_L3;
2788 	}
2789 
2790 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2791 		struct flow_match_ipv4_addrs match;
2792 
2793 		flow_rule_match_ipv4_addrs(rule, &match);
2794 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2795 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2796 		       &match.mask->src, sizeof(match.mask->src));
2797 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2798 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2799 		       &match.key->src, sizeof(match.key->src));
2800 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2801 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2802 		       &match.mask->dst, sizeof(match.mask->dst));
2803 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2804 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2805 		       &match.key->dst, sizeof(match.key->dst));
2806 
2807 		if (match.mask->src || match.mask->dst)
2808 			*match_level = MLX5_MATCH_L3;
2809 	}
2810 
2811 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2812 		struct flow_match_ipv6_addrs match;
2813 
2814 		flow_rule_match_ipv6_addrs(rule, &match);
2815 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2816 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2817 		       &match.mask->src, sizeof(match.mask->src));
2818 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2819 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2820 		       &match.key->src, sizeof(match.key->src));
2821 
2822 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2823 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2824 		       &match.mask->dst, sizeof(match.mask->dst));
2825 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2826 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2827 		       &match.key->dst, sizeof(match.key->dst));
2828 
2829 		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2830 		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2831 			*match_level = MLX5_MATCH_L3;
2832 	}
2833 
2834 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2835 		struct flow_match_ip match;
2836 
2837 		flow_rule_match_ip(rule, &match);
2838 		if (match_inner_ecn) {
2839 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2840 				 match.mask->tos & 0x3);
2841 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2842 				 match.key->tos & 0x3);
2843 		}
2844 
2845 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2846 			 match.mask->tos >> 2);
2847 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2848 			 match.key->tos  >> 2);
2849 
2850 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2851 			 match.mask->ttl);
2852 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2853 			 match.key->ttl);
2854 
2855 		if (match.mask->ttl &&
2856 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2857 						ft_field_support.outer_ipv4_ttl)) {
2858 			NL_SET_ERR_MSG_MOD(extack,
2859 					   "Matching on TTL is not supported");
2860 			return -EOPNOTSUPP;
2861 		}
2862 
2863 		if (match.mask->tos || match.mask->ttl)
2864 			*match_level = MLX5_MATCH_L3;
2865 	}
2866 
2867 	/* ***  L3 attributes parsing up to here *** */
2868 
2869 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2870 		struct flow_match_ports match;
2871 
2872 		flow_rule_match_ports(rule, &match);
2873 		switch (ip_proto) {
2874 		case IPPROTO_TCP:
2875 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2876 				 tcp_sport, ntohs(match.mask->src));
2877 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2878 				 tcp_sport, ntohs(match.key->src));
2879 
2880 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2881 				 tcp_dport, ntohs(match.mask->dst));
2882 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2883 				 tcp_dport, ntohs(match.key->dst));
2884 			break;
2885 
2886 		case IPPROTO_UDP:
2887 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2888 				 udp_sport, ntohs(match.mask->src));
2889 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2890 				 udp_sport, ntohs(match.key->src));
2891 
2892 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2893 				 udp_dport, ntohs(match.mask->dst));
2894 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2895 				 udp_dport, ntohs(match.key->dst));
2896 			break;
2897 		default:
2898 			NL_SET_ERR_MSG_MOD(extack,
2899 					   "Only UDP and TCP transports are supported for L4 matching");
2900 			netdev_err(priv->netdev,
2901 				   "Only UDP and TCP transport are supported\n");
2902 			return -EINVAL;
2903 		}
2904 
2905 		if (match.mask->src || match.mask->dst)
2906 			*match_level = MLX5_MATCH_L4;
2907 	}
2908 
2909 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2910 		struct flow_match_tcp match;
2911 
2912 		flow_rule_match_tcp(rule, &match);
2913 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2914 			 ntohs(match.mask->flags));
2915 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2916 			 ntohs(match.key->flags));
2917 
2918 		if (match.mask->flags)
2919 			*match_level = MLX5_MATCH_L4;
2920 	}
2921 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2922 		struct flow_match_icmp match;
2923 
2924 		flow_rule_match_icmp(rule, &match);
2925 		switch (ip_proto) {
2926 		case IPPROTO_ICMP:
2927 			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2928 			      MLX5_FLEX_PROTO_ICMP)) {
2929 				NL_SET_ERR_MSG_MOD(extack,
2930 						   "Match on Flex protocols for ICMP is not supported");
2931 				return -EOPNOTSUPP;
2932 			}
2933 			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2934 				 match.mask->type);
2935 			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2936 				 match.key->type);
2937 			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2938 				 match.mask->code);
2939 			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2940 				 match.key->code);
2941 			break;
2942 		case IPPROTO_ICMPV6:
2943 			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2944 			      MLX5_FLEX_PROTO_ICMPV6)) {
2945 				NL_SET_ERR_MSG_MOD(extack,
2946 						   "Match on Flex protocols for ICMPV6 is not supported");
2947 				return -EOPNOTSUPP;
2948 			}
2949 			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2950 				 match.mask->type);
2951 			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2952 				 match.key->type);
2953 			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2954 				 match.mask->code);
2955 			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2956 				 match.key->code);
2957 			break;
2958 		default:
2959 			NL_SET_ERR_MSG_MOD(extack,
2960 					   "Code and type matching only with ICMP and ICMPv6");
2961 			netdev_err(priv->netdev,
2962 				   "Code and type matching only with ICMP and ICMPv6\n");
2963 			return -EINVAL;
2964 		}
2965 		if (match.mask->code || match.mask->type) {
2966 			*match_level = MLX5_MATCH_L4;
2967 			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2968 		}
2969 	}
2970 	/* Currently supported only for MPLS over UDP */
2971 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
2972 	    !netif_is_bareudp(filter_dev)) {
2973 		NL_SET_ERR_MSG_MOD(extack,
2974 				   "Matching on MPLS is supported only for MPLS over UDP");
2975 		netdev_err(priv->netdev,
2976 			   "Matching on MPLS is supported only for MPLS over UDP\n");
2977 		return -EOPNOTSUPP;
2978 	}
2979 
2980 	return 0;
2981 }
2982 
2983 static int parse_cls_flower(struct mlx5e_priv *priv,
2984 			    struct mlx5e_tc_flow *flow,
2985 			    struct mlx5_flow_spec *spec,
2986 			    struct flow_cls_offload *f,
2987 			    struct net_device *filter_dev)
2988 {
2989 	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2990 	struct netlink_ext_ack *extack = f->common.extack;
2991 	struct mlx5_core_dev *dev = priv->mdev;
2992 	struct mlx5_eswitch *esw = dev->priv.eswitch;
2993 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
2994 	struct mlx5_eswitch_rep *rep;
2995 	bool is_eswitch_flow;
2996 	int err;
2997 
2998 	inner_match_level = MLX5_MATCH_NONE;
2999 	outer_match_level = MLX5_MATCH_NONE;
3000 
3001 	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
3002 				 &inner_match_level, &outer_match_level);
3003 	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
3004 				 outer_match_level : inner_match_level;
3005 
3006 	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
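	/* For eswitch flows on non-uplink reps, the configured inline mode
	 * must copy enough headers to cover the deepest layer being matched.
	 */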
3007 	if (!err && is_eswitch_flow) {
3008 		rep = rpriv->rep;
3009 		if (rep->vport != MLX5_VPORT_UPLINK &&
3010 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
3011 		    esw->offloads.inline_mode < non_tunnel_match_level)) {
3012 			NL_SET_ERR_MSG_MOD(extack,
3013 					   "Flow is not offloaded due to min inline setting");
3014 			netdev_warn(priv->netdev,
3015 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
3016 				    non_tunnel_match_level, esw->offloads.inline_mode);
3017 			return -EOPNOTSUPP;
3018 		}
3019 	}
3020 
3021 	flow->attr->inner_match_level = inner_match_level;
3022 	flow->attr->outer_match_level = outer_match_level;
3023 
3025 	return err;
3026 }
3027 
3028 struct mlx5_fields {
3029 	u8  field;
3030 	u8  field_bsize;
3031 	u32 field_mask;
3032 	u32 offset;
3033 	u32 match_offset;
3034 };
3035 
3036 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
3037 		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
3038 		 offsetof(struct pedit_headers, field) + (off), \
3039 		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
3040 
3041 /* The masked values are the same, and the rewrite mask does not cover any
3042  * bits that are not also covered by the match mask.
3043  */
3044 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
3045 	type matchmaskx = *(type *)(matchmaskp); \
3046 	type matchvalx = *(type *)(matchvalp); \
3047 	type maskx = *(type *)(maskp); \
3048 	type valx = *(type *)(valp); \
3049 	\
3050 	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
3051 								 matchmaskx)); \
3052 })
3053 
3054 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
3055 			 void *matchmaskp, u8 bsize)
3056 {
3057 	bool same = false;
3058 
3059 	switch (bsize) {
3060 	case 8:
3061 		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
3062 		break;
3063 	case 16:
3064 		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
3065 		break;
3066 	case 32:
3067 		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
3068 		break;
3069 	}
3070 
3071 	return same;
3072 }
3073 
3074 static struct mlx5_fields fields[] = {
3075 	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
3076 	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
3077 	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
3078 	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
3079 	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
3080 	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
3081 
3082 	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
3083 	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
3084 	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
3085 	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
3086 
3087 	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
3088 		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
3089 	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
3090 		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
3091 	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
3092 		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
3093 	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
3094 		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
3095 	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
3096 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
3097 	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
3098 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
3099 	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
3100 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
3101 	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
3102 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
3103 	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
3104 	OFFLOAD(IP_DSCP, 16,  0xc00f, ip6, 0, ip_dscp),
3105 
3106 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
3107 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
3108 	/* in the linux tcphdr the 8 tcp flag bits sit 5 bytes after ack_seq */
3109 	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),
3110 
3111 	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
3112 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
3113 };
3114 
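/* The pedit masks are in network byte order; convert 32 and 16 bit masks to
 * little endian so that the bit-search helpers in offload_pedit_fields() see
 * consistent bit positions.
 */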
3115 static unsigned long mask_to_le(unsigned long mask, int size)
3116 {
3117 	__be32 mask_be32;
3118 	__be16 mask_be16;
3119 
3120 	if (size == 32) {
3121 		mask_be32 = (__force __be32)(mask);
3122 		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
3123 	} else if (size == 16) {
3124 		mask_be32 = (__force __be32)(mask);
3125 		mask_be16 = *(__be16 *)&mask_be32;
3126 		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
3127 	}
3128 
3129 	return mask;
3130 }
3131 
3132 static int offload_pedit_fields(struct mlx5e_priv *priv,
3133 				int namespace,
3134 				struct mlx5e_tc_flow_parse_attr *parse_attr,
3135 				u32 *action_flags,
3136 				struct netlink_ext_ack *extack)
3137 {
3138 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
3139 	struct pedit_headers_action *hdrs = parse_attr->hdrs;
3140 	void *headers_c, *headers_v, *action, *vals_p;
3141 	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
3142 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
3143 	unsigned long mask, field_mask;
3144 	int i, first, last, next_z;
3145 	struct mlx5_fields *f;
3146 	u8 cmd;
3147 
3148 	mod_acts = &parse_attr->mod_hdr_acts;
3149 	headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
3150 	headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
3151 
3152 	set_masks = &hdrs[0].masks;
3153 	add_masks = &hdrs[1].masks;
3154 	set_vals = &hdrs[0].vals;
3155 	add_vals = &hdrs[1].vals;
3156 
3157 	for (i = 0; i < ARRAY_SIZE(fields); i++) {
3158 		bool skip;
3159 
3160 		f = &fields[i];
3161 		/* avoid seeing bits set from previous iterations */
3162 		s_mask = 0;
3163 		a_mask = 0;
3164 
3165 		s_masks_p = (void *)set_masks + f->offset;
3166 		a_masks_p = (void *)add_masks + f->offset;
3167 
3168 		s_mask = *s_masks_p & f->field_mask;
3169 		a_mask = *a_masks_p & f->field_mask;
3170 
3171 		if (!s_mask && !a_mask) /* nothing to offload here */
3172 			continue;
3173 
3174 		if (s_mask && a_mask) {
3175 			NL_SET_ERR_MSG_MOD(extack,
3176 					   "can't set and add to the same HW field");
3177 			netdev_warn(priv->netdev,
3178 				    "mlx5: can't set and add to the same HW field (%x)\n",
3179 				    f->field);
3180 			return -EOPNOTSUPP;
3181 		}
3182 
3183 		skip = false;
3184 		if (s_mask) {
3185 			void *match_mask = headers_c + f->match_offset;
3186 			void *match_val = headers_v + f->match_offset;
3187 
3188 			cmd  = MLX5_ACTION_TYPE_SET;
3189 			mask = s_mask;
3190 			vals_p = (void *)set_vals + f->offset;
3191 			/* don't rewrite if we have a match on the same value */
3192 			if (cmp_val_mask(vals_p, s_masks_p, match_val,
3193 					 match_mask, f->field_bsize))
3194 				skip = true;
3195 			/* clear to denote we consumed this field */
3196 			*s_masks_p &= ~f->field_mask;
3197 		} else {
3198 			cmd  = MLX5_ACTION_TYPE_ADD;
3199 			mask = a_mask;
3200 			vals_p = (void *)add_vals + f->offset;
3201 			/* add 0 is no change */
3202 			if ((*(u32 *)vals_p & f->field_mask) == 0)
3203 				skip = true;
3204 			/* clear to denote we consumed this field */
3205 			*a_masks_p &= ~f->field_mask;
3206 		}
3207 		if (skip)
3208 			continue;
3209 
3210 		mask = mask_to_le(mask, f->field_bsize);
3211 
3212 		first = find_first_bit(&mask, f->field_bsize);
3213 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
3214 		last  = find_last_bit(&mask, f->field_bsize);
3215 		if (first < next_z && next_z < last) {
3216 			NL_SET_ERR_MSG_MOD(extack,
3217 					   "rewrite of few sub-fields isn't supported");
3218 			netdev_warn(priv->netdev,
3219 				    "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
3220 				    mask);
3221 			return -EOPNOTSUPP;
3222 		}
3223 
3224 		action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
3225 		if (IS_ERR(action)) {
3226 			NL_SET_ERR_MSG_MOD(extack,
3227 					   "too many pedit actions, can't offload");
3228 			mlx5_core_warn(priv->mdev,
3229 				       "mlx5: parsed %d pedit actions, can't do more\n",
3230 				       mod_acts->num_actions);
3231 			return PTR_ERR(action);
3232 		}
3233 
3234 		MLX5_SET(set_action_in, action, action_type, cmd);
3235 		MLX5_SET(set_action_in, action, field, f->field);
3236 
3237 		if (cmd == MLX5_ACTION_TYPE_SET) {
3238 			int start;
3239 
3240 			field_mask = mask_to_le(f->field_mask, f->field_bsize);
3241 
3242 			/* a bit-sized field may start at a bit other than the first one */
3243 			start = find_first_bit(&field_mask, f->field_bsize);
3244 
3245 			MLX5_SET(set_action_in, action, offset, first - start);
3246 			/* length is num of bits to be written, zero means length of 32 */
3247 			MLX5_SET(set_action_in, action, length, (last - first + 1));
3248 		}
3249 
3250 		if (f->field_bsize == 32)
3251 			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
3252 		else if (f->field_bsize == 16)
3253 			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
3254 		else if (f->field_bsize == 8)
3255 			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
3256 
3257 		++mod_acts->num_actions;
3258 	}
3259 
3260 	return 0;
3261 }
3262 
3263 static const struct pedit_headers zero_masks = {};
3264 
3265 static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
3266 				       struct mlx5e_tc_flow_parse_attr *parse_attr,
3267 				       struct netlink_ext_ack *extack)
3268 {
3269 	struct pedit_headers *cmd_masks;
3270 	u8 cmd;
3271 
3272 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3273 		cmd_masks = &parse_attr->hdrs[cmd].masks;
3274 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3275 			NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
3276 			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3277 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3278 				       16, 1, cmd_masks, sizeof(zero_masks), true);
3279 			return -EOPNOTSUPP;
3280 		}
3281 	}
3282 
3283 	return 0;
3284 }
3285 
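/* offload_pedit_fields() clears the mask bits of every field it manages to
 * translate; verify_offload_pedit_fields() then rejects the flow if any mask
 * bits are left over, i.e. the filter mangles a header field that has no
 * entry in fields[].
 */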
3286 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3287 				 struct mlx5e_tc_flow_parse_attr *parse_attr,
3288 				 u32 *action_flags,
3289 				 struct netlink_ext_ack *extack)
3290 {
3291 	int err;
3292 
3293 	err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
3294 	if (err)
3295 		goto out_dealloc_parsed_actions;
3296 
3297 	err = verify_offload_pedit_fields(priv, parse_attr, extack);
3298 	if (err)
3299 		goto out_dealloc_parsed_actions;
3300 
3301 	return 0;
3302 
3303 out_dealloc_parsed_actions:
3304 	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3305 	return err;
3306 }
3307 
3308 struct ip_ttl_word {
3309 	__u8	ttl;
3310 	__u8	protocol;
3311 	__sum16	check;
3312 };
3313 
3314 struct ipv6_hoplimit_word {
3315 	__be16	payload_len;
3316 	__u8	nexthdr;
3317 	__u8	hop_limit;
3318 };
3319 
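/* ip_ttl_word / ipv6_hoplimit_word overlay the 4-byte word holding ttl /
 * hop_limit.  The inverted pedit mask is cast to them below to tell whether
 * a mangle of that word touches more than just the TTL / hop-limit byte.
 */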
3320 static bool
3321 is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
3322 			 bool *modify_ip_header, bool *modify_tuple,
3323 			 struct netlink_ext_ack *extack)
3324 {
3325 	u32 mask, offset;
3326 	u8 htype;
3327 
3328 	htype = act->mangle.htype;
3329 	offset = act->mangle.offset;
3330 	mask = ~act->mangle.mask;
3331 	/* For the IPv4 & IPv6 headers, check the whole 4-byte word
3332 	 * to determine whether the modified fields are more than
3333 	 * just ttl & hop_limit.
3334 	 */
3335 	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3336 		struct ip_ttl_word *ttl_word =
3337 			(struct ip_ttl_word *)&mask;
3338 
3339 		if (offset != offsetof(struct iphdr, ttl) ||
3340 		    ttl_word->protocol ||
3341 		    ttl_word->check) {
3342 			*modify_ip_header = true;
3343 		}
3344 
3345 		if (offset >= offsetof(struct iphdr, saddr))
3346 			*modify_tuple = true;
3347 
3348 		if (ct_flow && *modify_tuple) {
3349 			NL_SET_ERR_MSG_MOD(extack,
3350 					   "can't offload re-write of ipv4 address with action ct");
3351 			return false;
3352 		}
3353 	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3354 		struct ipv6_hoplimit_word *hoplimit_word =
3355 			(struct ipv6_hoplimit_word *)&mask;
3356 
3357 		if (offset != offsetof(struct ipv6hdr, payload_len) ||
3358 		    hoplimit_word->payload_len ||
3359 		    hoplimit_word->nexthdr) {
3360 			*modify_ip_header = true;
3361 		}
3362 
3363 		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3364 			*modify_tuple = true;
3365 
3366 		if (ct_flow && *modify_tuple) {
3367 			NL_SET_ERR_MSG_MOD(extack,
3368 					   "can't offload re-write of ipv6 address with action ct");
3369 			return false;
3370 		}
3371 	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3372 		   htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3373 		*modify_tuple = true;
3374 		if (ct_flow) {
3375 			NL_SET_ERR_MSG_MOD(extack,
3376 					   "can't offload re-write of transport header ports with action ct");
3377 			return false;
3378 		}
3379 	}
3380 
3381 	return true;
3382 }
3383 
3384 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3385 				   bool ct_flow, struct netlink_ext_ack *extack,
3386 				   struct mlx5e_priv *priv,
3387 				   struct mlx5_flow_spec *spec)
3388 {
3389 	if (!modify_tuple || ct_clear)
3390 		return true;
3391 
3392 	if (ct_flow) {
3393 		NL_SET_ERR_MSG_MOD(extack,
3394 				   "can't offload tuple modification with non-clear ct()");
3395 		netdev_info(priv->netdev,
3396 			    "can't offload tuple modification with non-clear ct()");
3397 		return false;
3398 	}
3399 
3400 	/* Add a ct_state=-trk match so the rule is only offloaded for non-ct
3401 	 * flows (or after a clear action); otherwise, since the tuple is
3402 	 * changed, we can't restore the ct state.
3403 	 */
3404 	if (mlx5_tc_ct_add_no_trk_match(spec)) {
3405 		NL_SET_ERR_MSG_MOD(extack,
3406 				   "can't offload tuple modification with ct matches and no ct(clear) action");
3407 		netdev_info(priv->netdev,
3408 			    "can't offload tuple modification with ct matches and no ct(clear) action");
3409 		return false;
3410 	}
3411 
3412 	return true;
3413 }
3414 
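/* Decide whether the requested header rewrites can be offloaded together
 * with the rule's match: non-IP traffic only ever has its MACs rewritten,
 * tuple rewrites are constrained by the ct state (see modify_tuple_supported()
 * above), and other IP header rewrites are only offloaded for TCP, UDP and
 * ICMP flows.
 */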
3415 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3416 					  struct mlx5_flow_spec *spec,
3417 					  struct flow_action *flow_action,
3418 					  u32 actions, bool ct_flow,
3419 					  bool ct_clear,
3420 					  struct netlink_ext_ack *extack)
3421 {
3422 	const struct flow_action_entry *act;
3423 	bool modify_ip_header, modify_tuple;
3424 	void *headers_c;
3425 	void *headers_v;
3426 	u16 ethertype;
3427 	u8 ip_proto;
3428 	int i;
3429 
3430 	headers_c = mlx5e_get_match_headers_criteria(actions, spec);
3431 	headers_v = mlx5e_get_match_headers_value(actions, spec);
3432 	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3433 
3434 	/* for non-IP we only re-write MACs, so we're okay */
3435 	if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3436 	    ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3437 		goto out_ok;
3438 
3439 	modify_ip_header = false;
3440 	modify_tuple = false;
3441 	flow_action_for_each(i, act, flow_action) {
3442 		if (act->id != FLOW_ACTION_MANGLE &&
3443 		    act->id != FLOW_ACTION_ADD)
3444 			continue;
3445 
3446 		if (!is_action_keys_supported(act, ct_flow,
3447 					      &modify_ip_header,
3448 					      &modify_tuple, extack))
3449 			return false;
3450 	}
3451 
3452 	if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3453 				    priv, spec))
3454 		return false;
3455 
3456 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3457 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3458 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3459 		NL_SET_ERR_MSG_MOD(extack,
3460 				   "can't offload re-write of non TCP/UDP");
3461 		netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3462 			    ip_proto);
3463 		return false;
3464 	}
3465 
3466 out_ok:
3467 	return true;
3468 }
3469 
3470 static bool
3471 actions_match_supported_fdb(struct mlx5e_priv *priv,
3472 			    struct mlx5e_tc_flow_parse_attr *parse_attr,
3473 			    struct mlx5e_tc_flow *flow,
3474 			    struct netlink_ext_ack *extack)
3475 {
3476 	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3477 	bool ct_flow, ct_clear;
3478 
3479 	ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3480 	ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3481 
3482 	if (esw_attr->split_count && ct_flow &&
3483 	    !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
3484 		/* All registers used by ct are cleared when using
3485 		 * split rules.
3486 		 */
3487 		NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
3488 		return false;
3489 	}
3490 
3491 	if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3492 		NL_SET_ERR_MSG_MOD(extack,
3493 				   "current firmware doesn't support split rule for port mirroring");
3494 		netdev_warn_once(priv->netdev,
3495 				 "current firmware doesn't support split rule for port mirroring\n");
3496 		return false;
3497 	}
3498 
3499 	return true;
3500 }
3501 
3502 static bool
3503 actions_match_supported(struct mlx5e_priv *priv,
3504 			struct flow_action *flow_action,
3505 			u32 actions,
3506 			struct mlx5e_tc_flow_parse_attr *parse_attr,
3507 			struct mlx5e_tc_flow *flow,
3508 			struct netlink_ext_ack *extack)
3509 {
3510 	bool ct_flow, ct_clear;
3511 
3512 	ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3513 	ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3514 
3515 	if (!(actions &
3516 	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3517 		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
3518 		return false;
3519 	}
3520 
3521 	if (!(~actions &
3522 	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3523 		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
3524 		return false;
3525 	}
3526 
3527 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3528 	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3529 		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
3530 		return false;
3531 	}
3544 
3545 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3546 	    !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
3547 					   actions, ct_flow, ct_clear, extack))
3548 		return false;
3549 
3550 	if (mlx5e_is_eswitch_flow(flow) &&
3551 	    !actions_match_supported_fdb(priv, parse_attr, flow, extack))
3552 		return false;
3553 
3554 	return true;
3555 }
3556 
3557 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3558 {
3559 	return priv->mdev == peer_priv->mdev;
3560 }
3561 
3562 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3563 {
3564 	struct mlx5_core_dev *fmdev, *pmdev;
3565 	u64 fsystem_guid, psystem_guid;
3566 
3567 	fmdev = priv->mdev;
3568 	pmdev = peer_priv->mdev;
3569 
3570 	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3571 	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3572 
3573 	return (fsystem_guid == psystem_guid);
3574 }
3575 
3576 static int
3577 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
3578 				struct mlx5e_tc_flow *flow,
3579 				struct mlx5_flow_attr *attr,
3580 				struct netlink_ext_ack *extack)
3581 {
3582 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3583 	struct pedit_headers_action *hdrs = parse_attr->hdrs;
3584 	enum mlx5_flow_namespace_type ns_type;
3585 	int err;
3586 
3587 	if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
3588 	    !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
3589 		return 0;
3590 
3591 	ns_type = mlx5e_get_flow_namespace(flow);
3592 
3593 	err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
3594 	if (err)
3595 		return err;
3596 
3597 	if (parse_attr->mod_hdr_acts.num_actions > 0)
3598 		return 0;
3599 
3600 	/* In case all pedit actions are skipped, remove the MOD_HDR flag. */
3601 	attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3602 	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3603 
3604 	if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
3605 		return 0;
3606 
3607 	if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3608 	      (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3609 		attr->esw_attr->split_count = 0;
3610 
3611 	return 0;
3612 }
3613 
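/* Clone the flow attr for the actions that will run in the post-action
 * table.  Action flags, destination chain/table and the eswitch out/split
 * counts are reset so the clone only carries what the post-act parsing adds.
 */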
3614 static struct mlx5_flow_attr*
3615 mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
3616 				   enum mlx5_flow_namespace_type ns_type)
3617 {
3618 	struct mlx5e_tc_flow_parse_attr *parse_attr;
3619 	u32 attr_sz = ns_to_attr_sz(ns_type);
3620 	struct mlx5_flow_attr *attr2;
3621 
3622 	attr2 = mlx5_alloc_flow_attr(ns_type);
3623 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3624 	if (!attr2 || !parse_attr) {
3625 		kvfree(parse_attr);
3626 		kfree(attr2);
3627 		return NULL;
3628 	}
3629 
3630 	memcpy(attr2, attr, attr_sz);
3631 	INIT_LIST_HEAD(&attr2->list);
3632 	parse_attr->filter_dev = attr->parse_attr->filter_dev;
3633 	attr2->action = 0;
3634 	attr2->flags = 0;
3635 	attr2->parse_attr = parse_attr;
3636 	attr2->dest_chain = 0;
3637 	attr2->dest_ft = NULL;
3638 
3639 	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3640 		attr2->esw_attr->out_count = 0;
3641 		attr2->esw_attr->split_count = 0;
3642 	}
3643 
3644 	return attr2;
3645 }
3646 
3647 static struct mlx5_core_dev *
3648 get_flow_counter_dev(struct mlx5e_tc_flow *flow)
3649 {
3650 	return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
3651 }
3652 
3653 struct mlx5_flow_attr *
3654 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
3655 {
3656 	struct mlx5_esw_flow_attr *esw_attr;
3657 	struct mlx5_flow_attr *attr;
3658 	int i;
3659 
3660 	list_for_each_entry(attr, &flow->attrs, list) {
3661 		esw_attr = attr->esw_attr;
3662 		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
3663 			if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
3664 				return attr;
3665 		}
3666 	}
3667 
3668 	return NULL;
3669 }
3670 
3671 void
3672 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
3673 {
3674 	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3675 	struct mlx5_flow_attr *attr;
3676 
3677 	list_for_each_entry(attr, &flow->attrs, list) {
3678 		if (list_is_last(&attr->list, &flow->attrs))
3679 			break;
3680 
3681 		mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
3682 	}
3683 }
3684 
3685 static void
3686 free_flow_post_acts(struct mlx5e_tc_flow *flow)
3687 {
3688 	struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
3689 	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3690 	struct mlx5_flow_attr *attr, *tmp;
3691 	bool vf_tun;
3692 
3693 	list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
3694 		if (list_is_last(&attr->list, &flow->attrs))
3695 			break;
3696 
3697 		if (attr->post_act_handle)
3698 			mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
3699 
3700 		clean_encap_dests(flow->priv, flow, attr, &vf_tun);
3701 
3702 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
3703 			mlx5_fc_destroy(counter_dev, attr->counter);
3704 
3705 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
3706 			mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
3707 			if (attr->modify_hdr)
3708 				mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
3709 		}
3710 
3711 		list_del(&attr->list);
3712 		kvfree(attr->parse_attr);
3713 		kfree(attr);
3714 	}
3715 }
3716 
3717 int
3718 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
3719 {
3720 	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3721 	struct mlx5_flow_attr *attr;
3722 	int err = 0;
3723 
3724 	list_for_each_entry(attr, &flow->attrs, list) {
3725 		if (list_is_last(&attr->list, &flow->attrs))
3726 			break;
3727 
3728 		err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
3729 		if (err)
3730 			break;
3731 	}
3732 
3733 	return err;
3734 }
3735 
3736 /* TC filter rule HW translation:
3737  *
3738  * +---------------------+
3739  * + ft prio (tc chain)  +
3740  * + original match      +
3741  * +---------------------+
3742  *           |
3743  *           | if multi table action
3744  *           |
3745  *           v
3746  * +---------------------+
3747  * + post act ft         |<----.
3748  * + match fte id        |     | split on multi table action
3749  * + do actions          |-----'
3750  * +---------------------+
3751  *           |
3752  *           |
3753  *           v
3754  * Do rest of the actions after last multi table action.
3755  */
3756 static int
3757 alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
3758 {
3759 	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3760 	struct mlx5_flow_attr *attr, *next_attr = NULL;
3761 	struct mlx5e_post_act_handle *handle;
3762 	bool vf_tun;
3763 	int err;
3764 
3765 	/* The attrs list is walked in reverse order of the actions:
3766 	 * the first entry is the last attribute.
3767 	 */
3768 	list_for_each_entry(attr, &flow->attrs, list) {
3769 		if (!next_attr) {
3770 			/* Set counter action on last post act rule. */
3771 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3772 		} else {
3773 			err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
3774 			if (err)
3775 				goto out_free;
3776 		}
3777 
3778 		/* Don't add post_act rule for first attr (last in the list).
3779 		 * It's being handled by the caller.
3780 		 */
3781 		if (list_is_last(&attr->list, &flow->attrs))
3782 			break;
3783 
3784 		err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
3785 		if (err)
3786 			goto out_free;
3787 
3788 		err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
3789 		if (err)
3790 			goto out_free;
3791 
3792 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
3793 			err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
3794 			if (err)
3795 				goto out_free;
3796 		}
3797 
3798 		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3799 			err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
3800 			if (err)
3801 				goto out_free;
3802 		}
3803 
3804 		handle = mlx5e_tc_post_act_add(post_act, attr);
3805 		if (IS_ERR(handle)) {
3806 			err = PTR_ERR(handle);
3807 			goto out_free;
3808 		}
3809 
3810 		attr->post_act_handle = handle;
3811 		next_attr = attr;
3812 	}
3813 
3814 	if (flow_flag_test(flow, SLOW))
3815 		goto out;
3816 
3817 	err = mlx5e_tc_offload_flow_post_acts(flow);
3818 	if (err)
3819 		goto out_free;
3820 
3821 out:
3822 	return 0;
3823 
3824 out_free:
3825 	free_flow_post_acts(flow);
3826 	return err;
3827 }
3828 
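/* Walk the (reordered) flow actions through their per-action handlers.  When
 * a "multi table" action (conntrack, for example) is not the last action,
 * the current attr is finalized and a fresh attr is cloned so the remaining
 * actions execute in the post-action table.
 */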
3829 static int
3830 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
3831 		 struct flow_action *flow_action)
3832 {
3833 	struct netlink_ext_ack *extack = parse_state->extack;
3834 	struct mlx5e_tc_flow_action flow_action_reorder;
3835 	struct mlx5e_tc_flow *flow = parse_state->flow;
3836 	struct mlx5_flow_attr *attr = flow->attr;
3837 	enum mlx5_flow_namespace_type ns_type;
3838 	struct mlx5e_priv *priv = flow->priv;
3839 	struct flow_action_entry *act, **_act;
3840 	struct mlx5e_tc_act *tc_act;
3841 	int err, i;
3842 
3843 	flow_action_reorder.num_entries = flow_action->num_entries;
3844 	flow_action_reorder.entries = kcalloc(flow_action->num_entries,
3845 					      sizeof(flow_action), GFP_KERNEL);
3846 	if (!flow_action_reorder.entries)
3847 		return -ENOMEM;
3848 
3849 	mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
3850 
3851 	ns_type = mlx5e_get_flow_namespace(flow);
3852 	list_add(&attr->list, &flow->attrs);
3853 
3854 	flow_action_for_each(i, _act, &flow_action_reorder) {
3855 		act = *_act;
3856 		tc_act = mlx5e_tc_act_get(act->id, ns_type);
3857 		if (!tc_act) {
3858 			NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
3859 			err = -EOPNOTSUPP;
3860 			goto out_free;
3861 		}
3862 
3863 		if (!tc_act->can_offload(parse_state, act, i, attr)) {
3864 			err = -EOPNOTSUPP;
3865 			goto out_free;
3866 		}
3867 
3868 		err = tc_act->parse_action(parse_state, act, priv, attr);
3869 		if (err)
3870 			goto out_free;
3871 
3872 		parse_state->actions |= attr->action;
3873 
3874 		/* Split attr for multi table act if not the last act. */
3875 		if (tc_act->is_multi_table_act &&
3876 		    tc_act->is_multi_table_act(priv, act, attr) &&
3877 		    i < flow_action_reorder.num_entries - 1) {
3878 			err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
3879 			if (err)
3880 				goto out_free;
3881 
3882 			attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
3883 			if (!attr) {
3884 				err = -ENOMEM;
3885 				goto out_free;
3886 			}
3887 
3888 			list_add(&attr->list, &flow->attrs);
3889 		}
3890 	}
3891 
3892 	kfree(flow_action_reorder.entries);
3893 
3894 	err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
3895 	if (err)
3896 		goto out_free_post_acts;
3897 
3898 	err = alloc_flow_post_acts(flow, extack);
3899 	if (err)
3900 		goto out_free_post_acts;
3901 
3902 	return 0;
3903 
3904 out_free:
3905 	kfree(flow_action_reorder.entries);
3906 out_free_post_acts:
3907 	free_flow_post_acts(flow);
3908 
3909 	return err;
3910 }
3911 
3912 static int
3913 flow_action_supported(struct flow_action *flow_action,
3914 		      struct netlink_ext_ack *extack)
3915 {
3916 	if (!flow_action_has_entries(flow_action)) {
3917 		NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
3918 		return -EINVAL;
3919 	}
3920 
3921 	if (!flow_action_hw_stats_check(flow_action, extack,
3922 					FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
3923 		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
3924 		return -EOPNOTSUPP;
3925 	}
3926 
3927 	return 0;
3928 }
3929 
3930 static int
3931 parse_tc_nic_actions(struct mlx5e_priv *priv,
3932 		     struct flow_action *flow_action,
3933 		     struct mlx5e_tc_flow *flow,
3934 		     struct netlink_ext_ack *extack)
3935 {
3936 	struct mlx5e_tc_act_parse_state *parse_state;
3937 	struct mlx5e_tc_flow_parse_attr *parse_attr;
3938 	struct mlx5_flow_attr *attr = flow->attr;
3939 	int err;
3940 
3941 	err = flow_action_supported(flow_action, extack);
3942 	if (err)
3943 		return err;
3944 
3945 	attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3946 	parse_attr = attr->parse_attr;
3947 	parse_state = &parse_attr->parse_state;
3948 	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
3949 	parse_state->ct_priv = get_ct_priv(priv);
3950 
3951 	err = parse_tc_actions(parse_state, flow_action);
3952 	if (err)
3953 		return err;
3954 
3955 	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
3956 	if (err)
3957 		return err;
3958 
3959 	if (!actions_match_supported(priv, flow_action, parse_state->actions,
3960 				     parse_attr, flow, extack))
3961 		return -EOPNOTSUPP;
3962 
3963 	return 0;
3964 }
3965 
3966 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3967 				  struct net_device *peer_netdev)
3968 {
3969 	struct mlx5e_priv *peer_priv;
3970 
3971 	peer_priv = netdev_priv(peer_netdev);
3972 
3973 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3974 		mlx5e_eswitch_vf_rep(priv->netdev) &&
3975 		mlx5e_eswitch_vf_rep(peer_netdev) &&
3976 		mlx5e_same_hw_devs(priv, peer_priv));
3977 }
3978 
3979 static bool same_hw_reps(struct mlx5e_priv *priv,
3980 			 struct net_device *peer_netdev)
3981 {
3982 	struct mlx5e_priv *peer_priv;
3983 
3984 	peer_priv = netdev_priv(peer_netdev);
3985 
3986 	return mlx5e_eswitch_rep(priv->netdev) &&
3987 	       mlx5e_eswitch_rep(peer_netdev) &&
3988 	       mlx5e_same_hw_devs(priv, peer_priv);
3989 }
3990 
3991 static bool is_lag_dev(struct mlx5e_priv *priv,
3992 		       struct net_device *peer_netdev)
3993 {
3994 	return ((mlx5_lag_is_sriov(priv->mdev) ||
3995 		 mlx5_lag_is_multipath(priv->mdev)) &&
3996 		 same_hw_reps(priv, peer_netdev));
3997 }
3998 
3999 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
4000 {
4001 	if (same_hw_reps(priv, out_dev) &&
4002 	    MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
4003 	    MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
4004 		return true;
4005 
4006 	return false;
4007 }
4008 
4009 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
4010 				    struct net_device *out_dev)
4011 {
4012 	if (is_merged_eswitch_vfs(priv, out_dev))
4013 		return true;
4014 
4015 	if (is_multiport_eligible(priv, out_dev))
4016 		return true;
4017 
4018 	if (is_lag_dev(priv, out_dev))
4019 		return true;
4020 
4021 	return mlx5e_eswitch_rep(out_dev) &&
4022 	       same_port_devs(priv, netdev_priv(out_dev));
4023 }
4024 
4025 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
4026 				      struct mlx5_flow_attr *attr,
4027 				      int ifindex,
4028 				      enum mlx5e_tc_int_port_type type,
4029 				      u32 *action,
4030 				      int out_index)
4031 {
4032 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4033 	struct mlx5e_tc_int_port_priv *int_port_priv;
4034 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4035 	struct mlx5e_tc_int_port *dest_int_port;
4036 	int err;
4037 
4038 	parse_attr = attr->parse_attr;
4039 	int_port_priv = mlx5e_get_int_port_priv(priv);
4040 
4041 	dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
4042 	if (IS_ERR(dest_int_port))
4043 		return PTR_ERR(dest_int_port);
4044 
4045 	err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
4046 					MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
4047 					mlx5e_tc_int_port_get_metadata(dest_int_port));
4048 	if (err) {
4049 		mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
4050 		return err;
4051 	}
4052 
4053 	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4054 
4055 	esw_attr->dest_int_port = dest_int_port;
4056 	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
4057 
4058 	/* Forward to root fdb for matching against the new source vport */
4059 	attr->dest_chain = 0;
4060 
4061 	return 0;
4062 }
4063 
4064 static int
4065 parse_tc_fdb_actions(struct mlx5e_priv *priv,
4066 		     struct flow_action *flow_action,
4067 		     struct mlx5e_tc_flow *flow,
4068 		     struct netlink_ext_ack *extack)
4069 {
4070 	struct mlx5e_tc_act_parse_state *parse_state;
4071 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4072 	struct mlx5_flow_attr *attr = flow->attr;
4073 	struct mlx5_esw_flow_attr *esw_attr;
4074 	struct net_device *filter_dev;
4075 	int err;
4076 
4077 	err = flow_action_supported(flow_action, extack);
4078 	if (err)
4079 		return err;
4080 
4081 	esw_attr = attr->esw_attr;
4082 	parse_attr = attr->parse_attr;
4083 	filter_dev = parse_attr->filter_dev;
4084 	parse_state = &parse_attr->parse_state;
4085 	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4086 	parse_state->ct_priv = get_ct_priv(priv);
4087 
4088 	err = parse_tc_actions(parse_state, flow_action);
4089 	if (err)
4090 		return err;
4091 
4092 	/* Forward to/from internal port can only have 1 dest */
4093 	if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
4094 	    esw_attr->out_count > 1) {
4095 		NL_SET_ERR_MSG_MOD(extack,
4096 				   "Rules with internal port can have only one destination");
4097 		return -EOPNOTSUPP;
4098 	}
4099 
4100 	/* Forward from tunnel/internal port to internal port is not supported */
4101 	if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
4102 	    esw_attr->dest_int_port) {
4103 		NL_SET_ERR_MSG_MOD(extack,
4104 				   "Forwarding from tunnel/internal port to internal port is not supported");
4105 		return -EOPNOTSUPP;
4106 	}
4107 
4108 	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4109 	if (err)
4110 		return err;
4111 
4112 	if (!actions_match_supported(priv, flow_action, parse_state->actions,
4113 				     parse_attr, flow, extack))
4114 		return -EOPNOTSUPP;
4115 
4116 	return 0;
4117 }
4118 
4119 static void get_flags(int flags, unsigned long *flow_flags)
4120 {
4121 	unsigned long __flow_flags = 0;
4122 
4123 	if (flags & MLX5_TC_FLAG(INGRESS))
4124 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4125 	if (flags & MLX5_TC_FLAG(EGRESS))
4126 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4127 
4128 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4129 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4130 	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4131 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4132 	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4133 		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4134 
4135 	*flow_flags = __flow_flags;
4136 }
4137 
4138 static const struct rhashtable_params tc_ht_params = {
4139 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
4140 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4141 	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4142 	.automatic_shrinking = true,
4143 };
4144 
4145 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4146 				    unsigned long flags)
4147 {
4148 	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
4149 	struct mlx5e_rep_priv *rpriv;
4150 
4151 	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4152 		rpriv = priv->ppriv;
4153 		return &rpriv->tc_ht;
4154 	} else /* NIC offload */
4155 		return &tc->ht;
4156 }
4157 
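/* With VF LAG or multipath the two eswitches are paired through devcom.
 * A flow that ingresses on a VF representor (or carries an encap action)
 * is duplicated onto the peer eswitch so traffic can be handled on either
 * physical port.
 */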
4158 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4159 {
4160 	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4161 	struct mlx5_flow_attr *attr = flow->attr;
4162 	bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4163 		flow_flag_test(flow, INGRESS);
4164 	bool act_is_encap = !!(attr->action &
4165 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4166 	bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4167 						MLX5_DEVCOM_ESW_OFFLOADS);
4168 
4169 	if (!esw_paired)
4170 		return false;
4171 
4172 	if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4173 	     mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4174 	    (is_rep_ingress || act_is_encap))
4175 		return true;
4176 
4177 	return false;
4178 }
4179 
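/* The flow attr is allocated with trailing room for either the eswitch (FDB)
 * or the NIC specific attributes, selected by the namespace type.
 */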
4180 struct mlx5_flow_attr *
4181 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4182 {
4183 	u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB)  ?
4184 				sizeof(struct mlx5_esw_flow_attr) :
4185 				sizeof(struct mlx5_nic_flow_attr);
4186 	struct mlx5_flow_attr *attr;
4187 
4188 	attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4189 	if (!attr)
4190 		return attr;
4191 
4192 	INIT_LIST_HEAD(&attr->list);
4193 	return attr;
4194 }
4195 
4196 static int
4197 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4198 		 struct flow_cls_offload *f, unsigned long flow_flags,
4199 		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4200 		 struct mlx5e_tc_flow **__flow)
4201 {
4202 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4203 	struct mlx5_flow_attr *attr;
4204 	struct mlx5e_tc_flow *flow;
4205 	int err = -ENOMEM;
4206 	int out_index;
4207 
4208 	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4209 	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4210 	if (!parse_attr || !flow)
4211 		goto err_free;
4212 
4213 	flow->flags = flow_flags;
4214 	flow->cookie = f->cookie;
4215 	flow->priv = priv;
4216 
4217 	attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
4218 	if (!attr)
4219 		goto err_free;
4220 
4221 	flow->attr = attr;
4222 
4223 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4224 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
4225 	INIT_LIST_HEAD(&flow->hairpin);
4226 	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4227 	INIT_LIST_HEAD(&flow->attrs);
4228 	refcount_set(&flow->refcnt, 1);
4229 	init_completion(&flow->init_done);
4230 	init_completion(&flow->del_hw_done);
4231 
4232 	*__flow = flow;
4233 	*__parse_attr = parse_attr;
4234 
4235 	return 0;
4236 
4237 err_free:
4238 	kfree(flow);
4239 	kvfree(parse_attr);
4240 	return err;
4241 }
4242 
4243 static void
4244 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4245 		     struct mlx5e_tc_flow_parse_attr *parse_attr,
4246 		     struct flow_cls_offload *f)
4247 {
4248 	attr->parse_attr = parse_attr;
4249 	attr->chain = f->common.chain_index;
4250 	attr->prio = f->common.prio;
4251 }
4252 
4253 static void
4254 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4255 			 struct mlx5e_priv *priv,
4256 			 struct mlx5e_tc_flow_parse_attr *parse_attr,
4257 			 struct flow_cls_offload *f,
4258 			 struct mlx5_eswitch_rep *in_rep,
4259 			 struct mlx5_core_dev *in_mdev)
4260 {
4261 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4262 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4263 
4264 	mlx5e_flow_attr_init(attr, parse_attr, f);
4265 
4266 	esw_attr->in_rep = in_rep;
4267 	esw_attr->in_mdev = in_mdev;
4268 
4269 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4270 	    MLX5_COUNTER_SOURCE_ESWITCH)
4271 		esw_attr->counter_dev = in_mdev;
4272 	else
4273 		esw_attr->counter_dev = priv->mdev;
4274 }
4275 
4276 static struct mlx5e_tc_flow *
4277 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4278 		     struct flow_cls_offload *f,
4279 		     unsigned long flow_flags,
4280 		     struct net_device *filter_dev,
4281 		     struct mlx5_eswitch_rep *in_rep,
4282 		     struct mlx5_core_dev *in_mdev)
4283 {
4284 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4285 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4286 	struct netlink_ext_ack *extack = f->common.extack;
4287 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4288 	struct mlx5e_tc_flow *flow;
4289 	int attr_size, err;
4290 
4291 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4292 	attr_size  = sizeof(struct mlx5_esw_flow_attr);
4293 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4294 			       &parse_attr, &flow);
4295 	if (err)
4296 		goto out;
4297 
4298 	parse_attr->filter_dev = filter_dev;
4299 	mlx5e_flow_esw_attr_init(flow->attr,
4300 				 priv, parse_attr,
4301 				 f, in_rep, in_mdev);
4302 
4303 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4304 			       f, filter_dev);
4305 	if (err)
4306 		goto err_free;
4307 
4308 	/* actions validation depends on parsing the ct matches first */
4309 	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4310 				   &flow->attr->ct_attr, extack);
4311 	if (err)
4312 		goto err_free;
4313 
4314 	/* always set IP version for indirect table handling */
4315 	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
4316 
4317 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
4318 	if (err)
4319 		goto err_free;
4320 
4321 	if (flow->attr->lag.count) {
4322 		err = mlx5_lag_add_mpesw_rule(esw->dev);
4323 		if (err)
4324 			goto err_free;
4325 	}
4326 
4327 	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4328 	complete_all(&flow->init_done);
4329 	if (err) {
4330 		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4331 			goto err_lag;
4332 
4333 		add_unready_flow(flow);
4334 	}
4335 
4336 	return flow;
4337 
4338 err_lag:
4339 	if (flow->attr->lag.count)
4340 		mlx5_lag_del_mpesw_rule(esw->dev);
4341 err_free:
4342 	mlx5e_flow_put(priv, flow);
4343 out:
4344 	return ERR_PTR(err);
4345 }
4346 
4347 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4348 				      struct mlx5e_tc_flow *flow,
4349 				      unsigned long flow_flags)
4350 {
4351 	struct mlx5e_priv *priv = flow->priv, *peer_priv;
4352 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4353 	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4354 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4355 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4356 	struct mlx5e_rep_priv *peer_urpriv;
4357 	struct mlx5e_tc_flow *peer_flow;
4358 	struct mlx5_core_dev *in_mdev;
4359 	int err = 0;
4360 
4361 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4362 	if (!peer_esw)
4363 		return -ENODEV;
4364 
4365 	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4366 	peer_priv = netdev_priv(peer_urpriv->netdev);
4367 
4368 	/* in_mdev is the mdev on which the packet originated.
4369 	 * Packets redirected to the uplink use the same mdev as the
4370 	 * original flow, while packets redirected from the uplink use
4371 	 * the peer mdev.
4372 	 */
4373 	if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
4374 		in_mdev = peer_priv->mdev;
4375 	else
4376 		in_mdev = priv->mdev;
4377 
4378 	parse_attr = flow->attr->parse_attr;
4379 	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4380 					 parse_attr->filter_dev,
4381 					 attr->in_rep, in_mdev);
4382 	if (IS_ERR(peer_flow)) {
4383 		err = PTR_ERR(peer_flow);
4384 		goto out;
4385 	}
4386 
4387 	flow->peer_flow = peer_flow;
4388 	flow_flag_set(flow, DUP);
4389 	mutex_lock(&esw->offloads.peer_mutex);
4390 	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4391 	mutex_unlock(&esw->offloads.peer_mutex);
4392 
4393 out:
4394 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4395 	return err;
4396 }
4397 
4398 static int
4399 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4400 		   struct flow_cls_offload *f,
4401 		   unsigned long flow_flags,
4402 		   struct net_device *filter_dev,
4403 		   struct mlx5e_tc_flow **__flow)
4404 {
4405 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4406 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4407 	struct mlx5_core_dev *in_mdev = priv->mdev;
4408 	struct mlx5e_tc_flow *flow;
4409 	int err;
4410 
4411 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4412 				    in_mdev);
4413 	if (IS_ERR(flow))
4414 		return PTR_ERR(flow);
4415 
4416 	if (is_peer_flow_needed(flow)) {
4417 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4418 		if (err) {
4419 			mlx5e_tc_del_fdb_flow(priv, flow);
4420 			goto out;
4421 		}
4422 	}
4423 
4424 	*__flow = flow;
4425 
4426 	return 0;
4427 
4428 out:
4429 	return err;
4430 }
4431 
4432 static int
4433 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4434 		   struct flow_cls_offload *f,
4435 		   unsigned long flow_flags,
4436 		   struct net_device *filter_dev,
4437 		   struct mlx5e_tc_flow **__flow)
4438 {
4439 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4440 	struct netlink_ext_ack *extack = f->common.extack;
4441 	struct mlx5e_tc_flow_parse_attr *parse_attr;
4442 	struct mlx5e_tc_flow *flow;
4443 	int attr_size, err;
4444 
4445 	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
4446 		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4447 			return -EOPNOTSUPP;
4448 	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
4449 		return -EOPNOTSUPP;
4450 	}
4451 
4452 	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4453 	attr_size  = sizeof(struct mlx5_nic_flow_attr);
4454 	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4455 			       &parse_attr, &flow);
4456 	if (err)
4457 		goto out;
4458 
4459 	parse_attr->filter_dev = filter_dev;
4460 	mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4461 
4462 	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4463 			       f, filter_dev);
4464 	if (err)
4465 		goto err_free;
4466 
4467 	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4468 				   &flow->attr->ct_attr, extack);
4469 	if (err)
4470 		goto err_free;
4471 
4472 	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
4473 	if (err)
4474 		goto err_free;
4475 
4476 	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
4477 	if (err)
4478 		goto err_free;
4479 
4480 	flow_flag_set(flow, OFFLOADED);
4481 	*__flow = flow;
4482 
4483 	return 0;
4484 
4485 err_free:
4486 	flow_flag_set(flow, FAILED);
4487 	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
4488 	mlx5e_flow_put(priv, flow);
4489 out:
4490 	return err;
4491 }
4492 
4493 static int
4494 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4495 		  struct flow_cls_offload *f,
4496 		  unsigned long flags,
4497 		  struct net_device *filter_dev,
4498 		  struct mlx5e_tc_flow **flow)
4499 {
4500 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4501 	unsigned long flow_flags;
4502 	int err;
4503 
4504 	get_flags(flags, &flow_flags);
4505 
4506 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4507 		return -EOPNOTSUPP;
4508 
4509 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4510 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4511 					 filter_dev, flow);
4512 	else
4513 		err = mlx5e_add_nic_flow(priv, f, flow_flags,
4514 					 filter_dev, flow);
4515 
4516 	return err;
4517 }
4518 
4519 static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
4520 					   struct mlx5e_rep_priv *rpriv)
4521 {
4522 	/* Offloaded flow rule is allowed to duplicate on non-uplink representor
4523 	 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
4524 	 * function is called from NIC mode.
4525 	 */
4526 	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
4527 }
4528 
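/* Entry point for flower classifier offload.  An illustrative tc command
 * that reaches this function through the tc block callbacks (assuming a
 * representor netdev):
 *
 *   tc filter add dev <rep> ingress protocol ip prio 1 flower \
 *       dst_ip 10.0.0.1 action drop
 */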
4529 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4530 			   struct flow_cls_offload *f, unsigned long flags)
4531 {
4532 	struct netlink_ext_ack *extack = f->common.extack;
4533 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4534 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4535 	struct mlx5e_tc_flow *flow;
4536 	int err = 0;
4537 
4538 	if (!mlx5_esw_hold(priv->mdev))
4539 		return -EBUSY;
4540 
4541 	mlx5_esw_get(priv->mdev);
4542 
4543 	rcu_read_lock();
4544 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4545 	if (flow) {
4546 		/* Same flow rule offloaded to non-uplink representor sharing tc block,
4547 		 * just return 0.
4548 		 */
4549 		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
4550 			goto rcu_unlock;
4551 
4552 		NL_SET_ERR_MSG_MOD(extack,
4553 				   "flow cookie already exists, ignoring");
4554 		netdev_warn_once(priv->netdev,
4555 				 "flow cookie %lx already exists, ignoring\n",
4556 				 f->cookie);
4557 		err = -EEXIST;
4558 		goto rcu_unlock;
4559 	}
4560 rcu_unlock:
4561 	rcu_read_unlock();
4562 	if (flow)
4563 		goto out;
4564 
4565 	trace_mlx5e_configure_flower(f);
4566 	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4567 	if (err)
4568 		goto out;
4569 
4570 	/* Flow rule offloaded to non-uplink representor sharing tc block,
4571 	 * set the flow's owner dev.
4572 	 */
4573 	if (is_flow_rule_duplicate_allowed(dev, rpriv))
4574 		flow->orig_dev = dev;
4575 
4576 	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4577 	if (err)
4578 		goto err_free;
4579 
4580 	mlx5_esw_release(priv->mdev);
4581 	return 0;
4582 
4583 err_free:
4584 	mlx5e_flow_put(priv, flow);
4585 out:
4586 	mlx5_esw_put(priv->mdev);
4587 	mlx5_esw_release(priv->mdev);
4588 	return err;
4589 }
4590 
4591 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
4592 {
4593 	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4594 	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
4595 
4596 	return flow_flag_test(flow, INGRESS) == dir_ingress &&
4597 		flow_flag_test(flow, EGRESS) == dir_egress;
4598 }
4599 
4600 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
4601 			struct flow_cls_offload *f, unsigned long flags)
4602 {
4603 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4604 	struct mlx5e_tc_flow *flow;
4605 	int err;
4606 
4607 	rcu_read_lock();
4608 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4609 	if (!flow || !same_flow_direction(flow, flags)) {
4610 		err = -EINVAL;
4611 		goto errout;
4612 	}
4613 
4614 	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
4615 	 * set.
4616 	 */
4617 	if (flow_flag_test_and_set(flow, DELETED)) {
4618 		err = -EINVAL;
4619 		goto errout;
4620 	}
4621 	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4622 	rcu_read_unlock();
4623 
4624 	trace_mlx5e_delete_flower(f);
4625 	mlx5e_flow_put(priv, flow);
4626 
4627 	mlx5_esw_put(priv->mdev);
4628 	return 0;
4629 
4630 errout:
4631 	rcu_read_unlock();
4632 	return err;
4633 }
4634 
4635 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
4636 		       struct flow_cls_offload *f, unsigned long flags)
4637 {
4638 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4639 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4640 	struct mlx5_eswitch *peer_esw;
4641 	struct mlx5e_tc_flow *flow;
4642 	struct mlx5_fc *counter;
4643 	u64 lastuse = 0;
4644 	u64 packets = 0;
4645 	u64 bytes = 0;
4646 	int err = 0;
4647 
4648 	rcu_read_lock();
4649 	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
4650 						tc_ht_params));
4651 	rcu_read_unlock();
4652 	if (IS_ERR(flow))
4653 		return PTR_ERR(flow);
4654 
4655 	if (!same_flow_direction(flow, flags)) {
4656 		err = -EINVAL;
4657 		goto errout;
4658 	}
4659 
4660 	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4661 		counter = mlx5e_tc_get_counter(flow);
4662 		if (!counter)
4663 			goto errout;
4664 
4665 		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
4666 	}
4667 
4668 	/* Under multipath it's possible for one rule to be currently
4669 	 * un-offloaded while the other rule is offloaded.
4670 	 */
4671 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4672 	if (!peer_esw)
4673 		goto out;
4674 
4675 	if (flow_flag_test(flow, DUP) &&
4676 	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
4677 		u64 bytes2;
4678 		u64 packets2;
4679 		u64 lastuse2;
4680 
4681 		counter = mlx5e_tc_get_counter(flow->peer_flow);
4682 		if (!counter)
4683 			goto no_peer_counter;
4684 		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
4685 
4686 		bytes += bytes2;
4687 		packets += packets2;
4688 		lastuse = max_t(u64, lastuse, lastuse2);
4689 	}
4690 
4691 no_peer_counter:
4692 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4693 out:
4694 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
4695 			  FLOW_ACTION_HW_STATS_DELAYED);
4696 	trace_mlx5e_stats_flower(f);
4697 errout:
4698 	mlx5e_flow_put(priv, flow);
4699 	return err;
4700 }
4701 
4702 static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
4703 			       struct netlink_ext_ack *extack)
4704 {
4705 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4706 	struct mlx5_eswitch *esw;
4707 	u32 rate_mbps = 0;
4708 	u16 vport_num;
4709 	int err;
4710 
4711 	vport_num = rpriv->rep->vport;
4712 	if (vport_num >= MLX5_VPORT_ECPF) {
4713 		NL_SET_ERR_MSG_MOD(extack,
4714 				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4715 		return -EOPNOTSUPP;
4716 	}
4717 
4718 	esw = priv->mdev->priv.eswitch;
4719 	/* rate is given in bytes/sec.
4720 	 * First convert to bits/sec and then round to the nearest mbit/sec.
4721 	 * mbit means million bits.
4722 	 * Moreover, if rate is non zero we choose to configure to a minimum of
4723 	 * 1 mbit/sec.
4724 	 */
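	/* For example (illustrative): rate = 1,250,000 bytes/sec is
	 * 10,000,000 bits/sec and is programmed as rate_mbps = 10.
	 */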
4725 	if (rate) {
4726 		rate = (rate * BITS_PER_BYTE) + 500000;
4727 		do_div(rate, 1000000);
4728 		rate_mbps = max_t(u32, rate, 1);
4729 	}
4730 
4731 	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
4732 	if (err)
4733 		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4734 
4735 	return err;
4736 }
4737 
4738 int mlx5e_policer_validate(const struct flow_action *action,
4739 			   const struct flow_action_entry *act,
4740 			   struct netlink_ext_ack *extack)
4741 {
4742 	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
4743 		NL_SET_ERR_MSG_MOD(extack,
4744 				   "Offload not supported when exceed action is not drop");
4745 		return -EOPNOTSUPP;
4746 	}
4747 
4748 	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
4749 	    !flow_action_is_last_entry(action, act)) {
4750 		NL_SET_ERR_MSG_MOD(extack,
4751 				   "Offload not supported when conform action is ok, but action is not last");
4752 		return -EOPNOTSUPP;
4753 	}
4754 
4755 	if (act->police.peakrate_bytes_ps ||
4756 	    act->police.avrate || act->police.overhead) {
4757 		NL_SET_ERR_MSG_MOD(extack,
4758 				   "Offload not supported when peakrate/avrate/overhead is configured");
4759 		return -EOPNOTSUPP;
4760 	}
4761 
4762 	return 0;
4763 }
4764 
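/* matchall offload accepts a single police action whose conform action is
 * "continue" and whose exceed action is "drop".  An illustrative command:
 *
 *   tc filter add dev <rep> ingress prio 1 matchall \
 *       action police rate 100mbit burst 64k conform-exceed drop/continue
 */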
4765 static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4766 					struct flow_action *flow_action,
4767 					struct netlink_ext_ack *extack)
4768 {
4769 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
4770 	const struct flow_action_entry *act;
4771 	int err;
4772 	int i;
4773 
4774 	if (!flow_action_has_entries(flow_action)) {
4775 		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4776 		return -EINVAL;
4777 	}
4778 
4779 	if (!flow_offload_has_one_action(flow_action)) {
4780 		NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
4781 		return -EOPNOTSUPP;
4782 	}
4783 
4784 	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
4785 		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
4786 		return -EOPNOTSUPP;
4787 	}
4788 
4789 	flow_action_for_each(i, act, flow_action) {
4790 		switch (act->id) {
4791 		case FLOW_ACTION_POLICE:
4792 			if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
4793 				NL_SET_ERR_MSG_MOD(extack,
4794 						   "Offload not supported when conform action is not continue");
4795 				return -EOPNOTSUPP;
4796 			}
4797 
4798 			err = mlx5e_policer_validate(flow_action, act, extack);
4799 			if (err)
4800 				return err;
4801 
4802 			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4803 			if (err)
4804 				return err;
4805 
4806 			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4807 			break;
4808 		default:
4809 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4810 			return -EOPNOTSUPP;
4811 		}
4812 	}
4813 
4814 	return 0;
4815 }
4816 
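/* Entry point for tc matchall offload on a representor: only priority 1 is
 * accepted, after which the action list is validated and applied.
 */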
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

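/* Report matchall statistics as the delta of the VF vport counters since the
 * previous query, then remember the current counters for the next round.
 */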
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}

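/* Called when a peer device sharing the same hardware goes away: take a
 * reference on every hairpin entry, wait for each entry to finish
 * initializing, and clear the pairing towards the peer's vhca_id before
 * dropping the references again.
 */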
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&tc->hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}
}

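/* Netdevice notifier: react only to the unregistration of another mlx5e
 * netdev, and only while tc offload (NETIF_F_HW_TC) is enabled on this
 * device, by clearing hairpin state that points at the departing peer.
 */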
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	priv = tc->priv;
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

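/* Size the NIC tc flow table from the device caps: each of the
 * MLX5E_TC_TABLE_NUM_GROUPS groups is bounded by the number of flow counters
 * (and by MLX5E_TC_TABLE_MAX_GROUP_SIZE), and the total is capped by
 * log_max_ft_size. For example, with 2^20 counters and log_max_ft_size = 20
 * (illustrative values), the group size is 2^18 and the table size is 2^20.
 */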
static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}

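/* Create a single-entry flow table at MLX5E_TC_MISS_LEVEL; it is later wired
 * up as the default (miss) destination of the tc chains, so traffic that does
 * not match any offloaded rule keeps flowing through the regular steering.
 */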
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_table **ft = &tc->miss_t;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_TC_MISS_LEVEL;
	ft_attr.prio = 0;
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
	}

	return err;
}

static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	mlx5_destroy_flow_table(tc->miss_t);
}

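/* Initialize NIC-mode tc offload: the flow rhashtable, the chain-tag mapping
 * context, the miss table, the fs_chains instance that serves as the nic tc
 * root, post-action and connection-tracking support, and finally the netdev
 * notifier used to catch dying hairpin peers. Errors unwind in reverse order.
 */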
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);
	tc->priv = priv;

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);

	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = tc->miss_t;
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
	mlx5e_tc_nic_destroy_miss_table(priv);
}

int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	return 0;
}

void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

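/* Initialize eswitch (FDB) tc offload for the uplink representor: post-action
 * and connection-tracking contexts, internal port and sampling support, the
 * tunnel and tunnel-encap-options mapping contexts, and the tunnel encap
 * infrastructure. On failure, everything set up so far is unwound.
 */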
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err = 0;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);

	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* Two last values are reserved for stack devices slow path table mark
	 * and bridge ingress push mark.
	 */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}

void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

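/* Worker that retries offloading flows which previously failed to be added to
 * the FDB; any flow that now offloads successfully is removed from the
 * unready list.
 */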
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}

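/* On receive, recover tc state from the flow-table metadata reported in the
 * CQE: map the chain tag back to the tc chain, record it in the tc skb
 * extension, and restore conntrack state from the zone-restore bits. Returns
 * false if the state cannot be restored.
 */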
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5_mapped_obj mapped_obj;
	struct tc_skb_ext *tc_skb_ext;
	struct mlx5e_tc_table *tc;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);
	tc = mlx5e_fs_get_tc(priv->fs);
	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		chain = mapped_obj.chain;
		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;

		zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
			ESW_ZONE_ID_MASK;

		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}