// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies Ltd */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#include "fs_ft_pool.h"
#include "esw/qos.h"

enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

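/* The VEPA table sits at the highest FDB priority and holds at most the two
 * rules installed by _mlx5_eswitch_set_vepa_locked(): one steering uplink
 * traffic into the regular legacy FDB, and one "star" rule hairpinning
 * everything else back out to the uplink.
 */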
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* 2 FTEs (uplink rule + star rule), auto-grouped into up to 2 FGs */
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
	atomic64_set(&esw->user_count, 0);
}

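/* Legacy FDB layout: entries [0, max_fte - 3] form the full-match DMAC
 * address group, entry max_fte - 2 is reserved for the allmulti rule and
 * entry max_fte - 1 for the promisc (catch-all on source port) rule.
 */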
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	ft_attr.max_fte = POOL_NEXT_SIZE;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;
	table_size = fdb->max_fte;

	/* Addresses group: full match on unicast/multicast DMAC */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Reserve the last 2 entries for the allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group: one rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01; /* match only the multicast bit of the DMAC */
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group:
	 * one rule that forwards all traffic unmatched by the previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
	atomic64_set(&esw->user_count, 0);

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

	if (esw->fdb_table.legacy.vepa_star_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
	esw->fdb_table.legacy.vepa_star_rule = NULL;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

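/* vport events legacy mode subscribes to: unicast/multicast address list
 * changes and promiscuous mode changes on each function's vport.
 */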
#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)

int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int ret;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

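/* VEPA (Virtual Ethernet Port Aggregator): instead of switching locally,
 * VF-to-VF traffic is hairpinned through the adjacent external switch. The
 * uplink rule lets traffic arriving from the uplink reach the regular legacy
 * FDB; the lower-priority star rule sends everything else out to the uplink.
 */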
static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		return 0;
	}

	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Uplink rule: forwards traffic received from the uplink to the FDB */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;

	/* Star rule: forward all remaining traffic to the uplink vport */
	memset(&dest, 0, sizeof(dest));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_star_rule = flow_rule;

out:
	kvfree(spec);
	if (err)
		esw_cleanup_vepa_rules(esw);
	return err;
}

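/* Entry point for toggling VEPA mode; typically reached from the PF netdev's
 * ndo_bridge_setlink handler (e.g. "bridge link set dev <pf> hwmode vepa").
 */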
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return -EOPNOTSUPP;

	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	return 0;
}

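/* Per-vport ACLs in legacy mode enforce source MAC spoof checking and VST
 * VLAN handling; each direction also keeps a drop counter consumed by
 * mlx5_esw_query_vport_drop_stats() below.
 */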
int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int ret;

	/* Only non-manager vports need ACLs in legacy mode */
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return 0;

	ret = esw_acl_ingress_lgcy_setup(esw, vport);
	if (ret)
		goto ingress_err;

	ret = esw_acl_egress_lgcy_setup(esw, vport);
	if (ret)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
	return ret;
}

void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}

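/* Drop statistics are reported from the VF netdev's point of view: packets
 * dropped by the vport's egress ACL were headed to the VF and so count as
 * RX drops, while ingress ACL drops count as TX drops.
 */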
int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
				    struct mlx5_vport *vport,
				    struct mlx5_vport_drop_stats *stats)
{
	u64 rx_discard_vport_down, tx_discard_vport_down;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u64 bytes = 0;
	int err = 0;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return 0;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto unlock;

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (vport->ingress.legacy.drop_counter)
		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
			      &stats->tx_dropped, &bytes);

	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		goto unlock;

	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		goto unlock;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

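/* VST (VLAN and QoS) configuration, typically reached via ndo_set_vf_vlan
 * ("ip link set <pf> vf N vlan V qos Q"). A VLAN of 0 clears the setting and
 * must remain a no-op outside legacy mode, for compatibility with libvirt.
 */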
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return vlan ? -EPERM : 0;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		if (!vlan)
			goto unlock; /* compatibility with libvirt */

		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

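/* Toggling spoofchk rebuilds the vport's ingress ACL so that, when enabled,
 * only frames sourced from the administratively assigned MAC pass; on
 * failure the previous setting is restored.
 */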
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool pschk;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

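/* "Trust" gates privileged vport behaviour (notably whether promiscuous and
 * allmulti requests from the VF are honoured); the change handler re-applies
 * the vport's rx mode under the new setting.
 */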
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport, bool setting)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
	mutex_unlock(&esw->state_lock);
	return err;
}