// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/vxlan.h>

#include "spectrum_span.h"
#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"

struct mlxsw_sp_bridge_ops;

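/* Per-ASIC bridge bookkeeping: the delayed work and interval used for
 * FDB notification processing, the configured ageing time, the list of
 * bridge devices that are currently offloaded, a bitmap of in-use MDB
 * entry indexes and the ops implementing the 802.1Q, 802.1D and
 * 802.1AD bridge models. Only a single VLAN-aware bridge can be
 * offloaded at a time, which is what 'vlan_enabled_exists' tracks.
 */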
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};

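/* State kept for each bridge net_device that is offloaded: its ports,
 * its MDB entries, the VLAN filtering / multicast / mrouter
 * configuration and the model-specific ops selected when the bridge
 * was created.
 */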
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;
	struct list_head ports_list;
	struct list_head mids_list;
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops;
};

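/* A port (local port or LAG) that is a member of an offloaded bridge.
 * Reference counted; freed once the last user calls
 * mlxsw_sp_bridge_port_put(). Caches the STP state and bridge port
 * flags so they can be applied to each VLAN configured on the port.
 */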
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;
	struct list_head vlans_list;
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;
	bool mrouter;
	bool lagged;
	union {
		u16 lag_id;
		u16 system_port;
	};
};

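/* A single VLAN configured on a bridge port, with the list of
 * {port, VID} entries (mlxsw_sp_port_vlan) currently mapped to it.
 */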
struct mlxsw_sp_bridge_vlan {
	struct list_head list;
	struct list_head port_vlan_list;
	u16 vid;
};

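/* Per bridge model (802.1Q, 802.1D, 802.1AD) operations: joining and
 * leaving of ports and VXLAN devices, and the mapping between VIDs and
 * the FIDs used by the device.
 */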
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};

struct mlxsw_sp_switchdev_ops {
	void (*init)(struct mlxsw_sp *mlxsw_sp);
};

static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);

static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
			    const struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
		if (bridge_device->dev == br_dev)
			return bridge_device;

	return NULL;
}

bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
					 const struct net_device *br_dev)
{
	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}

static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    struct netdev_nested_priv *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv->data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}

static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}

static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}

static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
					      struct net_device *br_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev))
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
					      bool no_delay)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;

	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
			       msecs_to_jiffies(interval));
}

static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		u16 proto;

		bridge->vlan_enabled_exists = true;
		br_vlan_get_proto(br_dev, &proto);
		if (proto == ETH_P_8021AD)
			bridge_device->ops = bridge->bridge_8021ad_ops;
		else
			bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mids_list);
	if (list_empty(&bridge->bridges_list))
		mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	kfree(bridge_device);
	return ERR_PTR(err);
}

static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (list_empty(&bridge->bridges_list))
		cancel_delayed_work(&bridge->fdb_notify.dw);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}

static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev,
			   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (bridge_device)
		return bridge_device;

	return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
}

static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
			   struct mlxsw_sp_bridge_device *bridge_device)
{
	if (list_empty(&bridge_device->ports_list))
		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}

static struct mlxsw_sp_bridge_port *
__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		if (bridge_port->dev == brport_dev)
			return bridge_port;
	}

	return NULL;
}

struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
			  struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!br_dev)
		return NULL;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		return NULL;

	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}

static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = BR_STATE_DISABLED;
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
					    NULL, NULL, NULL, false, extack);
	if (err)
		goto err_switchdev_offload;

	return bridge_port;

err_switchdev_offload:
	list_del(&bridge_port->list);
	kfree(bridge_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}

static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
						  extack);
	if (IS_ERR(bridge_port)) {
		err = PTR_ERR(bridge_port);
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}

static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
				     struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	if (--bridge_port->ref_count != 0)
		return;
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_bridge_port_destroy(bridge_port);
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct mlxsw_sp_bridge_device *
				  bridge_device,
				  u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (!mlxsw_sp_port_vlan->bridge_port)
			continue;
		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
		    bridge_device)
			continue;
		if (bridge_device->vlan_enabled &&
		    mlxsw_sp_port_vlan->vid != vid)
			continue;
		return mlxsw_sp_port_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_port_vlan*
mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
			       u16 fid_index)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
			return mlxsw_sp_port_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
			  u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		if (bridge_vlan->vid == vid)
			return bridge_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
	if (!bridge_vlan)
		return NULL;

	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
	bridge_vlan->vid = vid;
	list_add(&bridge_vlan->list, &bridge_port->vlans_list);

	return bridge_vlan;
}

static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	if (bridge_vlan)
		return bridge_vlan;

	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
}

static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	if (list_empty(&bridge_vlan->port_vlan_list))
		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}

static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}

static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}

static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}

static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}

static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}

static int
mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *orig_dev,
					   struct switchdev_brport_flags flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (flags.mask & BR_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_UC,
							   flags.val & BR_FLOOD);
		if (err)
			return err;
	}

	if (flags.mask & BR_LEARNING) {
		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
							bridge_port,
							flags.val & BR_LEARNING);
		if (err)
			return err;
	}

	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	if (flags.mask & BR_MCAST_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_MC,
							   flags.val & BR_MCAST_FLOOD);
		if (err)
			return err;
	}

out:
	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}

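/* The bridge passes the ageing time in clock_t units; convert it to
 * seconds and make sure it is within the range supported by the device
 * before programming it.
 */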
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
		return -ERANGE;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}

static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
						struct net_device *orig_dev,
						u16 vlan_proto)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
	return -EINVAL;
}

static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}

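/* Decide whether multicast traffic should be flooded to this bridge
 * port: when multicast snooping is enabled on the bridge only mrouter
 * ports are flooded to, otherwise the port's BR_MCAST_FLOOD flag is
 * honoured.
 */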
static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
	const struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = bridge_port->bridge_device;
	return bridge_device->multicast_enabled ? bridge_port->mrouter :
					bridge_port->flags & BR_MCAST_FLOOD;
}

static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}

static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
					 u16 mid_idx, bool add)
{
	char *smid2_pl;
	int err;

	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
	if (!smid2_pl)
		return -ENOMEM;

	mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
			     mlxsw_sp_router_port(mlxsw_sp), add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
	kfree(smid2_pl);
	return err;
}

static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}

static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *orig_dev,
				  bool is_mrouter)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->mrouter != is_mrouter)
		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
						   is_mrouter);
	bridge_device->mrouter = is_mrouter;
	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
							   attr->orig_dev,
							   attr->u.vlan_protocol);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}

static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}

static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}

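/* Work out the PVID that should be configured on the port: the new VID
 * if it is flagged as PVID, no PVID (0) if the current PVID is being
 * re-added without the PVID flag, and the existing PVID otherwise.
 */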
static u16
mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
			     u16 vid, bool is_pvid)
{
	if (is_pvid)
		return vid;
	else if (mlxsw_sp_port->pvid == vid)
		return 0;	/* Dis-allow untagged packets */
	else
		return mlxsw_sp_port->pvid;
}

static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}

void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}

static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	u16 proto;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}

static int
mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *br_dev,
				const struct switchdev_obj_port_vlan *vlan)
{
	u16 pvid;

	pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
	if (!pvid)
		return 0;

	if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
		if (vlan->vid != pvid) {
			netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
			return -EBUSY;
		}
	} else {
		if (vlan->vid == pvid) {
			netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
			return -EBUSY;
		}
	}

	return 0;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if (br_vlan_enabled(orig_dev))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
					     vlan->vid, flag_untagged,
					     flag_pvid, extack);
}

static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}

static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

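/* Write a unicast FDB record that points at a tunnel with an IPv4
 * underlay destination. The number of records read back after the SFD
 * write is compared with what was requested and a mismatch is treated
 * as -EBUSY.
 */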
1306 static int
mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp * mlxsw_sp,bool dynamic,const char * mac,u16 fid,__be32 addr,bool adding)1307 mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
1308 			     const char *mac, u16 fid, __be32 addr, bool adding)
1309 {
1310 	char *sfd_pl;
1311 	u8 num_rec;
1312 	u32 uip;
1313 	int err;
1314 
1315 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1316 	if (!sfd_pl)
1317 		return -ENOMEM;
1318 
1319 	uip = be32_to_cpu(addr);
1320 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1321 	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
1322 				      mlxsw_sp_sfd_rec_policy(dynamic), mac,
1323 				      fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
1324 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1325 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1326 	if (err)
1327 		goto out;
1328 
1329 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1330 		err = -EBUSY;
1331 
1332 out:
1333 	kfree(sfd_pl);
1334 	return err;
1335 }
1336 
mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid,u32 kvdl_index,bool adding)1337 static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
1338 						  const char *mac, u16 fid,
1339 						  u32 kvdl_index, bool adding)
1340 {
1341 	char *sfd_pl;
1342 	u8 num_rec;
1343 	int err;
1344 
1345 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1346 	if (!sfd_pl)
1347 		return -ENOMEM;
1348 
1349 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1350 	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
1351 				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
1352 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1353 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1354 	if (err)
1355 		goto out;
1356 
1357 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1358 		err = -EBUSY;
1359 
1360 out:
1361 	kfree(sfd_pl);
1362 	return err;
1363 }
1364 
mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid,const struct in6_addr * addr)1365 static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
1366 					    const char *mac, u16 fid,
1367 					    const struct in6_addr *addr)
1368 {
1369 	u32 kvdl_index;
1370 	int err;
1371 
1372 	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
1373 	if (err)
1374 		return err;
1375 
1376 	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
1377 						     kvdl_index, true);
1378 	if (err)
1379 		goto err_sfd_write;
1380 
1381 	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
1382 	if (err)
1383 		/* Replace can fail only for creating new mapping, so removing
1384 		 * the FDB entry in the error path is OK.
1385 		 */
1386 		goto err_addr_replace;
1387 
1388 	return 0;
1389 
1390 err_addr_replace:
1391 	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
1392 					       false);
1393 err_sfd_write:
1394 	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
1395 	return err;
1396 }
1397 
mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid,const struct in6_addr * addr)1398 static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
1399 					     const char *mac, u16 fid,
1400 					     const struct in6_addr *addr)
1401 {
1402 	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
1403 	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
1404 	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
1405 }
1406 
1407 static int
mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid,const struct in6_addr * addr,bool adding)1408 mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1409 			     u16 fid, const struct in6_addr *addr, bool adding)
1410 {
1411 	if (adding)
1412 		return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1413 							addr);
1414 
1415 	mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1416 	return 0;
1417 }
1418 
mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid,enum mlxsw_sp_l3proto proto,const union mlxsw_sp_l3addr * addr,bool adding,bool dynamic)1419 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1420 					  const char *mac, u16 fid,
1421 					  enum mlxsw_sp_l3proto proto,
1422 					  const union mlxsw_sp_l3addr *addr,
1423 					  bool adding, bool dynamic)
1424 {
1425 	switch (proto) {
1426 	case MLXSW_SP_L3_PROTO_IPV4:
1427 		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
1428 						    addr->addr4, adding);
1429 	case MLXSW_SP_L3_PROTO_IPV6:
1430 		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
1431 						    &addr->addr6, adding);
1432 	default:
1433 		WARN_ON(1);
1434 		return -EOPNOTSUPP;
1435 	}
1436 }
1437 
__mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp * mlxsw_sp,u16 local_port,const char * mac,u16 fid,bool adding,enum mlxsw_reg_sfd_rec_action action,enum mlxsw_reg_sfd_rec_policy policy)1438 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1439 				     const char *mac, u16 fid, bool adding,
1440 				     enum mlxsw_reg_sfd_rec_action action,
1441 				     enum mlxsw_reg_sfd_rec_policy policy)
1442 {
1443 	char *sfd_pl;
1444 	u8 num_rec;
1445 	int err;
1446 
1447 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1448 	if (!sfd_pl)
1449 		return -ENOMEM;
1450 
1451 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1452 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1453 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1454 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1455 	if (err)
1456 		goto out;
1457 
1458 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1459 		err = -EBUSY;
1460 
1461 out:
1462 	kfree(sfd_pl);
1463 	return err;
1464 }
1465 
mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp * mlxsw_sp,u16 local_port,const char * mac,u16 fid,bool adding,bool dynamic)1466 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1467 				   const char *mac, u16 fid, bool adding,
1468 				   bool dynamic)
1469 {
1470 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1471 					 MLXSW_REG_SFD_REC_ACTION_NOP,
1472 					 mlxsw_sp_sfd_rec_policy(dynamic));
1473 }
1474 
mlxsw_sp_rif_fdb_op(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid,bool adding)1475 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1476 			bool adding)
1477 {
1478 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1479 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1480 					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1481 }
1482 
mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp * mlxsw_sp,u16 lag_id,const char * mac,u16 fid,u16 lag_vid,bool adding,bool dynamic)1483 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1484 				       const char *mac, u16 fid, u16 lag_vid,
1485 				       bool adding, bool dynamic)
1486 {
1487 	char *sfd_pl;
1488 	u8 num_rec;
1489 	int err;
1490 
1491 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1492 	if (!sfd_pl)
1493 		return -ENOMEM;
1494 
1495 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1496 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1497 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1498 				  lag_vid, lag_id);
1499 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1500 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1501 	if (err)
1502 		goto out;
1503 
1504 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1505 		err = -EBUSY;
1506 
1507 out:
1508 	kfree(sfd_pl);
1509 	return err;
1510 }
1511 
1512 static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port * mlxsw_sp_port,struct switchdev_notifier_fdb_info * fdb_info,bool adding)1513 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1514 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1515 {
1516 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1517 	struct net_device *orig_dev = fdb_info->info.dev;
1518 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1519 	struct mlxsw_sp_bridge_device *bridge_device;
1520 	struct mlxsw_sp_bridge_port *bridge_port;
1521 	u16 fid_index, vid;
1522 
1523 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1524 	if (!bridge_port)
1525 		return -EINVAL;
1526 
1527 	bridge_device = bridge_port->bridge_device;
1528 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1529 							       bridge_device,
1530 							       fdb_info->vid);
1531 	if (!mlxsw_sp_port_vlan)
1532 		return 0;
1533 
1534 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1535 	vid = mlxsw_sp_port_vlan->vid;
1536 
1537 	if (!bridge_port->lagged)
1538 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1539 					       bridge_port->system_port,
1540 					       fdb_info->addr, fid_index,
1541 					       adding, false);
1542 	else
1543 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1544 						   bridge_port->lag_id,
1545 						   fdb_info->addr, fid_index,
1546 						   vid, adding, false);
1547 }
1548 
1549 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1550 				u16 fid, u16 mid_idx, bool adding)
1551 {
1552 	char *sfd_pl;
1553 	u8 num_rec;
1554 	int err;
1555 
1556 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1557 	if (!sfd_pl)
1558 		return -ENOMEM;
1559 
1560 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1561 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1562 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1563 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1564 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1565 	if (err)
1566 		goto out;
1567 
1568 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1569 		err = -EBUSY;
1570 
1571 out:
1572 	kfree(sfd_pl);
1573 	return err;
1574 }
1575 
1576 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1577 					 long *ports_bitmap,
1578 					 bool set_router_port)
1579 {
1580 	char *smid2_pl;
1581 	int err, i;
1582 
1583 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
1584 	if (!smid2_pl)
1585 		return -ENOMEM;
1586 
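	/* The port mask selects which ports this write may update: cover all
	 * existing ports and the router port, then set membership only for
	 * the ports in the flood bitmap (and optionally the router port).
	 */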
1587 	mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
1588 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1589 		if (mlxsw_sp->ports[i])
1590 			mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
1591 	}
1592 
1593 	mlxsw_reg_smid2_port_mask_set(smid2_pl,
1594 				      mlxsw_sp_router_port(mlxsw_sp), 1);
1595 
1596 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1597 		mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
1598 
1599 	mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
1600 				 set_router_port);
1601 
1602 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
1603 	kfree(smid2_pl);
1604 	return err;
1605 }
1606 
1607 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1608 				  u16 mid_idx, bool add)
1609 {
1610 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1611 	char *smid2_pl;
1612 	int err;
1613 
1614 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
1615 	if (!smid2_pl)
1616 		return -ENOMEM;
1617 
1618 	mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
1619 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
1620 	kfree(smid2_pl);
1621 	return err;
1622 }
1623 
1624 static struct
1625 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1626 				const unsigned char *addr,
1627 				u16 fid)
1628 {
1629 	struct mlxsw_sp_mid *mid;
1630 
1631 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1632 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1633 			return mid;
1634 	}
1635 	return NULL;
1636 }
1637 
1638 static void
1639 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1640 				      struct mlxsw_sp_bridge_port *bridge_port,
1641 				      unsigned long *ports_bitmap)
1642 {
1643 	struct mlxsw_sp_port *mlxsw_sp_port;
1644 	u64 max_lag_members, i;
1645 	int lag_id;
1646 
1647 	if (!bridge_port->lagged) {
1648 		set_bit(bridge_port->system_port, ports_bitmap);
1649 	} else {
1650 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1651 						     MAX_LAG_MEMBERS);
1652 		lag_id = bridge_port->lag_id;
1653 		for (i = 0; i < max_lag_members; i++) {
1654 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1655 								 lag_id, i);
1656 			if (mlxsw_sp_port)
1657 				set_bit(mlxsw_sp_port->local_port,
1658 					ports_bitmap);
1659 		}
1660 	}
1661 }
1662 
1663 static void
1664 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1665 				struct mlxsw_sp_bridge_device *bridge_device,
1666 				struct mlxsw_sp *mlxsw_sp)
1667 {
1668 	struct mlxsw_sp_bridge_port *bridge_port;
1669 
1670 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1671 		if (bridge_port->mrouter) {
1672 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1673 							      bridge_port,
1674 							      flood_bitmap);
1675 		}
1676 	}
1677 }
1678 
1679 static bool
1680 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1681 			    struct mlxsw_sp_mid *mid,
1682 			    struct mlxsw_sp_bridge_device *bridge_device)
1683 {
1684 	long *flood_bitmap;
1685 	int num_of_ports;
1686 	u16 mid_idx;
1687 	int err;
1688 
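	/* Pick a free MID index; it is reserved in mids_bitmap only once the
	 * entry has been successfully written to the device.
	 */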
1689 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1690 				      MLXSW_SP_MID_MAX);
1691 	if (mid_idx == MLXSW_SP_MID_MAX)
1692 		return false;
1693 
1694 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1695 	flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
1696 	if (!flood_bitmap)
1697 		return false;
1698 
1699 	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1700 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1701 
1702 	mid->mid = mid_idx;
1703 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1704 					    bridge_device->mrouter);
1705 	bitmap_free(flood_bitmap);
1706 	if (err)
1707 		return false;
1708 
1709 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1710 				   true);
1711 	if (err)
1712 		return false;
1713 
1714 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1715 	mid->in_hw = true;
1716 	return true;
1717 }
1718 
1719 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1720 					struct mlxsw_sp_mid *mid)
1721 {
1722 	if (!mid->in_hw)
1723 		return 0;
1724 
1725 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1726 	mid->in_hw = false;
1727 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1728 				    false);
1729 }
1730 
1731 static struct
1732 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1733 				  struct mlxsw_sp_bridge_device *bridge_device,
1734 				  const unsigned char *addr,
1735 				  u16 fid)
1736 {
1737 	struct mlxsw_sp_mid *mid;
1738 
1739 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1740 	if (!mid)
1741 		return NULL;
1742 
1743 	mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
1744 					  GFP_KERNEL);
1745 	if (!mid->ports_in_mid)
1746 		goto err_ports_in_mid_alloc;
1747 
1748 	ether_addr_copy(mid->addr, addr);
1749 	mid->fid = fid;
1750 	mid->in_hw = false;
1751 
1752 	if (!bridge_device->multicast_enabled)
1753 		goto out;
1754 
1755 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1756 		goto err_write_mdb_entry;
1757 
1758 out:
1759 	list_add_tail(&mid->list, &bridge_device->mids_list);
1760 	return mid;
1761 
1762 err_write_mdb_entry:
1763 	bitmap_free(mid->ports_in_mid);
1764 err_ports_in_mid_alloc:
1765 	kfree(mid);
1766 	return NULL;
1767 }
1768 
1769 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1770 					 struct mlxsw_sp_mid *mid)
1771 {
1772 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1773 	int err = 0;
1774 
1775 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1776 	if (bitmap_empty(mid->ports_in_mid,
1777 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1778 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1779 		list_del(&mid->list);
1780 		bitmap_free(mid->ports_in_mid);
1781 		kfree(mid);
1782 	}
1783 	return err;
1784 }
1785 
1786 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1787 				 const struct switchdev_obj_port_mdb *mdb)
1788 {
1789 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1790 	struct net_device *orig_dev = mdb->obj.orig_dev;
1791 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1792 	struct net_device *dev = mlxsw_sp_port->dev;
1793 	struct mlxsw_sp_bridge_device *bridge_device;
1794 	struct mlxsw_sp_bridge_port *bridge_port;
1795 	struct mlxsw_sp_mid *mid;
1796 	u16 fid_index;
1797 	int err = 0;
1798 
1799 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1800 	if (!bridge_port)
1801 		return 0;
1802 
1803 	bridge_device = bridge_port->bridge_device;
1804 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1805 							       bridge_device,
1806 							       mdb->vid);
1807 	if (!mlxsw_sp_port_vlan)
1808 		return 0;
1809 
1810 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1811 
1812 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1813 	if (!mid) {
1814 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1815 					  fid_index);
1816 		if (!mid) {
1817 			netdev_err(dev, "Unable to allocate MC group\n");
1818 			return -ENOMEM;
1819 		}
1820 	}
1821 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1822 
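	/* No SMID update is needed when multicast snooping is disabled (the
	 * entry is not in hardware) or for mrouter ports, which are already
	 * members of every MID.
	 */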
1823 	if (!bridge_device->multicast_enabled)
1824 		return 0;
1825 
1826 	if (bridge_port->mrouter)
1827 		return 0;
1828 
1829 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1830 	if (err) {
1831 		netdev_err(dev, "Unable to set SMID\n");
1832 		goto err_out;
1833 	}
1834 
1835 	return 0;
1836 
1837 err_out:
1838 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1839 	return err;
1840 }
1841 
1842 static void
1843 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1844 				   struct mlxsw_sp_bridge_device
1845 				   *bridge_device)
1846 {
1847 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1848 	struct mlxsw_sp_mid *mid;
1849 	bool mc_enabled;
1850 
1851 	mc_enabled = bridge_device->multicast_enabled;
1852 
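	/* Sync the hardware MDB with the new snooping state: program every
	 * known entry when snooping is enabled, remove them all otherwise.
	 */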
1853 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1854 		if (mc_enabled)
1855 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1856 						    bridge_device);
1857 		else
1858 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1859 	}
1860 }
1861 
1862 static void
1863 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1864 				 struct mlxsw_sp_bridge_port *bridge_port,
1865 				 bool add)
1866 {
1867 	struct mlxsw_sp_bridge_device *bridge_device;
1868 	struct mlxsw_sp_mid *mid;
1869 
1870 	bridge_device = bridge_port->bridge_device;
1871 
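	/* An mrouter port must receive all multicast traffic, so add it to
	 * (or remove it from) every MID it is not an explicit member of.
	 */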
1872 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1873 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1874 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1875 	}
1876 }
1877 
1878 static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
1879 				 const struct switchdev_obj *obj,
1880 				 struct netlink_ext_ack *extack)
1881 {
1882 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1883 	const struct switchdev_obj_port_vlan *vlan;
1884 	int err = 0;
1885 
1886 	switch (obj->id) {
1887 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1888 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1889 
1890 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
1891 
1892 		/* The event is emitted before the changes are actually
1893 		 * applied to the bridge. Therefore schedule the respin
1894 		 * call for later, so that the respin logic sees the
1895 		 * updated bridge state.
1896 		 */
1897 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1898 		break;
1899 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1900 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1901 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1902 		break;
1903 	default:
1904 		err = -EOPNOTSUPP;
1905 		break;
1906 	}
1907 
1908 	return err;
1909 }
1910 
1911 static void
1912 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1913 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1914 {
1915 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1916 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1917 	u16 proto;
1918 
1919 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1920 	if (WARN_ON(!mlxsw_sp_port_vlan))
1921 		return;
1922 
1923 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1924 	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
1925 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
1926 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1927 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1928 }
1929 
1930 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1931 				   const struct switchdev_obj_port_vlan *vlan)
1932 {
1933 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1934 	struct net_device *orig_dev = vlan->obj.orig_dev;
1935 	struct mlxsw_sp_bridge_port *bridge_port;
1936 
1937 	if (netif_is_bridge_master(orig_dev))
1938 		return -EOPNOTSUPP;
1939 
1940 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1941 	if (WARN_ON(!bridge_port))
1942 		return -EINVAL;
1943 
1944 	if (!bridge_port->bridge_device->vlan_enabled)
1945 		return 0;
1946 
1947 	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
1948 
1949 	return 0;
1950 }
1951 
1952 static int
1953 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1954 			struct mlxsw_sp_bridge_port *bridge_port,
1955 			struct mlxsw_sp_mid *mid)
1956 {
1957 	struct net_device *dev = mlxsw_sp_port->dev;
1958 	int err;
1959 
1960 	if (bridge_port->bridge_device->multicast_enabled &&
1961 	    !bridge_port->mrouter) {
1962 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1963 		if (err)
1964 			netdev_err(dev, "Unable to remove port from SMID\n");
1965 	}
1966 
1967 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1968 	if (err)
1969 		netdev_err(dev, "Unable to remove MC SFD\n");
1970 
1971 	return err;
1972 }
1973 
1974 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1975 				 const struct switchdev_obj_port_mdb *mdb)
1976 {
1977 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1978 	struct net_device *orig_dev = mdb->obj.orig_dev;
1979 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1980 	struct mlxsw_sp_bridge_device *bridge_device;
1981 	struct net_device *dev = mlxsw_sp_port->dev;
1982 	struct mlxsw_sp_bridge_port *bridge_port;
1983 	struct mlxsw_sp_mid *mid;
1984 	u16 fid_index;
1985 
1986 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1987 	if (!bridge_port)
1988 		return 0;
1989 
1990 	bridge_device = bridge_port->bridge_device;
1991 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1992 							       bridge_device,
1993 							       mdb->vid);
1994 	if (!mlxsw_sp_port_vlan)
1995 		return 0;
1996 
1997 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1998 
1999 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
2000 	if (!mid) {
2001 		netdev_err(dev, "Unable to remove port from MC DB\n");
2002 		return -EINVAL;
2003 	}
2004 
2005 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
2006 }
2007 
2008 static void
2009 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
2010 			       struct mlxsw_sp_bridge_port *bridge_port)
2011 {
2012 	struct mlxsw_sp_bridge_device *bridge_device;
2013 	struct mlxsw_sp_mid *mid, *tmp;
2014 
2015 	bridge_device = bridge_port->bridge_device;
2016 
2017 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
2018 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
2019 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
2020 						mid);
2021 		} else if (bridge_device->multicast_enabled &&
2022 			   bridge_port->mrouter) {
2023 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
2024 		}
2025 	}
2026 }
2027 
2028 static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
2029 				 const struct switchdev_obj *obj)
2030 {
2031 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2032 	int err = 0;
2033 
2034 	switch (obj->id) {
2035 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
2036 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
2037 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
2038 		break;
2039 	case SWITCHDEV_OBJ_ID_PORT_MDB:
2040 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
2041 					    SWITCHDEV_OBJ_PORT_MDB(obj));
2042 		break;
2043 	default:
2044 		err = -EOPNOTSUPP;
2045 		break;
2046 	}
2047 
2048 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2049 
2050 	return err;
2051 }
2052 
2053 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
2054 						   u16 lag_id)
2055 {
2056 	struct mlxsw_sp_port *mlxsw_sp_port;
2057 	u64 max_lag_members;
2058 	int i;
2059 
2060 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
2061 					     MAX_LAG_MEMBERS);
2062 	for (i = 0; i < max_lag_members; i++) {
2063 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2064 		if (mlxsw_sp_port)
2065 			return mlxsw_sp_port;
2066 	}
2067 	return NULL;
2068 }
2069 
2070 static int
2071 mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
2072 				     struct mlxsw_sp_port *mlxsw_sp_port,
2073 				     struct netlink_ext_ack *extack)
2074 {
2075 	if (is_vlan_dev(bridge_port->dev)) {
2076 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
2077 		return -EINVAL;
2078 	}
2079 
2080 	/* Port is no longer usable as a router interface */
2081 	if (mlxsw_sp_port->default_vlan->fid)
2082 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
2083 
2084 	return 0;
2085 }
2086 
2087 static int
2088 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2089 				struct mlxsw_sp_bridge_port *bridge_port,
2090 				struct mlxsw_sp_port *mlxsw_sp_port,
2091 				struct netlink_ext_ack *extack)
2092 {
2093 	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
2094 						    extack);
2095 }
2096 
2097 static void
2098 mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
2099 {
2100 	/* Make sure untagged frames are allowed to ingress */
2101 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
2102 			       ETH_P_8021Q);
2103 }
2104 
2105 static void
2106 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2107 				 struct mlxsw_sp_bridge_port *bridge_port,
2108 				 struct mlxsw_sp_port *mlxsw_sp_port)
2109 {
2110 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
2111 }
2112 
2113 static int
2114 mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2115 				      const struct net_device *vxlan_dev,
2116 				      u16 vid, u16 ethertype,
2117 				      struct netlink_ext_ack *extack)
2118 {
2119 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2120 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2121 	struct mlxsw_sp_nve_params params = {
2122 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2123 		.vni = vxlan->cfg.vni,
2124 		.dev = vxlan_dev,
2125 		.ethertype = ethertype,
2126 	};
2127 	struct mlxsw_sp_fid *fid;
2128 	int err;
2129 
2130 	/* If the VLAN is 0, we need to find the VLAN that is configured as
2131 	 * PVID and egress untagged on the bridge port of the VxLAN device.
2132 	 * It is possible no such VLAN exists
2133 	 */
2134 	if (!vid) {
2135 		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
2136 		if (err || !vid)
2137 			return err;
2138 	}
2139 
2140 	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2141 	if (IS_ERR(fid)) {
2142 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
2143 		return PTR_ERR(fid);
2144 	}
2145 
2146 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2147 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2148 		err = -EINVAL;
2149 		goto err_vni_exists;
2150 	}
2151 
2152 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2153 	if (err)
2154 		goto err_nve_fid_enable;
2155 
2156 	return 0;
2157 
2158 err_nve_fid_enable:
2159 err_vni_exists:
2160 	mlxsw_sp_fid_put(fid);
2161 	return err;
2162 }
2163 
2164 static int
2165 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2166 				 const struct net_device *vxlan_dev, u16 vid,
2167 				 struct netlink_ext_ack *extack)
2168 {
2169 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2170 						     vid, ETH_P_8021Q, extack);
2171 }
2172 
2173 static struct net_device *
2174 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2175 {
2176 	struct net_device *dev;
2177 	struct list_head *iter;
2178 
2179 	netdev_for_each_lower_dev(br_dev, dev, iter) {
2180 		u16 pvid;
2181 		int err;
2182 
2183 		if (!netif_is_vxlan(dev))
2184 			continue;
2185 
2186 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2187 		if (err || pvid != vid)
2188 			continue;
2189 
2190 		return dev;
2191 	}
2192 
2193 	return NULL;
2194 }
2195 
2196 static struct mlxsw_sp_fid *
2197 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2198 			      u16 vid, struct netlink_ext_ack *extack)
2199 {
2200 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2201 
2202 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2203 }
2204 
2205 static struct mlxsw_sp_fid *
2206 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2207 				 u16 vid)
2208 {
2209 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2210 
2211 	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2212 }
2213 
2214 static u16
2215 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2216 			      const struct mlxsw_sp_fid *fid)
2217 {
2218 	return mlxsw_sp_fid_8021q_vid(fid);
2219 }
2220 
2221 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2222 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
2223 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
2224 	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
2225 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2226 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2227 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2228 };
2229 
2230 static bool
2231 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2232 			   const struct net_device *br_dev)
2233 {
2234 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2235 
2236 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2237 			    list) {
2238 		if (mlxsw_sp_port_vlan->bridge_port &&
2239 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2240 		    br_dev)
2241 			return true;
2242 	}
2243 
2244 	return false;
2245 }
2246 
2247 static int
2248 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2249 				struct mlxsw_sp_bridge_port *bridge_port,
2250 				struct mlxsw_sp_port *mlxsw_sp_port,
2251 				struct netlink_ext_ack *extack)
2252 {
2253 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2254 	struct net_device *dev = bridge_port->dev;
2255 	u16 vid;
2256 
2257 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2258 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2259 	if (WARN_ON(!mlxsw_sp_port_vlan))
2260 		return -EINVAL;
2261 
2262 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2263 		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2264 		return -EINVAL;
2265 	}
2266 
2267 	/* Port is no longer usable as a router interface */
2268 	if (mlxsw_sp_port_vlan->fid)
2269 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2270 
2271 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
2272 					      extack);
2273 }
2274 
2275 static void
2276 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2277 				 struct mlxsw_sp_bridge_port *bridge_port,
2278 				 struct mlxsw_sp_port *mlxsw_sp_port)
2279 {
2280 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2281 	struct net_device *dev = bridge_port->dev;
2282 	u16 vid;
2283 
2284 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2285 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2286 	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2287 		return;
2288 
2289 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2290 }
2291 
2292 static int
2293 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2294 				 const struct net_device *vxlan_dev, u16 vid,
2295 				 struct netlink_ext_ack *extack)
2296 {
2297 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2298 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2299 	struct mlxsw_sp_nve_params params = {
2300 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2301 		.vni = vxlan->cfg.vni,
2302 		.dev = vxlan_dev,
2303 		.ethertype = ETH_P_8021Q,
2304 	};
2305 	struct mlxsw_sp_fid *fid;
2306 	int err;
2307 
2308 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2309 	if (IS_ERR(fid)) {
2310 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2311 		return -EINVAL;
2312 	}
2313 
2314 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2315 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2316 		err = -EINVAL;
2317 		goto err_vni_exists;
2318 	}
2319 
2320 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2321 	if (err)
2322 		goto err_nve_fid_enable;
2323 
2324 	return 0;
2325 
2326 err_nve_fid_enable:
2327 err_vni_exists:
2328 	mlxsw_sp_fid_put(fid);
2329 	return err;
2330 }
2331 
2332 static struct mlxsw_sp_fid *
2333 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2334 			      u16 vid, struct netlink_ext_ack *extack)
2335 {
2336 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2337 
2338 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2339 }
2340 
2341 static struct mlxsw_sp_fid *
2342 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2343 				 u16 vid)
2344 {
2345 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2346 
2347 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2348 	if (vid)
2349 		return NULL;
2350 
2351 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2352 }
2353 
2354 static u16
2355 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2356 			      const struct mlxsw_sp_fid *fid)
2357 {
2358 	return 0;
2359 }
2360 
2361 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2362 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
2363 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
2364 	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
2365 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
2366 	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
2367 	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
2368 };
2369 
2370 static int
2371 mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2372 				 struct mlxsw_sp_bridge_port *bridge_port,
2373 				 struct mlxsw_sp_port *mlxsw_sp_port,
2374 				 struct netlink_ext_ack *extack)
2375 {
2376 	int err;
2377 
2378 	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
2379 	if (err)
2380 		return err;
2381 
2382 	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
2383 						   extack);
2384 	if (err)
2385 		goto err_bridge_vlan_aware_port_join;
2386 
2387 	return 0;
2388 
2389 err_bridge_vlan_aware_port_join:
2390 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2391 	return err;
2392 }
2393 
2394 static void
2395 mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2396 				  struct mlxsw_sp_bridge_port *bridge_port,
2397 				  struct mlxsw_sp_port *mlxsw_sp_port)
2398 {
2399 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
2400 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2401 }
2402 
2403 static int
2404 mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2405 				  const struct net_device *vxlan_dev, u16 vid,
2406 				  struct netlink_ext_ack *extack)
2407 {
2408 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2409 						     vid, ETH_P_8021AD, extack);
2410 }
2411 
2412 static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
2413 	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
2414 	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
2415 	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
2416 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2417 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2418 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2419 };
2420 
2421 static int
2422 mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2423 				  struct mlxsw_sp_bridge_port *bridge_port,
2424 				  struct mlxsw_sp_port *mlxsw_sp_port,
2425 				  struct netlink_ext_ack *extack)
2426 {
2427 	int err;
2428 
2429 	/* The EtherType of decapsulated packets is determined at the egress
2430 	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
2431 	 * co-exist.
2432 	 */
2433 	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
2434 	if (err)
2435 		return err;
2436 
2437 	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
2438 					       mlxsw_sp_port, extack);
2439 	if (err)
2440 		goto err_bridge_8021ad_port_join;
2441 
2442 	return 0;
2443 
2444 err_bridge_8021ad_port_join:
2445 	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
2446 	return err;
2447 }
2448 
2449 static void
2450 mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2451 				   struct mlxsw_sp_bridge_port *bridge_port,
2452 				   struct mlxsw_sp_port *mlxsw_sp_port)
2453 {
2454 	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
2455 					  mlxsw_sp_port);
2456 	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
2457 }
2458 
2459 static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
2460 	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
2461 	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
2462 	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
2463 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2464 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2465 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2466 };
2467 
2468 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2469 			      struct net_device *brport_dev,
2470 			      struct net_device *br_dev,
2471 			      struct netlink_ext_ack *extack)
2472 {
2473 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2474 	struct mlxsw_sp_bridge_device *bridge_device;
2475 	struct mlxsw_sp_bridge_port *bridge_port;
2476 	int err;
2477 
2478 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2479 					       extack);
2480 	if (IS_ERR(bridge_port))
2481 		return PTR_ERR(bridge_port);
2482 	bridge_device = bridge_port->bridge_device;
2483 
2484 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2485 					    mlxsw_sp_port, extack);
2486 	if (err)
2487 		goto err_port_join;
2488 
2489 	return 0;
2490 
2491 err_port_join:
2492 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2493 	return err;
2494 }
2495 
2496 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2497 				struct net_device *brport_dev,
2498 				struct net_device *br_dev)
2499 {
2500 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2501 	struct mlxsw_sp_bridge_device *bridge_device;
2502 	struct mlxsw_sp_bridge_port *bridge_port;
2503 
2504 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2505 	if (!bridge_device)
2506 		return;
2507 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2508 	if (!bridge_port)
2509 		return;
2510 
2511 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2512 				       mlxsw_sp_port);
2513 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2514 }
2515 
2516 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2517 			       const struct net_device *br_dev,
2518 			       const struct net_device *vxlan_dev, u16 vid,
2519 			       struct netlink_ext_ack *extack)
2520 {
2521 	struct mlxsw_sp_bridge_device *bridge_device;
2522 
2523 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2524 	if (WARN_ON(!bridge_device))
2525 		return -EINVAL;
2526 
2527 	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
2528 					      extack);
2529 }
2530 
2531 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2532 				 const struct net_device *vxlan_dev)
2533 {
2534 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2535 	struct mlxsw_sp_fid *fid;
2536 
2537 	/* If the VxLAN device is down, then the FID does not have a VNI */
2538 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
2539 	if (!fid)
2540 		return;
2541 
2542 	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2543 	/* Drop both the reference we just took during lookup and the reference
2544 	 * the VXLAN device took.
2545 	 */
2546 	mlxsw_sp_fid_put(fid);
2547 	mlxsw_sp_fid_put(fid);
2548 }
2549 
2550 static void
2551 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2552 				      enum mlxsw_sp_l3proto *proto,
2553 				      union mlxsw_sp_l3addr *addr)
2554 {
2555 	if (vxlan_addr->sa.sa_family == AF_INET) {
2556 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2557 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2558 	} else {
2559 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2560 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2561 	}
2562 }
2563 
2564 static void
2565 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2566 				      const union mlxsw_sp_l3addr *addr,
2567 				      union vxlan_addr *vxlan_addr)
2568 {
2569 	switch (proto) {
2570 	case MLXSW_SP_L3_PROTO_IPV4:
2571 		vxlan_addr->sa.sa_family = AF_INET;
2572 		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2573 		break;
2574 	case MLXSW_SP_L3_PROTO_IPV6:
2575 		vxlan_addr->sa.sa_family = AF_INET6;
2576 		vxlan_addr->sin6.sin6_addr = addr->addr6;
2577 		break;
2578 	}
2579 }
2580 
2581 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2582 					      const char *mac,
2583 					      enum mlxsw_sp_l3proto proto,
2584 					      union mlxsw_sp_l3addr *addr,
2585 					      __be32 vni, bool adding)
2586 {
2587 	struct switchdev_notifier_vxlan_fdb_info info;
2588 	struct vxlan_dev *vxlan = netdev_priv(dev);
2589 	enum switchdev_notifier_type type;
2590 
2591 	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2592 			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2593 	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2594 	info.remote_port = vxlan->cfg.dst_port;
2595 	info.remote_vni = vni;
2596 	info.remote_ifindex = 0;
2597 	ether_addr_copy(info.eth_addr, mac);
2598 	info.vni = vni;
2599 	info.offloaded = adding;
2600 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2601 }
2602 
2603 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2604 					    const char *mac,
2605 					    enum mlxsw_sp_l3proto proto,
2606 					    union mlxsw_sp_l3addr *addr,
2607 					    __be32 vni,
2608 					    bool adding)
2609 {
2610 	if (netif_is_vxlan(dev))
2611 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2612 						  adding);
2613 }
2614 
2615 static void
2616 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2617 			    const char *mac, u16 vid,
2618 			    struct net_device *dev, bool offloaded)
2619 {
2620 	struct switchdev_notifier_fdb_info info = {};
2621 
2622 	info.addr = mac;
2623 	info.vid = vid;
2624 	info.offloaded = offloaded;
2625 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2626 }
2627 
2628 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2629 					    char *sfn_pl, int rec_index,
2630 					    bool adding)
2631 {
2632 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2633 	struct mlxsw_sp_bridge_device *bridge_device;
2634 	struct mlxsw_sp_bridge_port *bridge_port;
2635 	struct mlxsw_sp_port *mlxsw_sp_port;
2636 	enum switchdev_notifier_type type;
2637 	char mac[ETH_ALEN];
2638 	u16 local_port;
2639 	u16 vid, fid;
2640 	bool do_notification = true;
2641 	int err;
2642 
2643 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2644 
2645 	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2646 		return;
2647 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2648 	if (!mlxsw_sp_port) {
2649 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2650 		goto just_remove;
2651 	}
2652 
2653 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2654 		goto just_remove;
2655 
2656 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2657 	if (!mlxsw_sp_port_vlan) {
2658 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2659 		goto just_remove;
2660 	}
2661 
2662 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2663 	if (!bridge_port) {
2664 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2665 		goto just_remove;
2666 	}
2667 
2668 	bridge_device = bridge_port->bridge_device;
2669 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2670 
2671 do_fdb_op:
2672 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2673 				      adding, true);
2674 	if (err) {
2675 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2676 		return;
2677 	}
2678 
2679 	if (!do_notification)
2680 		return;
2681 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2682 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2683 
2684 	return;
2685 
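	/* Remove the entry from hardware without notifying the bridge when
	 * the notification cannot be matched to an offloaded {Port, VID}.
	 */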
2686 just_remove:
2687 	adding = false;
2688 	do_notification = false;
2689 	goto do_fdb_op;
2690 }
2691 
2692 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2693 						char *sfn_pl, int rec_index,
2694 						bool adding)
2695 {
2696 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2697 	struct mlxsw_sp_bridge_device *bridge_device;
2698 	struct mlxsw_sp_bridge_port *bridge_port;
2699 	struct mlxsw_sp_port *mlxsw_sp_port;
2700 	enum switchdev_notifier_type type;
2701 	char mac[ETH_ALEN];
2702 	u16 lag_vid = 0;
2703 	u16 lag_id;
2704 	u16 vid, fid;
2705 	bool do_notification = true;
2706 	int err;
2707 
2708 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2709 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2710 	if (!mlxsw_sp_port) {
2711 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2712 		goto just_remove;
2713 	}
2714 
2715 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2716 		goto just_remove;
2717 
2718 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2719 	if (!mlxsw_sp_port_vlan) {
2720 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2721 		goto just_remove;
2722 	}
2723 
2724 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2725 	if (!bridge_port) {
2726 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2727 		goto just_remove;
2728 	}
2729 
2730 	bridge_device = bridge_port->bridge_device;
2731 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2732 	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
2733 		  mlxsw_sp_port_vlan->vid : 0;
2734 
2735 do_fdb_op:
2736 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2737 					  adding, true);
2738 	if (err) {
2739 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2740 		return;
2741 	}
2742 
2743 	if (!do_notification)
2744 		return;
2745 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2746 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2747 
2748 	return;
2749 
2750 just_remove:
2751 	adding = false;
2752 	do_notification = false;
2753 	goto do_fdb_op;
2754 }
2755 
2756 static int
2757 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2758 					    const struct mlxsw_sp_fid *fid,
2759 					    bool adding,
2760 					    struct net_device **nve_dev,
2761 					    u16 *p_vid, __be32 *p_vni)
2762 {
2763 	struct mlxsw_sp_bridge_device *bridge_device;
2764 	struct net_device *br_dev, *dev;
2765 	int nve_ifindex;
2766 	int err;
2767 
2768 	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
2769 	if (err)
2770 		return err;
2771 
2772 	err = mlxsw_sp_fid_vni(fid, p_vni);
2773 	if (err)
2774 		return err;
2775 
2776 	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
2777 	if (!dev)
2778 		return -EINVAL;
2779 	*nve_dev = dev;
2780 
2781 	if (!netif_running(dev))
2782 		return -EINVAL;
2783 
2784 	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
2785 		return -EINVAL;
2786 
2787 	if (adding && netif_is_vxlan(dev)) {
2788 		struct vxlan_dev *vxlan = netdev_priv(dev);
2789 
2790 		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
2791 			return -EINVAL;
2792 	}
2793 
2794 	br_dev = netdev_master_upper_dev_get(dev);
2795 	if (!br_dev)
2796 		return -EINVAL;
2797 
2798 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2799 	if (!bridge_device)
2800 		return -EINVAL;
2801 
2802 	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
2803 
2804 	return 0;
2805 }
2806 
2807 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2808 						      char *sfn_pl,
2809 						      int rec_index,
2810 						      bool adding)
2811 {
2812 	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2813 	enum switchdev_notifier_type type;
2814 	struct net_device *nve_dev;
2815 	union mlxsw_sp_l3addr addr;
2816 	struct mlxsw_sp_fid *fid;
2817 	char mac[ETH_ALEN];
2818 	u16 fid_index, vid;
2819 	__be32 vni;
2820 	u32 uip;
2821 	int err;
2822 
2823 	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2824 				       &uip, &sfn_proto);
2825 
2826 	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2827 	if (!fid)
2828 		goto err_fid_lookup;
2829 
2830 	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2831 					      (enum mlxsw_sp_l3proto) sfn_proto,
2832 					      &addr);
2833 	if (err)
2834 		goto err_ip_resolve;
2835 
2836 	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2837 							  &nve_dev, &vid, &vni);
2838 	if (err)
2839 		goto err_fdb_process;
2840 
2841 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2842 					     (enum mlxsw_sp_l3proto) sfn_proto,
2843 					     &addr, adding, true);
2844 	if (err)
2845 		goto err_fdb_op;
2846 
2847 	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2848 					(enum mlxsw_sp_l3proto) sfn_proto,
2849 					&addr, vni, adding);
2850 
2851 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2852 			SWITCHDEV_FDB_DEL_TO_BRIDGE;
2853 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2854 
2855 	mlxsw_sp_fid_put(fid);
2856 
2857 	return;
2858 
2859 err_fdb_op:
2860 err_fdb_process:
2861 err_ip_resolve:
2862 	mlxsw_sp_fid_put(fid);
2863 err_fid_lookup:
2864 	/* Remove an FDB entry in case we cannot process it. Otherwise the
2865 	 * device will keep sending the same notification over and over again.
2866 	 */
2867 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2868 				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2869 				       false, true);
2870 }
2871 
2872 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2873 					    char *sfn_pl, int rec_index)
2874 {
2875 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2876 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2877 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2878 						rec_index, true);
2879 		break;
2880 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2881 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2882 						rec_index, false);
2883 		break;
2884 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2885 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2886 						    rec_index, true);
2887 		break;
2888 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2889 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2890 						    rec_index, false);
2891 		break;
2892 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2893 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2894 							  rec_index, true);
2895 		break;
2896 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2897 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2898 							  rec_index, false);
2899 		break;
2900 	}
2901 }
2902 
2903 #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
2904 
2905 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2906 {
2907 	struct mlxsw_sp_bridge *bridge;
2908 	struct mlxsw_sp *mlxsw_sp;
2909 	bool reschedule = false;
2910 	char *sfn_pl;
2911 	int queries;
2912 	u8 num_rec;
2913 	int i;
2914 	int err;
2915 
2916 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2917 	if (!sfn_pl)
2918 		return;
2919 
2920 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2921 	mlxsw_sp = bridge->mlxsw_sp;
2922 
2923 	rtnl_lock();
2924 	if (list_empty(&bridge->bridges_list))
2925 		goto out;
2926 	reschedule = true;
2927 	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
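	/* Drain learning notifications from the SFN register, bounded by a
	 * per-session query budget; a partially filled page means there is
	 * nothing more to read for now. If the budget is exhausted while full
	 * pages are still returned, the work is rescheduled without delay.
	 */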
2928 	while (queries > 0) {
2929 		mlxsw_reg_sfn_pack(sfn_pl);
2930 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2931 		if (err) {
2932 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2933 			goto out;
2934 		}
2935 		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2936 		for (i = 0; i < num_rec; i++)
2937 			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2938 		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
2939 			goto out;
2940 		queries--;
2941 	}
2942 
2943 out:
2944 	rtnl_unlock();
2945 	kfree(sfn_pl);
2946 	if (!reschedule)
2947 		return;
2948 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
2949 }
2950 
2951 struct mlxsw_sp_switchdev_event_work {
2952 	struct work_struct work;
2953 	union {
2954 		struct switchdev_notifier_fdb_info fdb_info;
2955 		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2956 	};
2957 	struct net_device *dev;
2958 	unsigned long event;
2959 };
2960 
2961 static void
2962 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2963 					  struct mlxsw_sp_switchdev_event_work *
2964 					  switchdev_work,
2965 					  struct mlxsw_sp_fid *fid, __be32 vni)
2966 {
2967 	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2968 	struct switchdev_notifier_fdb_info *fdb_info;
2969 	struct net_device *dev = switchdev_work->dev;
2970 	enum mlxsw_sp_l3proto proto;
2971 	union mlxsw_sp_l3addr addr;
2972 	int err;
2973 
2974 	fdb_info = &switchdev_work->fdb_info;
2975 	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2976 	if (err)
2977 		return;
2978 
2979 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2980 					      &proto, &addr);
2981 
2982 	switch (switchdev_work->event) {
2983 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2984 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2985 						     vxlan_fdb_info.eth_addr,
2986 						     mlxsw_sp_fid_index(fid),
2987 						     proto, &addr, true, false);
2988 		if (err)
2989 			return;
2990 		vxlan_fdb_info.offloaded = true;
2991 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2992 					 &vxlan_fdb_info.info, NULL);
2993 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2994 					    vxlan_fdb_info.eth_addr,
2995 					    fdb_info->vid, dev, true);
2996 		break;
2997 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2998 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2999 						     vxlan_fdb_info.eth_addr,
3000 						     mlxsw_sp_fid_index(fid),
3001 						     proto, &addr, false,
3002 						     false);
3003 		vxlan_fdb_info.offloaded = false;
3004 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3005 					 &vxlan_fdb_info.info, NULL);
3006 		break;
3007 	}
3008 }
3009 
3010 static void
3011 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
3012 					switchdev_work)
3013 {
3014 	struct mlxsw_sp_bridge_device *bridge_device;
3015 	struct net_device *dev = switchdev_work->dev;
3016 	struct net_device *br_dev;
3017 	struct mlxsw_sp *mlxsw_sp;
3018 	struct mlxsw_sp_fid *fid;
3019 	__be32 vni;
3020 	int err;
3021 
3022 	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
3023 	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
3024 		return;
3025 
3026 	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
3027 	    (!switchdev_work->fdb_info.added_by_user ||
3028 	     switchdev_work->fdb_info.is_local))
3029 		return;
3030 
3031 	if (!netif_running(dev))
3032 		return;
3033 	br_dev = netdev_master_upper_dev_get(dev);
3034 	if (!br_dev)
3035 		return;
3036 	if (!netif_is_bridge_master(br_dev))
3037 		return;
3038 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3039 	if (!mlxsw_sp)
3040 		return;
3041 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3042 	if (!bridge_device)
3043 		return;
3044 
3045 	fid = bridge_device->ops->fid_lookup(bridge_device,
3046 					     switchdev_work->fdb_info.vid);
3047 	if (!fid)
3048 		return;
3049 
3050 	err = mlxsw_sp_fid_vni(fid, &vni);
3051 	if (err)
3052 		goto out;
3053 
3054 	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
3055 						  vni);
3056 
3057 out:
3058 	mlxsw_sp_fid_put(fid);
3059 }
3060 
3061 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
3062 {
3063 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3064 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3065 	struct net_device *dev = switchdev_work->dev;
3066 	struct switchdev_notifier_fdb_info *fdb_info;
3067 	struct mlxsw_sp_port *mlxsw_sp_port;
3068 	int err;
3069 
3070 	rtnl_lock();
3071 	if (netif_is_vxlan(dev)) {
3072 		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
3073 		goto out;
3074 	}
3075 
3076 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3077 	if (!mlxsw_sp_port)
3078 		goto out;
3079 
3080 	switch (switchdev_work->event) {
3081 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3082 		fdb_info = &switchdev_work->fdb_info;
3083 		if (!fdb_info->added_by_user || fdb_info->is_local)
3084 			break;
3085 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
3086 		if (err)
3087 			break;
3088 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3089 					    fdb_info->addr,
3090 					    fdb_info->vid, dev, true);
3091 		break;
3092 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3093 		fdb_info = &switchdev_work->fdb_info;
3094 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
3095 		break;
3096 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3097 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3098 		/* These events are only used to potentially update an existing
3099 		 * SPAN mirror.
3100 		 */
3101 		break;
3102 	}
3103 
3104 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
3105 
3106 out:
3107 	rtnl_unlock();
3108 	kfree(switchdev_work->fdb_info.addr);
3109 	kfree(switchdev_work);
3110 	dev_put(dev);
3111 }
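
/* Illustrative example (editorial addition, not from the original driver
 * sources; the interface name is hypothetical): one way to exercise the
 * SWITCHDEV_FDB_ADD_TO_DEVICE branch above is a user-configured static
 * bridge FDB entry:
 *
 *   bridge fdb add 00:11:22:33:44:55 dev swp1 master static
 *
 * Such entries carry added_by_user and are not local, so they are mirrored
 * to the device here; learned entries are programmed by the device itself
 * and only reflected back to the bridge.
 */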
3112 
3113 static void
3114 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
3115 				 struct mlxsw_sp_switchdev_event_work *
3116 				 switchdev_work)
3117 {
3118 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3119 	struct mlxsw_sp_bridge_device *bridge_device;
3120 	struct net_device *dev = switchdev_work->dev;
3121 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3122 	enum mlxsw_sp_l3proto proto;
3123 	union mlxsw_sp_l3addr addr;
3124 	struct net_device *br_dev;
3125 	struct mlxsw_sp_fid *fid;
3126 	u16 vid;
3127 	int err;
3128 
3129 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3130 	br_dev = netdev_master_upper_dev_get(dev);
3131 
3132 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3133 	if (!bridge_device)
3134 		return;
3135 
3136 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3137 	if (!fid)
3138 		return;
3139 
3140 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3141 					      &proto, &addr);
3142 
3143 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3144 		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
3145 		if (err) {
3146 			mlxsw_sp_fid_put(fid);
3147 			return;
3148 		}
3149 		vxlan_fdb_info->offloaded = true;
3150 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3151 					 &vxlan_fdb_info->info, NULL);
3152 		mlxsw_sp_fid_put(fid);
3153 		return;
3154 	}
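
	/* Illustrative example (editorial addition, not from the original
	 * sources; the address is hypothetical): the all-zeros entry handled
	 * above is a VxLAN flood/default destination, typically configured as:
	 *
	 *   bridge fdb append 00:00:00:00:00:00 dev vxlan0 self dst 192.0.2.1
	 */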
3155 
3156 	/* The device has a single FDB table, whereas Linux has two - one
3157 	 * in the bridge driver and another in the VxLAN driver. We only
3158 	 * program an entry to the device if the MAC points to the VxLAN
3159 	 * device in the bridge's FDB table
3160 	 */
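
	/* Illustrative example (editorial addition, not from the original
	 * sources; MAC and IP are hypothetical): a remote host entry is
	 * usually added to both tables from userspace, e.g.:
	 *
	 *   bridge fdb add 00:11:22:33:44:55 dev vxlan0 self static dst 192.0.2.1
	 *   bridge fdb add 00:11:22:33:44:55 dev vxlan0 master static
	 *
	 * Only once the bridge FDB points the MAC at the VxLAN port is the
	 * entry programmed to hardware below.
	 */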
3161 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3162 	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
3163 		goto err_br_fdb_find;
3164 
3165 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3166 					     mlxsw_sp_fid_index(fid), proto,
3167 					     &addr, true, false);
3168 	if (err)
3169 		goto err_fdb_tunnel_uc_op;
3170 	vxlan_fdb_info->offloaded = true;
3171 	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3172 				 &vxlan_fdb_info->info, NULL);
3173 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3174 				    vxlan_fdb_info->eth_addr, vid, dev, true);
3175 
3176 	mlxsw_sp_fid_put(fid);
3177 
3178 	return;
3179 
3180 err_fdb_tunnel_uc_op:
3181 err_br_fdb_find:
3182 	mlxsw_sp_fid_put(fid);
3183 }
3184 
3185 static void
3186 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3187 				 struct mlxsw_sp_switchdev_event_work *
3188 				 switchdev_work)
3189 {
3190 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3191 	struct mlxsw_sp_bridge_device *bridge_device;
3192 	struct net_device *dev = switchdev_work->dev;
3193 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3194 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3195 	enum mlxsw_sp_l3proto proto;
3196 	union mlxsw_sp_l3addr addr;
3197 	struct mlxsw_sp_fid *fid;
3198 	u16 vid;
3199 
3200 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3201 
3202 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3203 	if (!bridge_device)
3204 		return;
3205 
3206 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3207 	if (!fid)
3208 		return;
3209 
3210 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3211 					      &proto, &addr);
3212 
3213 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3214 		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3215 		mlxsw_sp_fid_put(fid);
3216 		return;
3217 	}
3218 
3219 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3220 				       mlxsw_sp_fid_index(fid), proto, &addr,
3221 				       false, false);
3222 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3223 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3224 				    vxlan_fdb_info->eth_addr, vid, dev, false);
3225 
3226 	mlxsw_sp_fid_put(fid);
3227 }
3228 
3229 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3230 {
3231 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3232 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3233 	struct net_device *dev = switchdev_work->dev;
3234 	struct mlxsw_sp *mlxsw_sp;
3235 	struct net_device *br_dev;
3236 
3237 	rtnl_lock();
3238 
3239 	if (!netif_running(dev))
3240 		goto out;
3241 	br_dev = netdev_master_upper_dev_get(dev);
3242 	if (!br_dev)
3243 		goto out;
3244 	if (!netif_is_bridge_master(br_dev))
3245 		goto out;
3246 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3247 	if (!mlxsw_sp)
3248 		goto out;
3249 
3250 	switch (switchdev_work->event) {
3251 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3252 		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3253 		break;
3254 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3255 		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3256 		break;
3257 	}
3258 
3259 out:
3260 	rtnl_unlock();
3261 	kfree(switchdev_work);
3262 	dev_put(dev);
3263 }
3264 
3265 static int
3266 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3267 				      switchdev_work,
3268 				      struct switchdev_notifier_info *info)
3269 {
3270 	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3271 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3272 	struct vxlan_config *cfg = &vxlan->cfg;
3273 	struct netlink_ext_ack *extack;
3274 
3275 	extack = switchdev_notifier_info_to_extack(info);
3276 	vxlan_fdb_info = container_of(info,
3277 				      struct switchdev_notifier_vxlan_fdb_info,
3278 				      info);
3279 
3280 	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3281 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3282 		return -EOPNOTSUPP;
3283 	}
3284 	if (vxlan_fdb_info->remote_vni != cfg->vni ||
3285 	    vxlan_fdb_info->vni != cfg->vni) {
3286 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3287 		return -EOPNOTSUPP;
3288 	}
3289 	if (vxlan_fdb_info->remote_ifindex) {
3290 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3291 		return -EOPNOTSUPP;
3292 	}
3293 	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3294 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3295 		return -EOPNOTSUPP;
3296 	}
3297 	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3298 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
3299 		return -EOPNOTSUPP;
3300 	}
3301 
3302 	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3303 
3304 	return 0;
3305 }
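
/* Illustrative example (editorial addition, not from the original sources;
 * names and addresses are hypothetical): FDB entries that pass the checks
 * above belong to a VxLAN device created with the default destination port
 * and a single VNI, along the lines of:
 *
 *   ip link add vxlan0 type vxlan id 100 dstport 4789 local 192.0.2.2 \
 *           nolearning
 *
 * Per-entry overrides of the port, VNI or outgoing interface, as well as
 * multicast MACs or multicast remote IPs, are rejected with -EOPNOTSUPP.
 */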
3306 
3307 /* Called under rcu_read_lock() */
3308 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3309 				    unsigned long event, void *ptr)
3310 {
3311 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3312 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
3313 	struct switchdev_notifier_fdb_info *fdb_info;
3314 	struct switchdev_notifier_info *info = ptr;
3315 	struct net_device *br_dev;
3316 	int err;
3317 
3318 	if (event == SWITCHDEV_PORT_ATTR_SET) {
3319 		err = switchdev_handle_port_attr_set(dev, ptr,
3320 						     mlxsw_sp_port_dev_check,
3321 						     mlxsw_sp_port_attr_set);
3322 		return notifier_from_errno(err);
3323 	}
3324 
3325 	/* Tunnel devices are not our uppers, so check their master instead */
3326 	br_dev = netdev_master_upper_dev_get_rcu(dev);
3327 	if (!br_dev)
3328 		return NOTIFY_DONE;
3329 	if (!netif_is_bridge_master(br_dev))
3330 		return NOTIFY_DONE;
3331 	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
3332 		return NOTIFY_DONE;
3333 
3334 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3335 	if (!switchdev_work)
3336 		return NOTIFY_BAD;
3337 
3338 	switchdev_work->dev = dev;
3339 	switchdev_work->event = event;
3340 
3341 	switch (event) {
3342 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3343 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3344 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3345 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3346 		fdb_info = container_of(info,
3347 					struct switchdev_notifier_fdb_info,
3348 					info);
3349 		INIT_WORK(&switchdev_work->work,
3350 			  mlxsw_sp_switchdev_bridge_fdb_event_work);
3351 		memcpy(&switchdev_work->fdb_info, ptr,
3352 		       sizeof(switchdev_work->fdb_info));
3353 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3354 		if (!switchdev_work->fdb_info.addr)
3355 			goto err_addr_alloc;
3356 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3357 				fdb_info->addr);
3358 		/* Take a reference on the device. This can be either an
3359 		 * upper device containing a mlxsw_sp_port or just a
3360 		 * mlxsw_sp_port.
3361 		 */
3362 		dev_hold(dev);
3363 		break;
3364 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3365 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3366 		INIT_WORK(&switchdev_work->work,
3367 			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
3368 		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3369 							    info);
3370 		if (err)
3371 			goto err_vxlan_work_prepare;
3372 		dev_hold(dev);
3373 		break;
3374 	default:
3375 		kfree(switchdev_work);
3376 		return NOTIFY_DONE;
3377 	}
3378 
3379 	mlxsw_core_schedule_work(&switchdev_work->work);
3380 
3381 	return NOTIFY_DONE;
3382 
3383 err_vxlan_work_prepare:
3384 err_addr_alloc:
3385 	kfree(switchdev_work);
3386 	return NOTIFY_BAD;
3387 }
3388 
3389 struct notifier_block mlxsw_sp_switchdev_notifier = {
3390 	.notifier_call = mlxsw_sp_switchdev_event,
3391 };
3392 
3393 static int
3394 mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3395 				  struct mlxsw_sp_bridge_device *bridge_device,
3396 				  const struct net_device *vxlan_dev, u16 vid,
3397 				  bool flag_untagged, bool flag_pvid,
3398 				  struct netlink_ext_ack *extack)
3399 {
3400 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3401 	__be32 vni = vxlan->cfg.vni;
3402 	struct mlxsw_sp_fid *fid;
3403 	u16 old_vid;
3404 	int err;
3405 
3406 	/* We cannot have the same VLAN as PVID and egress untagged on multiple
3407 	 * VxLAN devices. Note that we get this notification before the VLAN is
3408 	 * actually added to the bridge's database, so it is not possible for
3409 	 * the lookup function to return 'vxlan_dev'
3410 	 */
3411 	if (flag_untagged && flag_pvid &&
3412 	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3413 		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3414 		return -EINVAL;
3415 	}
3416 
3417 	if (!netif_running(vxlan_dev))
3418 		return 0;
3419 
3420 	/* First case: FID is not associated with this VNI, but the new VLAN
3421 	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3422 	 * it exists
3423 	 */
3424 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3425 	if (!fid) {
3426 		if (!flag_untagged || !flag_pvid)
3427 			return 0;
3428 		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
3429 						      vid, extack);
3430 	}
3431 
3432 	/* Second case: FID is associated with the VNI and the VLAN associated
3433 	 * with the FID is the same as the notified VLAN. This means the flags
3434 	 * (PVID / egress untagged) were toggled and that NVE should be
3435 	 * disabled on the FID
3436 	 */
3437 	old_vid = mlxsw_sp_fid_8021q_vid(fid);
3438 	if (vid == old_vid) {
3439 		if (WARN_ON(flag_untagged && flag_pvid)) {
3440 			mlxsw_sp_fid_put(fid);
3441 			return -EINVAL;
3442 		}
3443 		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3444 		mlxsw_sp_fid_put(fid);
3445 		return 0;
3446 	}
3447 
3448 	/* Third case: A new VLAN was configured on the VxLAN device, but this
3449 	 * VLAN is not PVID, so there is nothing to do.
3450 	 */
3451 	if (!flag_pvid) {
3452 		mlxsw_sp_fid_put(fid);
3453 		return 0;
3454 	}
3455 
3456 	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
3457 	 * mapped to the VNI should be unmapped
3458 	 */
3459 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3460 	mlxsw_sp_fid_put(fid);
3461 
3462 	/* Fifth case: The new VLAN is also egress untagged, which means the
3463 	 * VLAN needs to be mapped to the VNI
3464 	 */
3465 	if (!flag_untagged)
3466 		return 0;
3467 
3468 	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
3469 	if (err)
3470 		goto err_vxlan_join;
3471 
3472 	return 0;
3473 
3474 err_vxlan_join:
3475 	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
3476 	return err;
3477 }
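
/* Illustrative example (editorial addition, not from the original sources;
 * the VID is hypothetical): in a VLAN-aware bridge, the VLAN that is both
 * PVID and egress untagged on the VxLAN port is the one mapped to the
 * device's VNI, e.g.:
 *
 *   bridge vlan add vid 20 dev vxlan0 pvid untagged
 *
 * The five cases handled above cover toggling these flags and moving the
 * mapping between VLANs.
 */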
3478 
3479 static void
3480 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3481 				  struct mlxsw_sp_bridge_device *bridge_device,
3482 				  const struct net_device *vxlan_dev, u16 vid)
3483 {
3484 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3485 	__be32 vni = vxlan->cfg.vni;
3486 	struct mlxsw_sp_fid *fid;
3487 
3488 	if (!netif_running(vxlan_dev))
3489 		return;
3490 
3491 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3492 	if (!fid)
3493 		return;
3494 
3495 	/* A different VLAN than the one mapped to the VNI is deleted */
3496 	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3497 		goto out;
3498 
3499 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3500 
3501 out:
3502 	mlxsw_sp_fid_put(fid);
3503 }
3504 
3505 static int
3506 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3507 				   struct switchdev_notifier_port_obj_info *
3508 				   port_obj_info)
3509 {
3510 	struct switchdev_obj_port_vlan *vlan =
3511 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3512 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3513 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3514 	struct mlxsw_sp_bridge_device *bridge_device;
3515 	struct netlink_ext_ack *extack;
3516 	struct mlxsw_sp *mlxsw_sp;
3517 	struct net_device *br_dev;
3518 
3519 	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3520 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3521 	if (!br_dev)
3522 		return 0;
3523 
3524 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3525 	if (!mlxsw_sp)
3526 		return 0;
3527 
3528 	port_obj_info->handled = true;
3529 
3530 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3531 	if (!bridge_device)
3532 		return -EINVAL;
3533 
3534 	if (!bridge_device->vlan_enabled)
3535 		return 0;
3536 
3537 	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3538 						 vxlan_dev, vlan->vid,
3539 						 flag_untagged,
3540 						 flag_pvid, extack);
3541 }
3542 
3543 static void
3544 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3545 				   struct switchdev_notifier_port_obj_info *
3546 				   port_obj_info)
3547 {
3548 	struct switchdev_obj_port_vlan *vlan =
3549 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3550 	struct mlxsw_sp_bridge_device *bridge_device;
3551 	struct mlxsw_sp *mlxsw_sp;
3552 	struct net_device *br_dev;
3553 
3554 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3555 	if (!br_dev)
3556 		return;
3557 
3558 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3559 	if (!mlxsw_sp)
3560 		return;
3561 
3562 	port_obj_info->handled = true;
3563 
3564 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3565 	if (!bridge_device)
3566 		return;
3567 
3568 	if (!bridge_device->vlan_enabled)
3569 		return;
3570 
3571 	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
3572 					  vlan->vid);
3573 }
3574 
3575 static int
3576 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3577 					struct switchdev_notifier_port_obj_info *
3578 					port_obj_info)
3579 {
3580 	int err = 0;
3581 
3582 	switch (port_obj_info->obj->id) {
3583 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3584 		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3585 							 port_obj_info);
3586 		break;
3587 	default:
3588 		break;
3589 	}
3590 
3591 	return err;
3592 }
3593 
3594 static void
3595 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3596 					struct switchdev_notifier_port_obj_info *
3597 					port_obj_info)
3598 {
3599 	switch (port_obj_info->obj->id) {
3600 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3601 		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3602 		break;
3603 	default:
3604 		break;
3605 	}
3606 }
3607 
3608 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3609 					     unsigned long event, void *ptr)
3610 {
3611 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3612 	int err = 0;
3613 
3614 	switch (event) {
3615 	case SWITCHDEV_PORT_OBJ_ADD:
3616 		if (netif_is_vxlan(dev))
3617 			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3618 		else
3619 			err = switchdev_handle_port_obj_add(dev, ptr,
3620 							mlxsw_sp_port_dev_check,
3621 							mlxsw_sp_port_obj_add);
3622 		return notifier_from_errno(err);
3623 	case SWITCHDEV_PORT_OBJ_DEL:
3624 		if (netif_is_vxlan(dev))
3625 			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3626 		else
3627 			err = switchdev_handle_port_obj_del(dev, ptr,
3628 							mlxsw_sp_port_dev_check,
3629 							mlxsw_sp_port_obj_del);
3630 		return notifier_from_errno(err);
3631 	case SWITCHDEV_PORT_ATTR_SET:
3632 		err = switchdev_handle_port_attr_set(dev, ptr,
3633 						     mlxsw_sp_port_dev_check,
3634 						     mlxsw_sp_port_attr_set);
3635 		return notifier_from_errno(err);
3636 	}
3637 
3638 	return NOTIFY_DONE;
3639 }
3640 
3641 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3642 	.notifier_call = mlxsw_sp_switchdev_blocking_event,
3643 };
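
/* Descriptive note (editorial addition, not from the original sources): FDB
 * and VxLAN FDB events arrive on the atomic switchdev chain handled by
 * mlxsw_sp_switchdev_event() and are deferred to a work item, whereas port
 * objects (VLANs, MDB entries) arrive on the blocking chain above and can be
 * handled in place. SWITCHDEV_PORT_ATTR_SET may be delivered on either chain,
 * which is why both notifiers handle it.
 */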
3644 
3645 u8
3646 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3647 {
3648 	return bridge_port->stp_state;
3649 }
3650 
3651 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3652 {
3653 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3654 	struct notifier_block *nb;
3655 	int err;
3656 
3657 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3658 	if (err) {
3659 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3660 		return err;
3661 	}
3662 
3663 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3664 	if (err) {
3665 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3666 		return err;
3667 	}
3668 
3669 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3670 	err = register_switchdev_blocking_notifier(nb);
3671 	if (err) {
3672 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3673 		goto err_register_switchdev_blocking_notifier;
3674 	}
3675 
3676 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3677 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3678 	return 0;
3679 
3680 err_register_switchdev_blocking_notifier:
3681 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3682 	return err;
3683 }
3684 
3685 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3686 {
3687 	struct notifier_block *nb;
3688 
3689 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3690 
3691 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3692 	unregister_switchdev_blocking_notifier(nb);
3693 
3694 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3695 }
3696 
3697 static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3698 {
3699 	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
3700 }
3701 
3702 const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
3703 	.init	= mlxsw_sp1_switchdev_init,
3704 };
3705 
3706 static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3707 {
3708 	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
3709 }
3710 
3711 const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
3712 	.init	= mlxsw_sp2_switchdev_init,
3713 };
3714 
3715 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3716 {
3717 	struct mlxsw_sp_bridge *bridge;
3718 
3719 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3720 	if (!bridge)
3721 		return -ENOMEM;
3722 	mlxsw_sp->bridge = bridge;
3723 	bridge->mlxsw_sp = mlxsw_sp;
3724 
3725 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3726 
3727 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3728 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3729 
3730 	mlxsw_sp->switchdev_ops->init(mlxsw_sp);
3731 
3732 	return mlxsw_sp_fdb_init(mlxsw_sp);
3733 }
3734 
3735 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3736 {
3737 	mlxsw_sp_fdb_fini(mlxsw_sp);
3738 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3739 	kfree(mlxsw_sp->bridge);
3740 }
3741 
3742