1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25
26 struct mlxsw_sp_bridge_ops;
27
/* Per-ASIC bridge state: all offloaded bridge devices, the delayed work
 * that polls for FDB notifications and the global FDB ageing time.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only a single VLAN-aware bridge may be offloaded at a time. */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	/* Flavor-specific operations: 802.1Q / 802.1D / 802.1AD. */
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
46
/* State for one offloaded bridge netdevice. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* Member of bridge->bridges_list. */
	struct list_head ports_list;	/* mlxsw_sp_bridge_port entries. */
	struct list_head mdb_list;	/* mlxsw_sp_mdb_entry entries. */
	struct rhashtable mdb_ht;	/* MDB entries keyed by {MAC, FID}. */
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops; /* Flavor-specific ops. */
};
58
/* State for one offloaded bridge port (a front-panel port or a LAG). */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* Member of bridge_device->ports_list. */
	struct list_head vlans_list;	/* mlxsw_sp_bridge_vlan entries. */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_* bridge port flags. */
	bool mrouter;
	bool lagged;			/* Selects the union member below. */
	union {
		u16 lag_id;		/* Valid when 'lagged' is set. */
		u16 system_port;	/* Valid when 'lagged' is clear. */
	};
};
74
/* A VLAN configured on a bridge port. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		 /* Member of bridge_port->vlans_list. */
	struct list_head port_vlan_list; /* mlxsw_sp_port_vlan entries,
					  * linked via 'bridge_vlan_node'.
					  */
	u16 vid;
};
80
/* Operations that differ between bridge flavors (802.1Q, 802.1D, 802.1AD). */
struct mlxsw_sp_bridge_ops {
	/* Bind/unbind a port to/from the bridge device. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Offload a VXLAN device enslaved to the bridge. */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	/* Resolve the FID used for a given VID. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	/* Map a FID back to the VID it serves. */
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
101
/* ASIC-generation specific switchdev initialization hook. */
struct mlxsw_sp_switchdev_ops {
	void (*init)(struct mlxsw_sp *mlxsw_sp);
};
105
/* MDB entries are keyed by multicast MAC address and filtering ID (FID). */
struct mlxsw_sp_mdb_entry_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};
110
/* One multicast forwarding database entry of a bridge device. */
struct mlxsw_sp_mdb_entry {
	struct list_head list;		/* Member of bridge_device->mdb_list. */
	struct rhash_head ht_node;	/* Node in bridge_device->mdb_ht. */
	struct mlxsw_sp_mdb_entry_key key;
	u16 mid;
	struct list_head ports_list;	/* mlxsw_sp_mdb_entry_port entries. */
	u16 ports_count;
};
119
/* A local port that is a member of an MDB entry. */
struct mlxsw_sp_mdb_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
	refcount_t refcount;	/* References to this port membership. */
	bool mrouter;
};
126
/* MDB hashtable parameters: entries are hashed by their {MAC, FID} key. */
static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
};
132
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device
				   *bridge_device, bool mc_enabled);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
152
153 static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge * bridge,const struct net_device * br_dev)154 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
155 const struct net_device *br_dev)
156 {
157 struct mlxsw_sp_bridge_device *bridge_device;
158
159 list_for_each_entry(bridge_device, &bridge->bridges_list, list)
160 if (bridge_device->dev == br_dev)
161 return bridge_device;
162
163 return NULL;
164 }
165
mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp * mlxsw_sp,const struct net_device * br_dev)166 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
167 const struct net_device *br_dev)
168 {
169 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
170 }
171
/* netdev_walk_all_upper_dev_rcu() callback: destroy the RIF associated
 * with the visited upper device. Always returns 0 so the walk continues.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    struct netdev_nested_priv *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv->data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
180
/* Destroy the RIF of the bridge device itself and of all of its upper
 * devices.
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}
193
/* Replay VXLAN offload for every running VXLAN device already enslaved
 * to the bridge. On failure, undo the join of the devices processed
 * before the failing one and return the error.
 */
static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	/* Walk the lowers again in the same order and leave every device
	 * that was joined before 'stop_dev'.
	 */
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}
226
mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge * bridge,struct net_device * br_dev)227 static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
228 struct net_device *br_dev)
229 {
230 struct net_device *dev;
231 struct list_head *iter;
232
233 netdev_for_each_lower_dev(br_dev, dev, iter) {
234 if (netif_is_vxlan(dev) && netif_running(dev))
235 mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
236 }
237 }
238
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
					      bool no_delay)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	unsigned int delay_ms = 0;

	/* Schedule the FDB notification work, either immediately or after
	 * the configured polling interval.
	 */
	if (!no_delay)
		delay_ms = bridge->fdb_notify.interval;
	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
			       msecs_to_jiffies(delay_ms));
}
248
/* Allocate and register an entry for a newly offloaded bridge netdevice.
 *
 * Only one VLAN-aware bridge is supported per ASIC. The flavor-specific
 * ops are selected from the bridge's VLAN filtering state and VLAN
 * protocol: 802.1AD or 802.1Q when VLAN-aware, 802.1D otherwise.
 *
 * Returns the new entry or an ERR_PTR() on failure.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_mdb_rhashtable_init;

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		u16 proto;

		bridge->vlan_enabled_exists = true;
		br_vlan_get_proto(br_dev, &proto);
		if (proto == ETH_P_8021AD)
			bridge_device->ops = bridge->bridge_8021ad_ops;
		else
			bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mdb_list);

	/* Kick off the FDB notification work when the first bridge shows
	 * up; it is cancelled again when the last bridge is destroyed.
	 */
	if (list_empty(&bridge->bridges_list))
		mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	rhashtable_destroy(&bridge_device->mdb_ht);
err_mdb_rhashtable_init:
	kfree(bridge_device);
	return ERR_PTR(err);
}
315
/* Tear down a bridge device entry: undo VXLAN offload, destroy related
 * RIFs, unlink the entry and stop the FDB notification work when the
 * last bridge goes away. Ports and MDB entries must already be gone.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (list_empty(&bridge->bridges_list))
		cancel_delayed_work(&bridge->fdb_notify.dw);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mdb_list));
	rhashtable_destroy(&bridge_device->mdb_ht);
	kfree(bridge_device);
}
333
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev,
			   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	/* Reuse the existing entry when the bridge is already offloaded,
	 * otherwise create a fresh one.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		bridge_device = mlxsw_sp_bridge_device_create(bridge, br_dev,
							      extack);
	return bridge_device;
}
347
348 static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge * bridge,struct mlxsw_sp_bridge_device * bridge_device)349 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
350 struct mlxsw_sp_bridge_device *bridge_device)
351 {
352 if (list_empty(&bridge_device->ports_list))
353 mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
354 }
355
356 static struct mlxsw_sp_bridge_port *
__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device * bridge_device,const struct net_device * brport_dev)357 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
358 const struct net_device *brport_dev)
359 {
360 struct mlxsw_sp_bridge_port *bridge_port;
361
362 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
363 if (bridge_port->dev == brport_dev)
364 return bridge_port;
365 }
366
367 return NULL;
368 }
369
370 struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge * bridge,struct net_device * brport_dev)371 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
372 struct net_device *brport_dev)
373 {
374 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
375 struct mlxsw_sp_bridge_device *bridge_device;
376
377 if (!br_dev)
378 return NULL;
379
380 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
381 if (!bridge_device)
382 return NULL;
383
384 return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
385 }
386
/* Allocate a bridge port entry for a port netdevice, link it to the
 * bridge device and mark the port as offloaded towards the bridge.
 * Returns the new entry (with ref_count of 1) or an ERR_PTR().
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return ERR_PTR(-ENOMEM);

	/* NOTE(review): mlxsw_sp_port_dev_lower_find() is dereferenced
	 * without a NULL check - confirm callers only reach here for
	 * netdevices with an mlxsw lower.
	 */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = BR_STATE_DISABLED;
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
					    NULL, NULL, NULL, false, extack);
	if (err)
		goto err_switchdev_offload;

	return bridge_port;

err_switchdev_offload:
	list_del(&bridge_port->list);
	kfree(bridge_port);
	return ERR_PTR(err);
}
427
/* Undo the switchdev offload marking, unlink and free a bridge port
 * entry. All of its VLANs must already be gone.
 */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
436
/* Look up or create a bridge port entry for a port netdevice, taking a
 * reference. Creating the port may also create the bridge device entry,
 * which is released again if port creation fails.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
						  extack);
	if (IS_ERR(bridge_port)) {
		err = PTR_ERR(bridge_port);
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
470
mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge * bridge,struct mlxsw_sp_bridge_port * bridge_port)471 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
472 struct mlxsw_sp_bridge_port *bridge_port)
473 {
474 struct mlxsw_sp_bridge_device *bridge_device;
475
476 if (--bridge_port->ref_count != 0)
477 return;
478 bridge_device = bridge_port->bridge_device;
479 mlxsw_sp_bridge_port_destroy(bridge_port);
480 mlxsw_sp_bridge_device_put(bridge, bridge_device);
481 }
482
483 static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port * mlxsw_sp_port,const struct mlxsw_sp_bridge_device * bridge_device,u16 vid)484 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
485 const struct mlxsw_sp_bridge_device *
486 bridge_device,
487 u16 vid)
488 {
489 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
490
491 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
492 list) {
493 if (!mlxsw_sp_port_vlan->bridge_port)
494 continue;
495 if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
496 bridge_device)
497 continue;
498 if (bridge_device->vlan_enabled &&
499 mlxsw_sp_port_vlan->vid != vid)
500 continue;
501 return mlxsw_sp_port_vlan;
502 }
503
504 return NULL;
505 }
506
507 static struct mlxsw_sp_port_vlan*
mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port * mlxsw_sp_port,u16 fid_index)508 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
509 u16 fid_index)
510 {
511 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
512
513 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
514 list) {
515 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
516
517 if (fid && mlxsw_sp_fid_index(fid) == fid_index)
518 return mlxsw_sp_port_vlan;
519 }
520
521 return NULL;
522 }
523
524 static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port * bridge_port,u16 vid)525 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
526 u16 vid)
527 {
528 struct mlxsw_sp_bridge_vlan *bridge_vlan;
529
530 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
531 if (bridge_vlan->vid == vid)
532 return bridge_vlan;
533 }
534
535 return NULL;
536 }
537
538 static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port * bridge_port,u16 vid)539 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
540 {
541 struct mlxsw_sp_bridge_vlan *bridge_vlan;
542
543 bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
544 if (!bridge_vlan)
545 return NULL;
546
547 INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
548 bridge_vlan->vid = vid;
549 list_add(&bridge_vlan->list, &bridge_port->vlans_list);
550
551 return bridge_vlan;
552 }
553
/* Unlink and free a bridge VLAN entry. All of its port VLANs must
 * already be detached.
 */
static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}
561
562 static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port * bridge_port,u16 vid)563 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
564 {
565 struct mlxsw_sp_bridge_vlan *bridge_vlan;
566
567 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
568 if (bridge_vlan)
569 return bridge_vlan;
570
571 return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
572 }
573
mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan * bridge_vlan)574 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
575 {
576 if (list_empty(&bridge_vlan->port_vlan_list))
577 mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
578 }
579
/* Apply the STP state to this bridge VLAN's VID for the given port.
 * Returns 0 when the port has no port VLAN attached to the bridge VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}
597
/* Handle a switchdev STP state attribute: program the new state on all
 * VLANs of the bridge port. On failure, restore the previous state on
 * the VLANs already updated.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Roll back to the previously cached STP state. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
632
/* Set flood membership of the given packet type for this port on the
 * FID of the bridge VLAN. Returns 0 when the port has no port VLAN
 * attached to the bridge VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}
653
/* Set flood membership of the given packet type for this port on every
 * VLAN of the bridge port. On failure, revert the VLANs already updated.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
681
682 static int
mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan * bridge_vlan,enum mlxsw_sp_flood_type packet_type,bool member)683 mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
684 enum mlxsw_sp_flood_type packet_type,
685 bool member)
686 {
687 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
688 int err;
689
690 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
691 bridge_vlan_node) {
692 u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
693
694 err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
695 packet_type, local_port, member);
696 if (err)
697 goto err_fid_flood_set;
698 }
699
700 return 0;
701
702 err_fid_flood_set:
703 list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
704 &bridge_vlan->port_vlan_list,
705 list) {
706 u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
707
708 mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
709 local_port, !member);
710 }
711
712 return err;
713 }
714
/* Set flood membership of the given packet type on every VLAN of the
 * bridge port (all member ports of each VLAN). On failure, revert the
 * VLANs already updated.
 */
static int
mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
				      enum mlxsw_sp_flood_type packet_type,
				      bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						      member);
		if (err)
			goto err_bridge_vlans_flood_set;
	}

	return 0;

err_bridge_vlans_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						!member);
	return err;
}
739
/* Enable/disable learning on this bridge VLAN's VID for the given port.
 * Returns 0 when the port has no port VLAN attached to the bridge VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}
757
/* Enable/disable learning for this port on every VLAN of the bridge
 * port. On failure, revert the VLANs already updated.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
782
783 static int
mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port * mlxsw_sp_port,struct switchdev_brport_flags flags)784 mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
785 struct switchdev_brport_flags flags)
786 {
787 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
788 return -EINVAL;
789
790 return 0;
791 }
792
/* Handle a switchdev bridge-port-flags attribute: program unicast flood,
 * learning and (only while multicast snooping is disabled) multicast
 * flood, then cache the new flag values.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *orig_dev,
					   struct switchdev_brport_flags flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (flags.mask & BR_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_UC,
							   flags.val & BR_FLOOD);
		if (err)
			return err;
	}

	if (flags.mask & BR_LEARNING) {
		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
							bridge_port,
							flags.val & BR_LEARNING);
		if (err)
			return err;
	}

	/* When multicast is enabled, MC flooding is driven by the mrouter
	 * state instead of BR_MCAST_FLOOD; skip the flood table update.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	if (flags.mask & BR_MCAST_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_MC,
							   flags.val & BR_MCAST_FLOOD);
		if (err)
			return err;
	}

out:
	/* NOTE(review): this stores flags.val wholesale rather than
	 * merging only the bits in flags.mask - confirm callers always
	 * pass the complete flag set.
	 */
	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
	return 0;
}
838
/* Program the FDB ageing time (seconds) via the SFDAT register and cache
 * the value on success.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}
851
/* Handle a switchdev ageing-time attribute: convert the clock_t value to
 * seconds, range-check it against the device limits and program it.
 */
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
		return -ERANGE;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
865
/* Handle a switchdev VLAN-filtering attribute. Toggling VLAN filtering
 * on an already offloaded bridge is not supported; only a no-op change
 * is accepted.
 */
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}
883
/* Handle a switchdev VLAN-protocol attribute. Changing the protocol of
 * an already offloaded bridge is never supported.
 */
static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
						struct net_device *orig_dev,
						u16 vlan_proto)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
	return -EINVAL;
}
898
/* Handle a switchdev port-mrouter attribute: update the port's MDB
 * membership and, when multicast is enabled, its multicast flood state,
 * then cache the new mrouter state.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);

	/* With multicast disabled, flooding is governed by BR_MCAST_FLOOD
	 * instead of the mrouter state.
	 */
	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
927
mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port * bridge_port)928 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
929 {
930 const struct mlxsw_sp_bridge_device *bridge_device;
931
932 bridge_device = bridge_port->bridge_device;
933 return bridge_device->multicast_enabled ? bridge_port->mrouter :
934 bridge_port->flags & BR_MCAST_FLOOD;
935 }
936
/* Handle a switchdev multicast-disabled attribute: flip the bridge's
 * multicast state, resync the MDB accordingly and recompute the MC flood
 * membership of every bridge port. On failure, everything is reverted.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled == !mc_disabled)
		return 0;

	bridge_device->multicast_enabled = !mc_disabled;
	err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
						 !mc_disabled);
	if (err)
		goto err_mc_enable_sync;

	/* The flood state of each port depends on the new multicast state
	 * (mrouter-only vs. BR_MCAST_FLOOD driven) - recompute it.
	 */
	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
							    packet_type,
							    member);
		if (err)
			goto err_flood_table_set;
	}

	return 0;

err_flood_table_set:
	list_for_each_entry_continue_reverse(bridge_port,
					     &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
						      !member);
	}
	mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
					   mc_disabled);
err_mc_enable_sync:
	bridge_device->multicast_enabled = mc_disabled;
	return err;
}
989
990 static struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry * mdb_entry,u16 local_port)991 mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
992 u16 local_port)
993 {
994 struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
995
996 list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
997 if (mdb_entry_port->local_port == local_port)
998 return mdb_entry_port;
999 }
1000
1001 return NULL;
1002 }
1003
/* Take a regular-member reference on @local_port in @mdb_entry, creating
 * the member (and programming the PGT) on first use. A port that was so
 * far only an mrouter member (single reference held by that role) now
 * also counts as a regular member, so ports_count is bumped.
 * Returns the member or an ERR_PTR().
 */
static struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* Port already in the PGT; only account for the transition
		 * from "mrouter-only" to "regular member".
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count++;

		refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* First reference: program the hardware before tracking state. */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
	mdb_entry->ports_count++;

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1045
/* Drop a regular-member reference on @local_port in @mdb_entry, or remove
 * the member unconditionally when @force is set. If only the mrouter
 * role's reference remains, the port stops counting as a regular member.
 * Once the last reference is gone the member is freed and the PGT entry
 * is unprogrammed.
 */
static void
mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port, bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
		/* Transition from "regular member" back to "mrouter-only". */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count--;
		return;
	}

	mdb_entry->ports_count--;
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1070
1071 static __always_unused struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mdb_entry * mdb_entry,u16 local_port)1072 mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
1073 struct mlxsw_sp_mdb_entry *mdb_entry,
1074 u16 local_port)
1075 {
1076 struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1077 int err;
1078
1079 mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
1080 if (mdb_entry_port) {
1081 if (!mdb_entry_port->mrouter)
1082 refcount_inc(&mdb_entry_port->refcount);
1083 return mdb_entry_port;
1084 }
1085
1086 err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
1087 mdb_entry->key.fid, local_port, true);
1088 if (err)
1089 return ERR_PTR(err);
1090
1091 mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
1092 if (!mdb_entry_port) {
1093 err = -ENOMEM;
1094 goto err_mdb_entry_port_alloc;
1095 }
1096
1097 mdb_entry_port->local_port = local_port;
1098 refcount_set(&mdb_entry_port->refcount, 1);
1099 mdb_entry_port->mrouter = true;
1100 list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
1101
1102 return mdb_entry_port;
1103
1104 err_mdb_entry_port_alloc:
1105 mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
1106 mdb_entry->key.fid, local_port, false);
1107 return ERR_PTR(err);
1108 }
1109
/* Release the mrouter role of @local_port in @mdb_entry. The role's
 * single reference is dropped; if it was the last reference, the member
 * is freed and the PGT entry is unprogrammed. A member that was never
 * marked as mrouter is left untouched.
 */
static __always_unused void
mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!mdb_entry_port->mrouter)
		return;

	mdb_entry_port->mrouter = false;
	if (!refcount_dec_and_test(&mdb_entry_port->refcount))
		return;

	/* Last reference: the port remains in the PGT only while tracked. */
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1133
1134 static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_bridge_device * bridge_device,bool add)1135 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
1136 struct mlxsw_sp_bridge_device *bridge_device,
1137 bool add)
1138 {
1139 u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
1140 struct mlxsw_sp_mdb_entry *mdb_entry;
1141
1142 list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
1143 if (add)
1144 mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
1145 local_port);
1146 else
1147 mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
1148 local_port);
1149 }
1150 }
1151
1152 static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * orig_dev,bool is_mrouter)1153 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1154 struct net_device *orig_dev,
1155 bool is_mrouter)
1156 {
1157 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1158 struct mlxsw_sp_bridge_device *bridge_device;
1159
1160 /* It's possible we failed to enslave the port, yet this
1161 * operation is executed due to it being deferred.
1162 */
1163 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
1164 if (!bridge_device)
1165 return 0;
1166
1167 if (bridge_device->mrouter != is_mrouter)
1168 mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
1169 is_mrouter);
1170 bridge_device->mrouter = is_mrouter;
1171 return 0;
1172 }
1173
/* switchdev attribute handler for a Spectrum port. Dispatches the
 * SWITCHDEV_ATTR_ID_* attributes to their per-attribute helpers; unknown
 * attributes yield -EOPNOTSUPP. SPAN state is recomputed afterwards
 * regardless of the outcome.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
							   attr->orig_dev,
							   attr->u.vlan_protocol);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Attribute changes may affect mirroring; re-resolve SPAN state. */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1234
/* Bind a {port, VLAN} to the FID provided by the bridge: set up UC/MC/BC
 * flood membership and map the port-VID to the FID. UC flooding follows
 * BR_FLOOD, MC flooding follows mlxsw_sp_mc_flood(), BC flooding is
 * always on. On success the FID reference is stored in
 * @mlxsw_sp_port_vlan; on failure everything is unwound in reverse order.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
1285
1286 static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan * mlxsw_sp_port_vlan)1287 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1288 {
1289 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1290 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1291 u16 local_port = mlxsw_sp_port->local_port;
1292 u16 vid = mlxsw_sp_port_vlan->vid;
1293
1294 mlxsw_sp_port_vlan->fid = NULL;
1295 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
1296 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
1297 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
1298 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1299 mlxsw_sp_fid_put(fid);
1300 }
1301
1302 static u16
mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port * mlxsw_sp_port,u16 vid,bool is_pvid)1303 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1304 u16 vid, bool is_pvid)
1305 {
1306 if (is_pvid)
1307 return vid;
1308 else if (mlxsw_sp_port->pvid == vid)
1309 return 0; /* Dis-allow untagged packets */
1310 else
1311 return mlxsw_sp_port->pvid;
1312 }
1313
/* Attach a {port, VLAN} to its bridge port: join the FID, apply the
 * bridge port's learning and STP state to the VID, and link the port-VLAN
 * into the bridge-VLAN's list. Takes a bridge port reference that is
 * dropped by mlxsw_sp_port_vlan_bridge_leave(). A no-op when the
 * port-VLAN is already attached (VLAN flags-only change).
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port for as long as a port-VLAN references it. */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1366
/* Detach a {port, VLAN} from its bridge port: unlink it from the
 * bridge-VLAN, disable learning and STP for the VID, flush the FDB when
 * this was the last port on the bridge-VLAN, flush the port's MDB entries
 * for the FID, leave the FID and drop the bridge port reference taken at
 * join time. Only valid for 802.1Q/802.1D FIDs (WARN otherwise).
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	/* Record before unlinking whether we are the last user. */
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));

	mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
				       mlxsw_sp_fid_index(fid));

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1402
/* Add VLAN @vid to a bridge port: create (or reuse) the port-VLAN,
 * program VLAN membership and tagging, update the PVID and join the
 * bridge. Reusing an existing port-VLAN is only legal when it already
 * belongs to the same bridge port (flags-only change). Unwinds on error.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	u16 proto;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	/* PVID is programmed with the bridge's VLAN protocol. */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1456
1457 static int
mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp * mlxsw_sp,const struct net_device * br_dev,const struct switchdev_obj_port_vlan * vlan)1458 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1459 const struct net_device *br_dev,
1460 const struct switchdev_obj_port_vlan *vlan)
1461 {
1462 u16 pvid;
1463
1464 pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
1465 if (!pvid)
1466 return 0;
1467
1468 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1469 if (vlan->vid != pvid) {
1470 netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1471 return -EBUSY;
1472 }
1473 } else {
1474 if (vlan->vid == pvid) {
1475 netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1476 return -EBUSY;
1477 }
1478 }
1479
1480 return 0;
1481 }
1482
mlxsw_sp_port_vlans_add(struct mlxsw_sp_port * mlxsw_sp_port,const struct switchdev_obj_port_vlan * vlan,struct netlink_ext_ack * extack)1483 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1484 const struct switchdev_obj_port_vlan *vlan,
1485 struct netlink_ext_ack *extack)
1486 {
1487 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1488 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1489 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1490 struct net_device *orig_dev = vlan->obj.orig_dev;
1491 struct mlxsw_sp_bridge_port *bridge_port;
1492
1493 if (netif_is_bridge_master(orig_dev)) {
1494 int err = 0;
1495
1496 if (br_vlan_enabled(orig_dev))
1497 err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1498 orig_dev, vlan);
1499 if (!err)
1500 err = -EOPNOTSUPP;
1501 return err;
1502 }
1503
1504 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1505 if (WARN_ON(!bridge_port))
1506 return -EINVAL;
1507
1508 if (!bridge_port->bridge_device->vlan_enabled)
1509 return 0;
1510
1511 return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1512 vlan->vid, flag_untagged,
1513 flag_pvid, extack);
1514 }
1515
/* SFDF flush scope: per-LAG for LAG ports, per-port otherwise. */
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
	if (lagged)
		return MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID;

	return MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}
1521
1522 static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_bridge_port * bridge_port,u16 fid_index)1523 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1524 struct mlxsw_sp_bridge_port *bridge_port,
1525 u16 fid_index)
1526 {
1527 bool lagged = bridge_port->lagged;
1528 char sfdf_pl[MLXSW_REG_SFDF_LEN];
1529 u16 system_port;
1530
1531 system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1532 mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1533 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1534 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1535
1536 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1537 }
1538
/* SFD record ageing policy: learned entries age out, static ones don't. */
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	if (dynamic)
		return MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS;

	return MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}
1544
/* SFD operation selector: add/edit a record or remove it. */
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	if (adding)
		return MLXSW_REG_SFD_OP_WRITE_EDIT;

	return MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
1550
1551 static int
mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp * mlxsw_sp,bool dynamic,const char * mac,u16 fid,__be32 addr,bool adding)1552 mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
1553 const char *mac, u16 fid, __be32 addr, bool adding)
1554 {
1555 char *sfd_pl;
1556 u8 num_rec;
1557 u32 uip;
1558 int err;
1559
1560 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1561 if (!sfd_pl)
1562 return -ENOMEM;
1563
1564 uip = be32_to_cpu(addr);
1565 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1566 mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
1567 mlxsw_sp_sfd_rec_policy(dynamic), mac,
1568 fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
1569 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1570 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1571 if (err)
1572 goto out;
1573
1574 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1575 err = -EBUSY;
1576
1577 out:
1578 kfree(sfd_pl);
1579 return err;
1580 }
1581
/* Program (or remove) a unicast FDB record pointing at an IPv6 underlay
 * tunnel destination; the IPv6 address is referenced via @kvdl_index.
 * Returns 0, -ENOMEM, a write error, or -EBUSY on a record-count
 * mismatch reported by the device.
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
						  const char *mac, u16 fid,
						  u32 kvdl_index, bool adding)
{
	char *pl;
	u8 packed_rec;
	int err;

	pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack6(pl, 0, mac, fid,
				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
	packed_rec = mlxsw_reg_sfd_num_rec_get(pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), pl);
	/* Fewer processed records than packed means the write failed. */
	if (!err && packed_rec != mlxsw_reg_sfd_num_rec_get(pl))
		err = -EBUSY;

	kfree(pl);
	return err;
}
1609
/* Add an IPv6 tunnel unicast FDB entry: allocate a KVDL entry for the
 * IPv6 address, write the FDB record referencing it, then record the
 * {MAC, FID} -> IPv6 mapping. Unwinds in reverse order on failure.
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
					    const char *mac, u16 fid,
					    const struct in6_addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
	if (err)
		return err;

	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
						     kvdl_index, true);
	if (err)
		goto err_sfd_write;

	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
	if (err)
		/* Replace can fail only for creating new mapping, so removing
		 * the FDB entry in the error path is OK.
		 */
		goto err_addr_replace;

	return 0;

err_addr_replace:
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
					       false);
err_sfd_write:
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
	return err;
}
1642
/* Remove an IPv6 tunnel unicast FDB entry, reversing
 * mlxsw_sp_port_fdb_tun_uc_op6_add(): drop the {MAC, FID} -> IPv6
 * mapping, remove the FDB record, then release the address's KVDL entry.
 */
static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
					     const char *mac, u16 fid,
					     const struct in6_addr *addr)
{
	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
}
1651
1652 static int
mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid,const struct in6_addr * addr,bool adding)1653 mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1654 u16 fid, const struct in6_addr *addr, bool adding)
1655 {
1656 if (adding)
1657 return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1658 addr);
1659
1660 mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1661 return 0;
1662 }
1663
/* Add or remove a tunnel unicast FDB entry, dispatching on the underlay
 * protocol. @dynamic only applies to IPv4; the IPv6 path programs a
 * fixed policy in its SFD write helper. Unknown protocols WARN and
 * return -EOPNOTSUPP.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
						    addr->addr4, adding);
	case MLXSW_SP_L3_PROTO_IPV6:
		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
						    &addr->addr6, adding);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}
1682
/* Core unicast FDB programming: write (or remove) a single SFD record
 * for {mac, fid, vid} on @local_port with the given action and ageing
 * policy. Returns 0, -ENOMEM, a write error, or -EBUSY when the device
 * processed fewer records than packed.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				     const char *mac, u16 fid, u16 vid,
				     bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     enum mlxsw_reg_sfd_rec_policy policy)
{
	char *pl;
	u8 packed_rec;
	int err;

	pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(pl, 0, policy, mac, fid, vid, action,
			      local_port);
	packed_rec = mlxsw_reg_sfd_num_rec_get(pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), pl);
	/* Fewer processed records than packed means the write failed. */
	if (!err && packed_rec != mlxsw_reg_sfd_num_rec_get(pl))
		err = -EBUSY;

	kfree(pl);
	return err;
}
1712
/* Program a unicast FDB record on a (non-LAG) local port with the NOP
 * forwarding action and the ageing policy derived from @dynamic.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				   const char *mac, u16 fid, u16 vid,
				   bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_rec_policy policy = mlxsw_sp_sfd_rec_policy(dynamic);

	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
					 adding, MLXSW_REG_SFD_REC_ACTION_NOP,
					 policy);
}
1721
/* Program a static FDB record that forwards the router interface's MAC
 * to the IP router. Local port 0 and VID 0 are used for router records.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	enum mlxsw_reg_sfd_rec_action action =
		MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER;

	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
					 action,
					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
1729
/* Program (or remove) a unicast FDB record that resolves to a LAG.
 * Returns 0, -ENOMEM, a write error, or -EBUSY when the device processed
 * fewer records than packed.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *pl;
	u8 packed_rec;
	int err;

	pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	packed_rec = mlxsw_reg_sfd_num_rec_get(pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), pl);
	/* Fewer processed records than packed means the write failed. */
	if (!err && packed_rec != mlxsw_reg_sfd_num_rec_get(pl))
		err = -EBUSY;

	kfree(pl);
	return err;
}
1758
1759 static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port * mlxsw_sp_port,struct switchdev_notifier_fdb_info * fdb_info,bool adding)1760 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1761 struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1762 {
1763 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1764 struct net_device *orig_dev = fdb_info->info.dev;
1765 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1766 struct mlxsw_sp_bridge_device *bridge_device;
1767 struct mlxsw_sp_bridge_port *bridge_port;
1768 u16 fid_index, vid;
1769
1770 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1771 if (!bridge_port)
1772 return -EINVAL;
1773
1774 bridge_device = bridge_port->bridge_device;
1775 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1776 bridge_device,
1777 fdb_info->vid);
1778 if (!mlxsw_sp_port_vlan)
1779 return 0;
1780
1781 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1782 vid = mlxsw_sp_port_vlan->vid;
1783
1784 if (!bridge_port->lagged)
1785 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1786 bridge_port->system_port,
1787 fdb_info->addr, fid_index, vid,
1788 adding, false);
1789 else
1790 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1791 bridge_port->lag_id,
1792 fdb_info->addr, fid_index,
1793 vid, adding, false);
1794 }
1795
/* Program (or remove) the multicast FDB record binding the MDB entry's
 * {group MAC, FID} to its MID (PGT index). Returns 0, -ENOMEM, a write
 * error, or -EBUSY when the device processed fewer records than packed.
 */
static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_mdb_entry *mdb_entry,
				    bool adding)
{
	char *pl;
	u8 packed_rec;
	int err;

	pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(pl, 0, mdb_entry->key.addr,
			      mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mdb_entry->mid);
	packed_rec = mlxsw_reg_sfd_num_rec_get(pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), pl);
	/* Fewer processed records than packed means the write failed. */
	if (!err && packed_rec != mlxsw_reg_sfd_num_rec_get(pl))
		err = -EBUSY;

	kfree(pl);
	return err;
}
1824
1825 static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_bridge_port * bridge_port,struct mlxsw_sp_ports_bitmap * ports_bm)1826 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1827 struct mlxsw_sp_bridge_port *bridge_port,
1828 struct mlxsw_sp_ports_bitmap *ports_bm)
1829 {
1830 struct mlxsw_sp_port *mlxsw_sp_port;
1831 u64 max_lag_members, i;
1832 int lag_id;
1833
1834 if (!bridge_port->lagged) {
1835 set_bit(bridge_port->system_port, ports_bm->bitmap);
1836 } else {
1837 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1838 MAX_LAG_MEMBERS);
1839 lag_id = bridge_port->lag_id;
1840 for (i = 0; i < max_lag_members; i++) {
1841 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1842 lag_id, i);
1843 if (mlxsw_sp_port)
1844 set_bit(mlxsw_sp_port->local_port,
1845 ports_bm->bitmap);
1846 }
1847 }
1848 }
1849
1850 static void
mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap * flood_bm,struct mlxsw_sp_bridge_device * bridge_device,struct mlxsw_sp * mlxsw_sp)1851 mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
1852 struct mlxsw_sp_bridge_device *bridge_device,
1853 struct mlxsw_sp *mlxsw_sp)
1854 {
1855 struct mlxsw_sp_bridge_port *bridge_port;
1856
1857 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1858 if (bridge_port->mrouter) {
1859 mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1860 bridge_port,
1861 flood_bm);
1862 }
1863 }
1864 }
1865
/* Add every port set in 'ports_bm' to the MDB entry as an mrouter port.
 * On failure, drop the references taken so far and return the error.
 */
static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ports_bitmap *ports_bm,
					struct mlxsw_sp_mdb_entry *mdb_entry)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	unsigned int nbits = ports_bm->nbits;
	int i;

	for_each_set_bit(i, ports_bm->bitmap, nbits) {
		mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
								     mdb_entry,
								     i);
		if (IS_ERR(mdb_entry_port)) {
			/* Limit the rollback below to the ports added before
			 * the failing one.
			 */
			nbits = i;
			goto err_mrouter_port_get;
		}
	}

	return 0;

err_mrouter_port_get:
	for_each_set_bit(i, ports_bm->bitmap, nbits)
		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
	return PTR_ERR(mdb_entry_port);
}
1891
mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ports_bitmap * ports_bm,struct mlxsw_sp_mdb_entry * mdb_entry)1892 static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
1893 struct mlxsw_sp_ports_bitmap *ports_bm,
1894 struct mlxsw_sp_mdb_entry *mdb_entry)
1895 {
1896 int i;
1897
1898 for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
1899 mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
1900 }
1901
1902 static int
mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_bridge_device * bridge_device,struct mlxsw_sp_mdb_entry * mdb_entry,bool add)1903 mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
1904 struct mlxsw_sp_bridge_device *bridge_device,
1905 struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
1906 {
1907 struct mlxsw_sp_ports_bitmap ports_bm;
1908 int err;
1909
1910 err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
1911 if (err)
1912 return err;
1913
1914 mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
1915
1916 if (add)
1917 err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
1918 mdb_entry);
1919 else
1920 mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
1921
1922 mlxsw_sp_port_bitmap_fini(&ports_bm);
1923 return err;
1924 }
1925
/* Create a new MDB entry for {addr, fid}: allocate a PGT MID, populate it
 * with the bridge's mrouter ports and the requesting local port, program
 * the entry in the device (if multicast is enabled) and link the entry
 * into the bridge's hash table and list. Returns the entry or ERR_PTR();
 * on failure everything is unwound in reverse order.
 */
static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   const unsigned char *addr, u16 fid, u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
	if (!mdb_entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(mdb_entry->key.addr, addr);
	mdb_entry->key.fid = fid;
	err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
	if (err)
		goto err_pgt_mid_alloc;

	INIT_LIST_HEAD(&mdb_entry->ports_list);

	/* Mrouter ports receive all multicast traffic, so add them first. */
	err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
					   true);
	if (err)
		goto err_mdb_mrouters_set;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
						     local_port);
	if (IS_ERR(mdb_entry_port)) {
		err = PTR_ERR(mdb_entry_port);
		goto err_mdb_entry_port_get;
	}

	/* Only program the SFD record when multicast snooping is enabled;
	 * otherwise the entry is kept in software only.
	 */
	if (bridge_device->multicast_enabled) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
		if (err)
			goto err_mdb_entry_write;
	}

	err = rhashtable_insert_fast(&bridge_device->mdb_ht,
				     &mdb_entry->ht_node,
				     mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);

	return mdb_entry;

err_rhashtable_insert:
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
err_mdb_entry_write:
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
err_mdb_entry_port_get:
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
err_mdb_mrouters_set:
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
err_pgt_mid_alloc:
	kfree(mdb_entry);
	return ERR_PTR(err);
}
1988
/* Destroy an MDB entry. The SFD record is removed from the device before
 * the port references and the PGT MID are released, so that the device
 * never holds an MDB entry pointing to an emptied or freed PGT entry
 * (see the comment in mlxsw_sp_mc_mdb_entry_put()).
 */
static void
mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_mdb_entry *mdb_entry,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   u16 local_port, bool force)
{
	list_del(&mdb_entry->list);
	rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
			       mlxsw_sp_mdb_ht_params);
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
	/* All port references should be gone by now. */
	WARN_ON(!list_empty(&mdb_entry->ports_list));
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
	kfree(mdb_entry);
}
2006
2007 static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_bridge_device * bridge_device,const unsigned char * addr,u16 fid,u16 local_port)2008 mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
2009 struct mlxsw_sp_bridge_device *bridge_device,
2010 const unsigned char *addr, u16 fid, u16 local_port)
2011 {
2012 struct mlxsw_sp_mdb_entry_key key = {};
2013 struct mlxsw_sp_mdb_entry *mdb_entry;
2014
2015 ether_addr_copy(key.addr, addr);
2016 key.fid = fid;
2017 mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
2018 mlxsw_sp_mdb_ht_params);
2019 if (mdb_entry) {
2020 struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
2021
2022 mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
2023 mdb_entry,
2024 local_port);
2025 if (IS_ERR(mdb_entry_port))
2026 return ERR_CAST(mdb_entry_port);
2027
2028 return mdb_entry;
2029 }
2030
2031 return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
2032 local_port);
2033 }
2034
2035 static bool
mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry * mdb_entry,struct mlxsw_sp_mdb_entry_port * removed_entry_port,bool force)2036 mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
2037 struct mlxsw_sp_mdb_entry_port *removed_entry_port,
2038 bool force)
2039 {
2040 if (mdb_entry->ports_count > 1)
2041 return false;
2042
2043 if (force)
2044 return true;
2045
2046 if (!removed_entry_port->mrouter &&
2047 refcount_read(&removed_entry_port->refcount) > 1)
2048 return false;
2049
2050 if (removed_entry_port->mrouter &&
2051 refcount_read(&removed_entry_port->refcount) > 2)
2052 return false;
2053
2054 return true;
2055 }
2056
/* Drop local_port's reference on an MDB entry, destroying the entry
 * altogether once the last required reference is gone.
 */
static void
mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_bridge_device *bridge_device,
			  struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
			  bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	/* Avoid a temporary situation in which the MDB entry points to an empty
	 * PGT entry, as otherwise packets will be temporarily dropped instead
	 * of being flooded. Instead, in this situation, call
	 * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
	 * then releases the PGT entry.
	 */
	if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
		mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
					   local_port, force);
	else
		mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
					    force);
}
2082
mlxsw_sp_port_mdb_add(struct mlxsw_sp_port * mlxsw_sp_port,const struct switchdev_obj_port_mdb * mdb)2083 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
2084 const struct switchdev_obj_port_mdb *mdb)
2085 {
2086 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2087 struct net_device *orig_dev = mdb->obj.orig_dev;
2088 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2089 struct mlxsw_sp_bridge_device *bridge_device;
2090 struct mlxsw_sp_bridge_port *bridge_port;
2091 struct mlxsw_sp_mdb_entry *mdb_entry;
2092 u16 fid_index;
2093
2094 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
2095 if (!bridge_port)
2096 return 0;
2097
2098 bridge_device = bridge_port->bridge_device;
2099 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
2100 bridge_device,
2101 mdb->vid);
2102 if (!mlxsw_sp_port_vlan)
2103 return 0;
2104
2105 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
2106
2107 mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
2108 mdb->addr, fid_index,
2109 mlxsw_sp_port->local_port);
2110 if (IS_ERR(mdb_entry))
2111 return PTR_ERR(mdb_entry);
2112
2113 return 0;
2114 }
2115
/* Program (mc_enabled=true) or remove (mc_enabled=false) all of the
 * bridge's MDB entries in the device; used when multicast snooping is
 * toggled. On failure, restore the previous state of the entries that
 * were already changed.
 */
static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool mc_enabled)
{
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
		if (err)
			goto err_mdb_entry_write;
	}
	return 0;

err_mdb_entry_write:
	/* Roll back only the entries written before the failure. */
	list_for_each_entry_continue_reverse(mdb_entry,
					     &bridge_device->mdb_list, list)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
	return err;
}
2137
2138 static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port * mlxsw_sp_port,struct mlxsw_sp_bridge_port * bridge_port,bool add)2139 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
2140 struct mlxsw_sp_bridge_port *bridge_port,
2141 bool add)
2142 {
2143 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2144 struct mlxsw_sp_bridge_device *bridge_device;
2145 u16 local_port = mlxsw_sp_port->local_port;
2146 struct mlxsw_sp_mdb_entry *mdb_entry;
2147
2148 bridge_device = bridge_port->bridge_device;
2149
2150 list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
2151 if (add)
2152 mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
2153 local_port);
2154 else
2155 mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
2156 local_port);
2157 }
2158 }
2159
mlxsw_sp_port_obj_add(struct net_device * dev,const void * ctx,const struct switchdev_obj * obj,struct netlink_ext_ack * extack)2160 static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
2161 const struct switchdev_obj *obj,
2162 struct netlink_ext_ack *extack)
2163 {
2164 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2165 const struct switchdev_obj_port_vlan *vlan;
2166 int err = 0;
2167
2168 switch (obj->id) {
2169 case SWITCHDEV_OBJ_ID_PORT_VLAN:
2170 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
2171
2172 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
2173
2174 /* The event is emitted before the changes are actually
2175 * applied to the bridge. Therefore schedule the respin
2176 * call for later, so that the respin logic sees the
2177 * updated bridge state.
2178 */
2179 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2180 break;
2181 case SWITCHDEV_OBJ_ID_PORT_MDB:
2182 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
2183 SWITCHDEV_OBJ_PORT_MDB(obj));
2184 break;
2185 default:
2186 err = -EOPNOTSUPP;
2187 break;
2188 }
2189
2190 return err;
2191 }
2192
/* Tear down a VLAN on a bridge port: leave the bridge, update the PVID
 * and remove the VLAN from the port.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* If the deleted VLAN is the port's current PVID, the PVID is
	 * removed (set to 0); otherwise it is left untouched.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 proto;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	/* Set the PVID with the bridge's VLAN protocol (802.1Q / 802.1ad). */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
2211
mlxsw_sp_port_vlans_del(struct mlxsw_sp_port * mlxsw_sp_port,const struct switchdev_obj_port_vlan * vlan)2212 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
2213 const struct switchdev_obj_port_vlan *vlan)
2214 {
2215 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2216 struct net_device *orig_dev = vlan->obj.orig_dev;
2217 struct mlxsw_sp_bridge_port *bridge_port;
2218
2219 if (netif_is_bridge_master(orig_dev))
2220 return -EOPNOTSUPP;
2221
2222 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
2223 if (WARN_ON(!bridge_port))
2224 return -EINVAL;
2225
2226 if (!bridge_port->bridge_device->vlan_enabled)
2227 return 0;
2228
2229 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
2230
2231 return 0;
2232 }
2233
mlxsw_sp_port_mdb_del(struct mlxsw_sp_port * mlxsw_sp_port,const struct switchdev_obj_port_mdb * mdb)2234 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
2235 const struct switchdev_obj_port_mdb *mdb)
2236 {
2237 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2238 struct net_device *orig_dev = mdb->obj.orig_dev;
2239 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2240 struct mlxsw_sp_bridge_device *bridge_device;
2241 struct net_device *dev = mlxsw_sp_port->dev;
2242 struct mlxsw_sp_bridge_port *bridge_port;
2243 struct mlxsw_sp_mdb_entry_key key = {};
2244 struct mlxsw_sp_mdb_entry *mdb_entry;
2245 u16 fid_index;
2246
2247 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
2248 if (!bridge_port)
2249 return 0;
2250
2251 bridge_device = bridge_port->bridge_device;
2252 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
2253 bridge_device,
2254 mdb->vid);
2255 if (!mlxsw_sp_port_vlan)
2256 return 0;
2257
2258 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
2259
2260 ether_addr_copy(key.addr, mdb->addr);
2261 key.fid = fid_index;
2262 mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
2263 mlxsw_sp_mdb_ht_params);
2264 if (!mdb_entry) {
2265 netdev_err(dev, "Unable to remove port from MC DB\n");
2266 return -EINVAL;
2267 }
2268
2269 mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
2270 mlxsw_sp_port->local_port, false);
2271 return 0;
2272 }
2273
/* Forcibly remove the port from all MDB entries of the given FID, e.g.
 * when the port leaves the bridge.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
	u16 local_port = mlxsw_sp_port->local_port;

	bridge_device = bridge_port->bridge_device;

	/* _safe iteration: the put below may free the entry. */
	list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
				 list) {
		if (mdb_entry->key.fid != fid_index)
			continue;

		/* Also drop the reference the port holds on the entry as
		 * an mrouter port, if any.
		 */
		if (bridge_port->mrouter)
			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
							    mdb_entry,
							    local_port);

		mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
					  local_port, true);
	}
}
2300
/* switchdev object deletion handler: un-offload VLAN and MDB deletions;
 * anything else is not supported.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Deletions may affect mirroring sessions; recompute SPAN state. */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
2325
/* Return the first member port currently mapped to the LAG, or NULL if
 * the LAG has no members.
 */
static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *member = NULL;
	u64 max_members;
	int i;

	max_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG_MEMBERS);
	for (i = 0; i < max_members && !member; i++)
		member = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);

	return member;
}
2342
/* Common join logic for VLAN-aware (802.1Q / 802.1ad) bridges. */
static int
mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netlink_ext_ack *extack)
{
	/* VLAN uppers cannot be offloaded under a VLAN-aware bridge. */
	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;
}
2359
/* struct mlxsw_sp_bridge_ops::port_join for 802.1Q bridges. */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						    extack);
}
2369
/* Common leave logic for VLAN-aware bridges: restore the default PVID. */
static void
mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
2377
/* struct mlxsw_sp_bridge_ops::port_leave for 802.1Q bridges. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
2385
/* Common VXLAN join logic for VLAN-aware bridges: bind the VxLAN device's
 * VNI to the FID of the given VLAN. 'ethertype' selects the EtherType of
 * decapsulated packets (802.1Q vs. 802.1ad).
 */
static int
mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				      const struct net_device *vxlan_dev,
				      u16 vid, u16 ethertype,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
		.ethertype = ethertype,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
		return PTR_ERR(fid);
	}

	/* A FID can only be bound to a single VNI at a time. */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2436
/* struct mlxsw_sp_bridge_ops::vxlan_join for 802.1Q bridges (C-tag). */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021Q, extack);
}
2445
2446 static struct net_device *
mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device * br_dev,u16 vid)2447 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2448 {
2449 struct net_device *dev;
2450 struct list_head *iter;
2451
2452 netdev_for_each_lower_dev(br_dev, dev, iter) {
2453 u16 pvid;
2454 int err;
2455
2456 if (!netif_is_vxlan(dev))
2457 continue;
2458
2459 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2460 if (err || pvid != vid)
2461 continue;
2462
2463 return dev;
2464 }
2465
2466 return NULL;
2467 }
2468
/* struct mlxsw_sp_bridge_ops::fid_get for 802.1Q bridges: one FID per
 * VLAN.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
2477
/* struct mlxsw_sp_bridge_ops::fid_lookup for 802.1Q bridges. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
2486
/* struct mlxsw_sp_bridge_ops::fid_vid for 802.1Q bridges: map a FID back
 * to its VLAN ID.
 */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2493
/* Bridge ops for VLAN-aware (802.1Q) bridges: FIDs are allocated per
 * VLAN.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join = mlxsw_sp_bridge_8021q_port_join,
	.port_leave = mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get = mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
};
2502
2503 static bool
mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port * mlxsw_sp_port,const struct net_device * br_dev)2504 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2505 const struct net_device *br_dev)
2506 {
2507 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2508
2509 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2510 list) {
2511 if (mlxsw_sp_port_vlan->bridge_port &&
2512 mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2513 br_dev)
2514 return true;
2515 }
2516
2517 return false;
2518 }
2519
2520 static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device * bridge_device,struct mlxsw_sp_bridge_port * bridge_port,struct mlxsw_sp_port * mlxsw_sp_port,struct netlink_ext_ack * extack)2521 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2522 struct mlxsw_sp_bridge_port *bridge_port,
2523 struct mlxsw_sp_port *mlxsw_sp_port,
2524 struct netlink_ext_ack *extack)
2525 {
2526 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2527 struct net_device *dev = bridge_port->dev;
2528 u16 vid;
2529
2530 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2531 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2532 if (WARN_ON(!mlxsw_sp_port_vlan))
2533 return -EINVAL;
2534
2535 if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2536 NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2537 return -EINVAL;
2538 }
2539
2540 /* Port is no longer usable as a router interface */
2541 if (mlxsw_sp_port_vlan->fid)
2542 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2543
2544 return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
2545 extack);
2546 }
2547
2548 static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device * bridge_device,struct mlxsw_sp_bridge_port * bridge_port,struct mlxsw_sp_port * mlxsw_sp_port)2549 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2550 struct mlxsw_sp_bridge_port *bridge_port,
2551 struct mlxsw_sp_port *mlxsw_sp_port)
2552 {
2553 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2554 struct net_device *dev = bridge_port->dev;
2555 u16 vid;
2556
2557 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2558 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2559 if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2560 return;
2561
2562 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2563 }
2564
2565 static int
mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device * bridge_device,const struct net_device * vxlan_dev,u16 vid,struct netlink_ext_ack * extack)2566 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2567 const struct net_device *vxlan_dev, u16 vid,
2568 struct netlink_ext_ack *extack)
2569 {
2570 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2571 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2572 struct mlxsw_sp_nve_params params = {
2573 .type = MLXSW_SP_NVE_TYPE_VXLAN,
2574 .vni = vxlan->cfg.vni,
2575 .dev = vxlan_dev,
2576 .ethertype = ETH_P_8021Q,
2577 };
2578 struct mlxsw_sp_fid *fid;
2579 int err;
2580
2581 fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2582 if (IS_ERR(fid)) {
2583 NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2584 return -EINVAL;
2585 }
2586
2587 if (mlxsw_sp_fid_vni_is_set(fid)) {
2588 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2589 err = -EINVAL;
2590 goto err_vni_exists;
2591 }
2592
2593 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack);
2594 if (err)
2595 goto err_nve_fid_enable;
2596
2597 return 0;
2598
2599 err_nve_fid_enable:
2600 err_vni_exists:
2601 mlxsw_sp_fid_put(fid);
2602 return err;
2603 }
2604
/* struct mlxsw_sp_bridge_ops::fid_get for 802.1D bridges: one FID per
 * bridge, keyed by the bridge device's ifindex; 'vid' is ignored.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
2613
2614 static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device * bridge_device,u16 vid)2615 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2616 u16 vid)
2617 {
2618 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2619
2620 /* The only valid VLAN for a VLAN-unaware bridge is 0 */
2621 if (vid)
2622 return NULL;
2623
2624 return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2625 }
2626
/* struct mlxsw_sp_bridge_ops::fid_vid for 802.1D bridges: VLAN-unaware
 * bridges have no VID, so always 0.
 */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2633
/* Bridge ops for VLAN-unaware (802.1D) bridges: a single FID per bridge. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join = mlxsw_sp_bridge_8021d_port_join,
	.port_leave = mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get = mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
};
2642
2643 static int
mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device * bridge_device,struct mlxsw_sp_bridge_port * bridge_port,struct mlxsw_sp_port * mlxsw_sp_port,struct netlink_ext_ack * extack)2644 mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2645 struct mlxsw_sp_bridge_port *bridge_port,
2646 struct mlxsw_sp_port *mlxsw_sp_port,
2647 struct netlink_ext_ack *extack)
2648 {
2649 int err;
2650
2651 err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
2652 if (err)
2653 return err;
2654
2655 err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
2656 extack);
2657 if (err)
2658 goto err_bridge_vlan_aware_port_join;
2659
2660 return 0;
2661
2662 err_bridge_vlan_aware_port_join:
2663 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2664 return err;
2665 }
2666
/* struct mlxsw_sp_bridge_ops::port_leave for 802.1ad bridges: restore
 * C-tag based VLAN classification after the common VLAN-aware leave.
 */
static void
mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
}
2675
/* struct mlxsw_sp_bridge_ops::vxlan_join for 802.1ad bridges (S-tag). */
static int
mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021AD, extack);
}
2684
/* Spectrum-1 bridge ops for 802.1ad bridges; the FID helpers are shared
 * with the 802.1Q ops since both are VLAN-aware.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
	.port_join = mlxsw_sp_bridge_8021ad_port_join,
	.port_leave = mlxsw_sp_bridge_8021ad_port_leave,
	.vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get = mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
};
2693
/* Spectrum-2 variant of the 802.1ad port join: additionally set the
 * egress EtherType to 802.1ad.
 */
static int
mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct netlink_ext_ack *extack)
{
	int err;

	/* The EtherType of decapsulated packets is determined at the egress
	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
	 * co-exist.
	 */
	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
					       mlxsw_sp_port, extack);
	if (err)
		goto err_bridge_8021ad_port_join;

	return 0;

err_bridge_8021ad_port_join:
	/* Restore the default 802.1Q egress EtherType. */
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
	return err;
}
2721
/* Spectrum-2 flavour of the 802.1ad port leave: performs the common leave
 * and then undoes the egress EtherType override applied on port join.
 */
static void
mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				   struct mlxsw_sp_bridge_port *bridge_port,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
					  mlxsw_sp_port);
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
}
2731
/* Bridge operations for an 802.1ad (QinQ) bridge on Spectrum-2+; differs
 * from Spectrum-1 only in the port join/leave handlers, which also manage
 * the egress EtherType override.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2740
mlxsw_sp_port_bridge_join(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * brport_dev,struct net_device * br_dev,struct netlink_ext_ack * extack)2741 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2742 struct net_device *brport_dev,
2743 struct net_device *br_dev,
2744 struct netlink_ext_ack *extack)
2745 {
2746 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2747 struct mlxsw_sp_bridge_device *bridge_device;
2748 struct mlxsw_sp_bridge_port *bridge_port;
2749 int err;
2750
2751 bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2752 extack);
2753 if (IS_ERR(bridge_port))
2754 return PTR_ERR(bridge_port);
2755 bridge_device = bridge_port->bridge_device;
2756
2757 err = bridge_device->ops->port_join(bridge_device, bridge_port,
2758 mlxsw_sp_port, extack);
2759 if (err)
2760 goto err_port_join;
2761
2762 return 0;
2763
2764 err_port_join:
2765 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2766 return err;
2767 }
2768
mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * brport_dev,struct net_device * br_dev)2769 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2770 struct net_device *brport_dev,
2771 struct net_device *br_dev)
2772 {
2773 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2774 struct mlxsw_sp_bridge_device *bridge_device;
2775 struct mlxsw_sp_bridge_port *bridge_port;
2776
2777 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2778 if (!bridge_device)
2779 return;
2780 bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2781 if (!bridge_port)
2782 return;
2783
2784 bridge_device->ops->port_leave(bridge_device, bridge_port,
2785 mlxsw_sp_port);
2786 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2787 }
2788
/* Associate a VxLAN device with an offloaded bridge via the bridge-type
 * specific join operation. The bridge is expected to be known to the
 * driver; a missing entry is a driver bug (hence the WARN_ON).
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device =
		mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);

	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2803
/* Dissociate a VxLAN device from its offloaded bridge by disabling NVE on
 * the FID mapped to the device's VNI.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop both the reference we just took during lookup and the reference
	 * the VXLAN device took.
	 */
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
}
2822
2823 static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr * vxlan_addr,enum mlxsw_sp_l3proto * proto,union mlxsw_sp_l3addr * addr)2824 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2825 enum mlxsw_sp_l3proto *proto,
2826 union mlxsw_sp_l3addr *addr)
2827 {
2828 if (vxlan_addr->sa.sa_family == AF_INET) {
2829 addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2830 *proto = MLXSW_SP_L3_PROTO_IPV4;
2831 } else {
2832 addr->addr6 = vxlan_addr->sin6.sin6_addr;
2833 *proto = MLXSW_SP_L3_PROTO_IPV6;
2834 }
2835 }
2836
2837 static void
mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,const union mlxsw_sp_l3addr * addr,union vxlan_addr * vxlan_addr)2838 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2839 const union mlxsw_sp_l3addr *addr,
2840 union vxlan_addr *vxlan_addr)
2841 {
2842 switch (proto) {
2843 case MLXSW_SP_L3_PROTO_IPV4:
2844 vxlan_addr->sa.sa_family = AF_INET;
2845 vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2846 break;
2847 case MLXSW_SP_L3_PROTO_IPV6:
2848 vxlan_addr->sa.sa_family = AF_INET6;
2849 vxlan_addr->sin6.sin6_addr = addr->addr6;
2850 break;
2851 }
2852 }
2853
/* Notify the VxLAN driver's FDB about an entry learned/aged-out by the
 * device. The info struct is zero-initialized so that members we do not
 * explicitly set (including the embedded notifier info) are not passed to
 * the notifier chain as uninitialized stack data.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct switchdev_notifier_vxlan_fdb_info info = {
		.remote_port = vxlan->cfg.dst_port,
		.remote_vni = vni,
		.remote_ifindex = 0,
		.vni = vni,
		.offloaded = adding,
	};
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	ether_addr_copy(info.eth_addr, mac);
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}
2875
/* Forward an NVE FDB notification to the tunnel driver. Only VxLAN is
 * currently handled; other NVE device types are ignored.
 */
static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
					    const char *mac,
					    enum mlxsw_sp_l3proto proto,
					    union mlxsw_sp_l3addr *addr,
					    __be32 vni,
					    bool adding)
{
	if (!netif_is_vxlan(dev))
		return;

	mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni, adding);
}
2887
2888 static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,const char * mac,u16 vid,struct net_device * dev,bool offloaded)2889 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2890 const char *mac, u16 vid,
2891 struct net_device *dev, bool offloaded)
2892 {
2893 struct switchdev_notifier_fdb_info info = {};
2894
2895 info.addr = mac;
2896 info.vid = vid;
2897 info.offloaded = offloaded;
2898 call_switchdev_notifiers(type, dev, &info.info, NULL);
2899 }
2900
/* Process a single learned/aged-out MAC record from the SFN register:
 * re-program the entry in the device and mirror it to the bridge driver
 * via a switchdev notifier. If the record cannot be associated with a
 * known {port, FID}, the entry is instead deleted from the device without
 * notifying, so the device stops re-sending the same notification.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port, vid, fid, evid = 0;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are reported with VID 0 towards the bridge
	 * driver, but the device is still programmed with the real egress
	 * VID (evid).
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	evid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Delete the entry from the device and suppress the notification. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2964
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the record identifies
 * a LAG instead of a local port, so the entry is (re-)programmed via the
 * LAG FDB operation and the notification is sent on the LAG's port
 * representor. Unresolvable records are deleted from the device without
 * notifying ("just_remove" path).
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VID 0 towards the bridge driver for VLAN-unaware bridges; the
	 * device still needs the real VLAN (lag_vid) for the LAG entry.
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Delete the entry from the device and suppress the notification. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3027
/* Resolve a tunnel-learned FDB record's FID to its NVE device, VNI and
 * bridge VLAN, validating that learning is allowed on that device.
 * Returns 0 on success, a negative errno otherwise. Note that *nve_dev is
 * assigned as soon as the device is found, even if a later check fails —
 * callers rely on it for the error path.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Reject learning when the bridge port has learning disabled. */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* Likewise when the VxLAN device itself has learning disabled. */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
3078
/* Process a learned/aged-out unicast tunnel record: resolve its FID to
 * the NVE device and remote IP, re-program the entry and notify both the
 * tunnel driver and the bridge. On any failure the entry is removed from
 * the device so it does not keep re-sending the same notification.
 */
static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
						      char *sfn_pl,
						      int rec_index,
						      bool adding)
{
	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
	enum switchdev_notifier_type type;
	struct net_device *nve_dev;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	char mac[ETH_ALEN];
	u16 fid_index, vid;
	__be32 vni;
	u32 uip;
	int err;

	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
				       &uip, &sfn_proto);

	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
	if (!fid)
		goto err_fid_lookup;

	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
					      (enum mlxsw_sp_l3proto) sfn_proto,
					      &addr);
	if (err)
		goto err_ip_resolve;

	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
							  &nve_dev, &vid, &vni);
	if (err)
		goto err_fdb_process;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
					     (enum mlxsw_sp_l3proto) sfn_proto,
					     &addr, adding, true);
	if (err)
		goto err_fdb_op;

	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
					(enum mlxsw_sp_l3proto) sfn_proto,
					&addr, vni, adding);

	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_op:
err_fdb_process:
err_ip_resolve:
	mlxsw_sp_fid_put(fid);
err_fid_lookup:
	/* Remove an FDB entry in case we cannot process it. Otherwise the
	 * device will keep sending the same notification over and over again.
	 * NOTE(review): on the err_fid_lookup/err_ip_resolve paths 'addr' is
	 * not (fully) resolved yet; presumably it is ignored for a removal —
	 * confirm against mlxsw_sp_port_fdb_tunnel_uc_op().
	 */
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
				       false, true);
}
3143
mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp * mlxsw_sp,char * sfn_pl,int rec_index)3144 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
3145 char *sfn_pl, int rec_index)
3146 {
3147 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
3148 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
3149 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3150 rec_index, true);
3151 break;
3152 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
3153 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3154 rec_index, false);
3155 break;
3156 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
3157 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3158 rec_index, true);
3159 break;
3160 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
3161 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3162 rec_index, false);
3163 break;
3164 case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
3165 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3166 rec_index, true);
3167 break;
3168 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
3169 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3170 rec_index, false);
3171 break;
3172 }
3173 }
3174
3175 #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
3176
/* Delayed work that polls the device for FDB (learning/ageing)
 * notifications via the SFN register. Runs under RTNL. Reschedules itself
 * as long as at least one bridge exists; when it stops with records still
 * pending (a full final page), it requests an immediate re-poll.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	bool reschedule = false;
	char *sfn_pl;
	int queries;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	if (list_empty(&bridge->bridges_list))
		goto out;
	reschedule = true;
	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
	while (queries > 0) {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			goto out;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
		/* A partial page means the device's queue is drained. */
		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
			goto out;
		queries--;
	}

out:
	rtnl_unlock();
	kfree(sfn_pl);
	if (!reschedule)
		return;
	/* queries == 0: session budget exhausted with records pending. */
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
3222
/* Deferred work item for a switchdev FDB notification. The notification
 * payload is copied out of the atomic notifier context and processed
 * later in process context (under RTNL) by the work function.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		/* Which member is valid depends on 'event': bridge FDB
		 * events use 'fdb_info', VxLAN FDB events use
		 * 'vxlan_fdb_info'.
		 */
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;		/* Reference held until work runs. */
	unsigned long event;
};
3232
/* Offload (or un-offload) a bridge FDB entry that points at a VxLAN
 * device, and report the offload status back to both the VxLAN and the
 * bridge drivers.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	/* Without a matching unicast entry in the VxLAN driver's own FDB
	 * there is no remote IP to program; nothing to offload.
	 */
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Removal is best-effort: the error is deliberately ignored
		 * and the entry is reported as no longer offloaded.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
3281
/* Handle a bridge FDB event whose target device is an NVE (VxLAN) device.
 * Validates that the event is relevant (user-added, device running and
 * enslaved to an offloaded bridge) and that the bridge FID has a VNI,
 * then delegates the actual programming.
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only offload entries the user added; local entries stay in SW. */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    (!switchdev_work->fdb_info.added_by_user ||
	     switchdev_work->fdb_info.is_local))
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	/* No VNI means the FID is not associated with a VxLAN tunnel. */
	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
3332
/* Process-context handler for a deferred bridge FDB event. Runs under
 * RTNL: programs/removes the entry in the device and acknowledges
 * offloaded entries back to the bridge. Always frees the work item and
 * drops the device reference taken when the work was scheduled.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* VxLAN devices are not lowers of our ports; handled separately. */
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-added, non-local entries are offloaded. */
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
3384
/* Handle a VxLAN FDB add towards the device. The all-zeros MAC denotes
 * the VxLAN default ("flood") entry and is translated into a flood IP;
 * any other MAC is programmed as a unicast tunnel entry, but only if the
 * bridge's own FDB also points that MAC at the VxLAN device.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeros MAC: add the remote as a flood destination for the FID
	 * and acknowledge the offload to the VxLAN driver.
	 */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info, NULL);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info, NULL);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
3456
/* Handle a VxLAN FDB delete towards the device. Only entries previously
 * marked as offloaded are acted upon. The all-zeros MAC removes the flood
 * destination; any other MAC removes the unicast tunnel entry and clears
 * the offload mark in the bridge.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	if (!vxlan_fdb_info->offloaded)
		return;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false);

	mlxsw_sp_fid_put(fid);
}
3502
mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct * work)3503 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3504 {
3505 struct mlxsw_sp_switchdev_event_work *switchdev_work =
3506 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3507 struct net_device *dev = switchdev_work->dev;
3508 struct mlxsw_sp *mlxsw_sp;
3509 struct net_device *br_dev;
3510
3511 rtnl_lock();
3512
3513 if (!netif_running(dev))
3514 goto out;
3515 br_dev = netdev_master_upper_dev_get(dev);
3516 if (!br_dev)
3517 goto out;
3518 if (!netif_is_bridge_master(br_dev))
3519 goto out;
3520 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3521 if (!mlxsw_sp)
3522 goto out;
3523
3524 switch (switchdev_work->event) {
3525 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3526 mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3527 break;
3528 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3529 mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3530 break;
3531 }
3532
3533 out:
3534 rtnl_unlock();
3535 kfree(switchdev_work);
3536 dev_put(dev);
3537 }
3538
/* Validate and copy a VxLAN FDB notification before deferring it to
 * process context. Rejects configurations the device cannot offload,
 * reporting the reason via extack. Returns 0 on success or -EOPNOTSUPP.
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;
	struct netlink_ext_ack *extack;

	extack = switchdev_notifier_info_to_extack(info);
	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_vni != cfg->vni ||
	    vxlan_fdb_info->vni != cfg->vni) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
		return -EOPNOTSUPP;
	}
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
		return -EOPNOTSUPP;
	}

	/* Stash a copy; the notifier's data is not valid after we return. */
	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}
3580
3581 /* Called under rcu_read_lock() */
mlxsw_sp_switchdev_event(struct notifier_block * unused,unsigned long event,void * ptr)3582 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3583 unsigned long event, void *ptr)
3584 {
3585 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3586 struct mlxsw_sp_switchdev_event_work *switchdev_work;
3587 struct switchdev_notifier_fdb_info *fdb_info;
3588 struct switchdev_notifier_info *info = ptr;
3589 struct net_device *br_dev;
3590 int err;
3591
3592 if (event == SWITCHDEV_PORT_ATTR_SET) {
3593 err = switchdev_handle_port_attr_set(dev, ptr,
3594 mlxsw_sp_port_dev_check,
3595 mlxsw_sp_port_attr_set);
3596 return notifier_from_errno(err);
3597 }
3598
3599 /* Tunnel devices are not our uppers, so check their master instead */
3600 br_dev = netdev_master_upper_dev_get_rcu(dev);
3601 if (!br_dev)
3602 return NOTIFY_DONE;
3603 if (!netif_is_bridge_master(br_dev))
3604 return NOTIFY_DONE;
3605 if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
3606 return NOTIFY_DONE;
3607
3608 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3609 if (!switchdev_work)
3610 return NOTIFY_BAD;
3611
3612 switchdev_work->dev = dev;
3613 switchdev_work->event = event;
3614
3615 switch (event) {
3616 case SWITCHDEV_FDB_ADD_TO_DEVICE:
3617 case SWITCHDEV_FDB_DEL_TO_DEVICE:
3618 case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3619 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3620 fdb_info = container_of(info,
3621 struct switchdev_notifier_fdb_info,
3622 info);
3623 INIT_WORK(&switchdev_work->work,
3624 mlxsw_sp_switchdev_bridge_fdb_event_work);
3625 memcpy(&switchdev_work->fdb_info, ptr,
3626 sizeof(switchdev_work->fdb_info));
3627 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3628 if (!switchdev_work->fdb_info.addr)
3629 goto err_addr_alloc;
3630 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3631 fdb_info->addr);
3632 /* Take a reference on the device. This can be either
3633 * upper device containig mlxsw_sp_port or just a
3634 * mlxsw_sp_port
3635 */
3636 dev_hold(dev);
3637 break;
3638 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3639 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3640 INIT_WORK(&switchdev_work->work,
3641 mlxsw_sp_switchdev_vxlan_fdb_event_work);
3642 err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3643 info);
3644 if (err)
3645 goto err_vxlan_work_prepare;
3646 dev_hold(dev);
3647 break;
3648 default:
3649 kfree(switchdev_work);
3650 return NOTIFY_DONE;
3651 }
3652
3653 mlxsw_core_schedule_work(&switchdev_work->work);
3654
3655 return NOTIFY_DONE;
3656
3657 err_vxlan_work_prepare:
3658 err_addr_alloc:
3659 kfree(switchdev_work);
3660 return NOTIFY_BAD;
3661 }
3662
/* Atomic (non-blocking) switchdev notifier. Receives FDB add/del events
 * from the bridge and VxLAN drivers and defers their processing to a
 * workqueue; see the handler for the exact events it consumes.
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3666
/* Handle the addition of VLAN 'vid' (with PVID / egress-untagged flags) on
 * a VxLAN device enslaved to a VLAN-aware bridge. Depending on the flags
 * and on which VLAN is currently mapped to the device's VNI, the VNI may
 * need to be mapped to the new VLAN, unmapped, or left untouched. The five
 * distinct cases are documented inline below.
 * Returns 0 on success or a negative errno (reported via 'extack').
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	/* Nothing to offload while the VxLAN device is down. */
	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	/* The lookup takes a reference on the FID; every path below that got a
	 * FID must release it with mlxsw_sp_fid_put().
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
						      vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		/* Both flags set here would contradict the first-case check
		 * above, which already found the VNI mapped.
		 */
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: Thew new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Best-effort rollback: re-map the VNI to the previous VLAN. */
	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
	return err;
}
3752
3753 static void
mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_bridge_device * bridge_device,const struct net_device * vxlan_dev,u16 vid)3754 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3755 struct mlxsw_sp_bridge_device *bridge_device,
3756 const struct net_device *vxlan_dev, u16 vid)
3757 {
3758 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3759 __be32 vni = vxlan->cfg.vni;
3760 struct mlxsw_sp_fid *fid;
3761
3762 if (!netif_running(vxlan_dev))
3763 return;
3764
3765 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3766 if (!fid)
3767 return;
3768
3769 /* A different VLAN than the one mapped to the VNI is deleted */
3770 if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3771 goto out;
3772
3773 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3774
3775 out:
3776 mlxsw_sp_fid_put(fid);
3777 }
3778
3779 static int
mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device * vxlan_dev,struct switchdev_notifier_port_obj_info * port_obj_info)3780 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3781 struct switchdev_notifier_port_obj_info *
3782 port_obj_info)
3783 {
3784 struct switchdev_obj_port_vlan *vlan =
3785 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3786 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3787 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3788 struct mlxsw_sp_bridge_device *bridge_device;
3789 struct netlink_ext_ack *extack;
3790 struct mlxsw_sp *mlxsw_sp;
3791 struct net_device *br_dev;
3792
3793 extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3794 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3795 if (!br_dev)
3796 return 0;
3797
3798 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3799 if (!mlxsw_sp)
3800 return 0;
3801
3802 port_obj_info->handled = true;
3803
3804 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3805 if (!bridge_device)
3806 return -EINVAL;
3807
3808 if (!bridge_device->vlan_enabled)
3809 return 0;
3810
3811 return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3812 vxlan_dev, vlan->vid,
3813 flag_untagged,
3814 flag_pvid, extack);
3815 }
3816
3817 static void
mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device * vxlan_dev,struct switchdev_notifier_port_obj_info * port_obj_info)3818 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3819 struct switchdev_notifier_port_obj_info *
3820 port_obj_info)
3821 {
3822 struct switchdev_obj_port_vlan *vlan =
3823 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3824 struct mlxsw_sp_bridge_device *bridge_device;
3825 struct mlxsw_sp *mlxsw_sp;
3826 struct net_device *br_dev;
3827
3828 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3829 if (!br_dev)
3830 return;
3831
3832 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3833 if (!mlxsw_sp)
3834 return;
3835
3836 port_obj_info->handled = true;
3837
3838 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3839 if (!bridge_device)
3840 return;
3841
3842 if (!bridge_device->vlan_enabled)
3843 return;
3844
3845 mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
3846 vlan->vid);
3847 }
3848
3849 static int
mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device * vxlan_dev,struct switchdev_notifier_port_obj_info * port_obj_info)3850 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3851 struct switchdev_notifier_port_obj_info *
3852 port_obj_info)
3853 {
3854 int err = 0;
3855
3856 switch (port_obj_info->obj->id) {
3857 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3858 err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3859 port_obj_info);
3860 break;
3861 default:
3862 break;
3863 }
3864
3865 return err;
3866 }
3867
3868 static void
mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device * vxlan_dev,struct switchdev_notifier_port_obj_info * port_obj_info)3869 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3870 struct switchdev_notifier_port_obj_info *
3871 port_obj_info)
3872 {
3873 switch (port_obj_info->obj->id) {
3874 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3875 mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3876 break;
3877 default:
3878 break;
3879 }
3880 }
3881
mlxsw_sp_switchdev_blocking_event(struct notifier_block * unused,unsigned long event,void * ptr)3882 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3883 unsigned long event, void *ptr)
3884 {
3885 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3886 int err = 0;
3887
3888 switch (event) {
3889 case SWITCHDEV_PORT_OBJ_ADD:
3890 if (netif_is_vxlan(dev))
3891 err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3892 else
3893 err = switchdev_handle_port_obj_add(dev, ptr,
3894 mlxsw_sp_port_dev_check,
3895 mlxsw_sp_port_obj_add);
3896 return notifier_from_errno(err);
3897 case SWITCHDEV_PORT_OBJ_DEL:
3898 if (netif_is_vxlan(dev))
3899 mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3900 else
3901 err = switchdev_handle_port_obj_del(dev, ptr,
3902 mlxsw_sp_port_dev_check,
3903 mlxsw_sp_port_obj_del);
3904 return notifier_from_errno(err);
3905 case SWITCHDEV_PORT_ATTR_SET:
3906 err = switchdev_handle_port_attr_set(dev, ptr,
3907 mlxsw_sp_port_dev_check,
3908 mlxsw_sp_port_attr_set);
3909 return notifier_from_errno(err);
3910 }
3911
3912 return NOTIFY_DONE;
3913 }
3914
/* Blocking switchdev notifier; handles object add/del and attribute set
 * events in process context via mlxsw_sp_switchdev_blocking_event().
 */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
3918
/* Return the STP state recorded for the given bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
3924
mlxsw_sp_fdb_init(struct mlxsw_sp * mlxsw_sp)3925 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3926 {
3927 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3928 struct notifier_block *nb;
3929 int err;
3930
3931 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3932 if (err) {
3933 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3934 return err;
3935 }
3936
3937 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3938 if (err) {
3939 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3940 return err;
3941 }
3942
3943 nb = &mlxsw_sp_switchdev_blocking_notifier;
3944 err = register_switchdev_blocking_notifier(nb);
3945 if (err) {
3946 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3947 goto err_register_switchdev_blocking_notifier;
3948 }
3949
3950 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3951 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3952 return 0;
3953
3954 err_register_switchdev_blocking_notifier:
3955 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3956 return err;
3957 }
3958
mlxsw_sp_fdb_fini(struct mlxsw_sp * mlxsw_sp)3959 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3960 {
3961 struct notifier_block *nb;
3962
3963 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3964
3965 nb = &mlxsw_sp_switchdev_blocking_notifier;
3966 unregister_switchdev_blocking_notifier(nb);
3967
3968 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3969 }
3970
/* Spectrum-1 specific switchdev initialization: select the 802.1ad bridge
 * ops implementation for this ASIC generation.
 */
static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
}
3975
/* Switchdev ops used on Spectrum-1 ASICs. */
const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
	.init = mlxsw_sp1_switchdev_init,
};
3979
/* Spectrum-2 specific switchdev initialization: select the 802.1ad bridge
 * ops implementation for this ASIC generation.
 */
static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
}
3984
/* Switchdev ops used on Spectrum-2 and later ASICs. */
const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
	.init = mlxsw_sp2_switchdev_init,
};
3988
mlxsw_sp_switchdev_init(struct mlxsw_sp * mlxsw_sp)3989 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3990 {
3991 struct mlxsw_sp_bridge *bridge;
3992
3993 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3994 if (!bridge)
3995 return -ENOMEM;
3996 mlxsw_sp->bridge = bridge;
3997 bridge->mlxsw_sp = mlxsw_sp;
3998
3999 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
4000
4001 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
4002 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
4003
4004 mlxsw_sp->switchdev_ops->init(mlxsw_sp);
4005
4006 return mlxsw_sp_fdb_init(mlxsw_sp);
4007 }
4008
/* Tear down switchdev support: stop the FDB machinery first, then free the
 * bridge offload state. All offloaded bridges are expected to be gone by
 * now, hence the WARN_ON.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
4015
4016