/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>

#define DSA_MAX_NUM_OFFLOADING_BRIDGES		BITS_PER_LONG

enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HOST_FDB_ADD,
	DSA_NOTIFIER_HOST_FDB_DEL,
	DSA_NOTIFIER_LAG_FDB_ADD,
	DSA_NOTIFIER_LAG_FDB_DEL,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_HOST_MDB_ADD,
	DSA_NOTIFIER_HOST_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_HOST_VLAN_ADD,
	DSA_NOTIFIER_HOST_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_TAG_PROTO_CONNECT,
	DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
	DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
	DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
	DSA_NOTIFIER_MASTER_STATE_CHANGE,
};

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	const struct dsa_port *dp;
	struct dsa_bridge bridge;
	bool tx_fwd_offload;
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	const struct dsa_port *dp;
	const unsigned char *addr;
	u16 vid;
	struct dsa_db db;
};

/* DSA_NOTIFIER_LAG_FDB_* */
struct dsa_notifier_lag_fdb_info {
	struct dsa_lag *lag;
	const unsigned char *addr;
	u16 vid;
	struct dsa_db db;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	const struct dsa_port *dp;
	const struct switchdev_obj_port_mdb *mdb;
	struct dsa_db db;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	const struct dsa_port *dp;
	struct dsa_lag lag;
	struct netdev_lag_upper_info *info;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct dsa_port *dp;
	const struct switchdev_obj_port_vlan *vlan;
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	const struct dsa_port *dp;
	int mtu;
};

/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
	const struct dsa_port *dp;
	u16 vid;
};

/* DSA_NOTIFIER_MASTER_STATE_CHANGE */
struct dsa_notifier_master_state_info {
	const struct net_device *master;
	bool operational;
};

struct dsa_switchdev_event_work {
	struct net_device *dev;
	struct net_device *orig_dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

enum dsa_standalone_event {
	DSA_UC_ADD,
	DSA_UC_DEL,
	DSA_MC_ADD,
	DSA_MC_DEL,
};

struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};

struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff *	(*xmit)(struct sk_buff *skb,
					struct net_device *dev);

	struct gro_cells	gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port		*dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll		*netpoll;
#endif

	/* TC context */
	struct list_head	mall_tc_list;
};

/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b);

bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
	return ops->needed_headroom + ops->needed_tailroom;
}
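
/* Example (illustrative sketch, not taken from any in-tree tagger): for a
 * hypothetical tagger declaring
 *
 *	.needed_headroom = 4,
 *	.needed_tailroom = 0,
 *
 * dsa_tag_protocol_overhead() evaluates to 4 bytes, which is the extra room
 * the DSA master must be able to carry on top of the slave MTU.
 */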

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
						       int device, int port)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds->index == device && dp->index == port &&
		    dp->type == DSA_PORT_TYPE_USER)
			return dp->slave;

	return NULL;
}

/* port.c */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack);
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack);
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack);
int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid);
int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid);
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
				 u16 vid);
int dsa_port_bridge_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
				 u16 vid);
int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid);
int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb);
int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack);
int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_phylink_create(struct dsa_port *dp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc);

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
int dsa_slave_manage_vlan_filtering(struct net_device *dev,
				    bool vlan_filtering);

static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	return p->dp;
}

static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->cpu_dp->master;
}

/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}
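
/* Example (illustrative sketch only): a tagger whose switch cannot untag the
 * bridge pvid towards the CPU port might finish its rcv() hook with
 *
 *	skb->dev = dsa_master_find_slave(dev, 0, source_port);
 *	if (!skb->dev)
 *		return NULL;
 *
 *	return dsa_untag_bridge_pvid(skb);
 *
 * where "source_port" stands for a hypothetical value parsed from the tag.
 */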

/* For switches without hardware support for DSA tagging to be able
 * to support termination through the bridge.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *slave;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		if (!dp->bridge)
			continue;

		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		slave = dp->slave;

		err = br_vlan_get_info_rcu(slave, vid, &vinfo);
		if (err)
			continue;

		return slave;
	}

	return NULL;
}
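
/* Example (illustrative sketch only): a tagger whose frames carry a bridge
 * VLAN but no source port information might steer them like this in rcv():
 *
 *	if (source_port == -1)
 *		skb->dev = dsa_find_designated_bridge_port_by_vid(master, vid);
 *	else
 *		skb->dev = dsa_master_find_slave(master, 0, source_port);
 *	if (!skb->dev)
 *		return NULL;
 *
 * "source_port" and "vid" stand for hypothetical values parsed from the frame.
 */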

/* If the ingress port offloads the bridge, we mark the frame as autonomously
 * forwarded by hardware, so the software bridge doesn't forward it twice, back
 * to us, because we already did. However, if we're in fallback mode and we do
 * software bridging, we are not offloading it, therefore the dp->bridge
 * pointer is not populated, and flooding needs to be done by software (we are
 * effectively operating in standalone ports mode).
 */
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);

	skb->offload_fwd_mark = !!(dp->bridge);
}
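
/* Example (illustrative sketch only): most rcv() hooks call this right after
 * skb->dev has been set to the user port's net device, i.e.
 *
 *	dsa_default_offload_fwd_mark(skb);
 *
 * unless the tag itself indicates whether the frame was forwarded or trapped,
 * in which case the tagger decodes that instead.
 */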

/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 *                                                                 skb->data
 *                                                                         |
 *                                                                         v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 *                                                 |               |
 * <----- len ----->                               <----- len ----->
 *                 |
 *       >>>>>>>   v
 *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 *       >>>>>>>   +-----------------------+-----------------------+-------+
 *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
 *                 +-----------------------+-----------------------+-------+
 *                                                                         ^
 *                                                                         |
 *                                                                 skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}

/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 *       <<<<<<<   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^     <<<<<<<   +-----------------------+-----------------------+-------+
 * |     <<<<<<<   |    Destination MAC    |      Source MAC       | EType |
 * |               +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}

/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA master perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
	return skb->data - 2;
}
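
/* A minimal sketch (not an in-tree tagger) of how the RX helpers above fit
 * together for a hypothetical EtherType-based tagger whose 4-byte header is a
 * made-up EtherType followed by a 16-bit source port. Real taggers live in
 * net/dsa/tag_*.c; this function exists only as documentation.
 */
static inline struct sk_buff *dsa_example_tagger_rcv(struct sk_buff *skb,
						     struct net_device *dev)
{
	__be16 *tag;
	int port;

	if (unlikely(!pskb_may_pull(skb, 4)))
		return NULL;

	/* The DSA header starts 2 bytes behind skb->data on RX */
	tag = dsa_etype_header_pos_rx(skb);
	port = ntohs(tag[1]);

	/* Consume the tag and move the MAC addresses over it */
	skb_pull_rcsum(skb, 4);
	dsa_strip_etype_header(skb, 4);

	/* Single switch (device index 0) assumed for the example */
	skb->dev = dsa_master_find_slave(dev, 0, port);
	if (!skb->dev)
		return NULL;

	dsa_default_offload_fwd_mark(skb);

	return skb;
}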

/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header).
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
	return skb->data + 2 * ETH_ALEN;
}
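
/* The TX counterpart of the sketch above (again purely illustrative): the
 * slave device is expected to have reserved ops->needed_headroom bytes of
 * headroom, so the tag can simply be pushed and filled in at the EtherType
 * position.
 */
static inline struct sk_buff *dsa_example_tagger_xmit(struct sk_buff *skb,
						      struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	__be16 *tag;

	/* Open a 4-byte gap between the MAC addresses and the EtherType */
	skb_push(skb, 4);
	dsa_alloc_etype_header(skb, 4);

	/* Fill in the made-up tag: arbitrary EtherType, then egress port */
	tag = dsa_etype_header_pos_tx(skb);
	tag[0] = htons(0x1234);
	tag[1] = htons(dp->index);

	return skb;
}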

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

static inline bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static inline bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops);
void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up);
void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up);
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num);
struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br);
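
/* A minimal sketch (purely illustrative, not called by the core) of how the
 * DSA_NOTIFIER_* events at the top of this file are emitted: pick the event,
 * fill in its matching dsa_notifier_*_info structure, and pass both to
 * dsa_tree_notify() for one switch tree, or dsa_broadcast() for all trees.
 */
static inline int dsa_example_notify_ageing_time(struct dsa_switch_tree *dst,
						 unsigned int ageing_time)
{
	struct dsa_notifier_ageing_time_info info = {
		.ageing_time = ageing_time,
	};

	return dsa_tree_notify(dst, DSA_NOTIFIER_AGEING_TIME, &info);
}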

/* tag_8021q.c */
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);

extern struct list_head dsa_tree_list;

#endif