/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>

#define DSA_MAX_NUM_OFFLOADING_BRIDGES		BITS_PER_LONG

enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HOST_FDB_ADD,
	DSA_NOTIFIER_HOST_FDB_DEL,
	DSA_NOTIFIER_LAG_FDB_ADD,
	DSA_NOTIFIER_LAG_FDB_DEL,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_HOST_MDB_ADD,
	DSA_NOTIFIER_HOST_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_HOST_VLAN_ADD,
	DSA_NOTIFIER_HOST_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_TAG_PROTO_CONNECT,
	DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
	DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
	DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
	DSA_NOTIFIER_MASTER_STATE_CHANGE,
};
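
/* Illustrative sketch (not code from this file) of how one of these events is
 * emitted towards the switch tree: a caller fills in the matching info
 * structure below and passes it to dsa_tree_notify(). The values are made up:
 *
 *	struct dsa_notifier_mtu_info info = {
 *		.dp = dp,
 *		.mtu = new_mtu,
 *	};
 *
 *	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 */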

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	const struct dsa_port *dp;
	struct dsa_bridge bridge;
	bool tx_fwd_offload;
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	const struct dsa_port *dp;
	const unsigned char *addr;
	u16 vid;
	struct dsa_db db;
};

/* DSA_NOTIFIER_LAG_FDB_* */
struct dsa_notifier_lag_fdb_info {
	struct dsa_lag *lag;
	const unsigned char *addr;
	u16 vid;
	struct dsa_db db;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	const struct dsa_port *dp;
	const struct switchdev_obj_port_mdb *mdb;
	struct dsa_db db;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	const struct dsa_port *dp;
	struct dsa_lag lag;
	struct netdev_lag_upper_info *info;
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct dsa_port *dp;
	const struct switchdev_obj_port_vlan *vlan;
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	const struct dsa_port *dp;
	int mtu;
};

/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
	const struct dsa_port *dp;
	u16 vid;
};

/* DSA_NOTIFIER_MASTER_STATE_CHANGE */
struct dsa_notifier_master_state_info {
	const struct net_device *master;
	bool operational;
};

struct dsa_switchdev_event_work {
	struct net_device *dev;
	struct net_device *orig_dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

enum dsa_standalone_event {
	DSA_UC_ADD,
	DSA_UC_DEL,
	DSA_MC_ADD,
	DSA_MC_DEL,
};

struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};

struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff *(*xmit)(struct sk_buff *skb,
				struct net_device *dev);

	struct gro_cells gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *netpoll;
#endif

	/* TC context */
	struct list_head mall_tc_list;
};

/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b);

bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
	return ops->needed_headroom + ops->needed_tailroom;
}
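
/* Illustrative use (an assumption, not code from this file): a DSA master must
 * be able to carry the slave MTU plus the tagger overhead, so a caller sizing
 * the master could do something like:
 *
 *	err = dev_set_mtu(master, ETH_DATA_LEN +
 *			  dsa_tag_protocol_overhead(tag_ops));
 */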

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
			 struct netdev_lag_upper_info *uinfo,
			 struct netlink_ext_ack *extack);
void dsa_master_lag_teardown(struct net_device *lag_dev,
			     struct dsa_port *cpu_dp);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
							int device, int port)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds->index == device && dp->index == port &&
		    dp->type == DSA_PORT_TYPE_USER)
			return dp->slave;

	return NULL;
}
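
/* Sketch of how a tagging driver's rcv hook typically maps the (device, port)
 * pair parsed out of the hardware tag back to the slave netdev (illustrative;
 * source_device and source_port are hypothetical names for tag fields):
 *
 *	skb->dev = dsa_master_find_slave(dev, source_device, source_port);
 *	if (!skb->dev)
 *		return NULL;
 */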

/* netlink.c */
extern struct rtnl_link_ops dsa_link_ops __read_mostly;

/* port.c */
bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr);
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
int dsa_port_set_mst_state(struct dsa_port *dp,
			   const struct switchdev_mst_state *state,
			   struct netlink_ext_ack *extack);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack);
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack);
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mst_enable(struct dsa_port *dp, bool on,
			struct netlink_ext_ack *extack);
int dsa_port_vlan_msti(struct dsa_port *dp,
		       const struct switchdev_vlan_msti *msti);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid);
int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
				     const unsigned char *addr, u16 vid);
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
				 u16 vid);
int dsa_port_bridge_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
				 u16 vid);
int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid);
int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
				     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb);
int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
				 const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_host_vlan_add(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan,
			   struct netlink_ext_ack *extack);
int dsa_port_host_vlan_del(struct dsa_port *dp,
			   const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_phylink_create(struct dsa_port *dp);
void dsa_port_phylink_destroy(struct dsa_port *dp);
int dsa_shared_port_link_register_of(struct dsa_port *dp);
void dsa_shared_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc);
int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
			   struct netlink_ext_ack *extack);

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_sync_ha(struct net_device *dev);
void dsa_slave_unsync_ha(struct net_device *dev);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
			    struct netlink_ext_ack *extack);
int dsa_slave_manage_vlan_filtering(struct net_device *dev,
				    bool vlan_filtering);

static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	return p->dp;
}

static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_to_master(dp);
}
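
/* Sketch of how a tagging driver's xmit hook typically uses these accessors
 * (illustrative; TAG_PORT_MASK and the tag layout are hypothetical):
 *
 *	struct dsa_port *dp = dsa_slave_to_port(dev);
 *	u16 tag = FIELD_PREP(TAG_PORT_MASK, dp->index);
 */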

/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}
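
/* A tagging driver whose hardware cannot untag the bridge pvid on its own
 * would typically call this as the last step of its rcv hook, once skb->dev
 * already points to the slave (illustrative sketch, not a requirement imposed
 * by this header):
 *
 *	return dsa_untag_bridge_pvid(skb);
 */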

/* For switches without hardware support for DSA tagging to be able
 * to support termination through the bridge.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *slave;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		if (!dp->bridge)
			continue;

		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		slave = dp->slave;

		err = br_vlan_get_info_rcu(slave, vid, &vinfo);
		if (err)
			continue;

		return slave;
	}

	return NULL;
}
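
/* Illustrative sketch of how a tagger that can only recover a VLAN ID (but no
 * source port) from the frame might use this helper on RX to pick a netdev to
 * deliver to; vid here is a hypothetical value parsed from the frame:
 *
 *	skb->dev = dsa_find_designated_bridge_port_by_vid(master, vid);
 *	if (!skb->dev)
 *		return NULL;
 */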

/* If the ingress port offloads the bridge, we mark the frame as autonomously
 * forwarded by hardware, so the software bridge doesn't forward it twice, back
 * to us, because we already did. However, if we're in fallback mode and we do
 * software bridging, we are not offloading it, therefore the dp->bridge
 * pointer is not populated, and flooding needs to be done by software (we are
 * effectively operating in standalone ports mode).
 */
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);

	skb->offload_fwd_mark = !!(dp->bridge);
}
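
/* Typical (illustrative) use in a tagging driver's rcv hook, after skb->dev
 * has already been pointed at the slave netdev:
 *
 *	dsa_default_offload_fwd_mark(skb);
 */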

/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 *                                                                 skb->data
 *                                                                         |
 *                                                                         v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 *                                                 |               |
 * <----- len ----->                               <----- len ----->
 * |
 * >>>>>>>   v
 * >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * >>>>>>>   +-----------------------+-----------------------+-------+
 * >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
 *           +-----------------------+-----------------------+-------+
 *                                                                    ^
 *                                                                    |
 *                                                                    skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
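
/* Illustrative RX sketch (TAG_LEN is a hypothetical tagger-specific constant):
 * once the tag has been parsed, an EtherType-based tagger removes it by
 * pulling the tag length and then restoring the MAC addresses in front of the
 * encapsulated EtherType:
 *
 *	skb_pull_rcsum(skb, TAG_LEN);
 *	dsa_strip_etype_header(skb, TAG_LEN);
 */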

/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 *       <<<<<<<   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^     <<<<<<<   +-----------------------+-----------------------+-------+
 * |     <<<<<<<   |    Destination MAC    |      Source MAC       | EType |
 * |               +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
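
/* Illustrative TX sketch (TAG_LEN is a hypothetical tagger-specific constant):
 * an EtherType-based tagger first makes room at the front of the frame, then
 * moves the MAC addresses up so the hole ends up where the EtherType was:
 *
 *	skb_push(skb, TAG_LEN);
 *	dsa_alloc_etype_header(skb, TAG_LEN);
 */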

/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA master perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
	return skb->data - 2;
}

/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header).
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
	return skb->data + 2 * ETH_ALEN;
}
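
/* Illustrative sketch of how an EtherType-based tagger accesses the tag bytes
 * through the two helpers above (the 16-bit tag layout is hypothetical):
 *
 * RX, before the tag is stripped:
 *	__be16 *hdr = dsa_etype_header_pos_rx(skb);
 *	u16 tag = ntohs(*hdr);
 *
 * TX, after dsa_alloc_etype_header():
 *	__be16 *hdr = dsa_etype_header_pos_tx(skb);
 *	*hdr = htons(tag);
 */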

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

static inline bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static inline bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}
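
/* Illustrative sketch of how a caller that wants to install a host address
 * would typically gate it on these predicates (vid 0 here is just an example):
 *
 *	if (dsa_switch_supports_uc_filtering(dp->ds))
 *		dsa_port_standalone_host_fdb_add(dp, addr, 0);
 */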

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev);
struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops);
void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up);
void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up);
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num);
struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br);

/* tag_8021q.c */
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);

extern struct list_head dsa_tree_list;

#endif