// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/devlink.h>
#include <net/sch_generic.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only other way to traverse the tree is through
 * its ports list, which does not uniquely list the member switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have been probed
 * yet.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

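/* Return the struct dsa_lag that offloads @lag_dev in this tree, if any port
 * of the tree is currently a member of it.
 */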
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

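/* Bridge numbers are allocated from a global bitmap, so search every tree for
 * an existing mapping of @bridge_dev. Returns 0 if none is found.
 */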
static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

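/* Look up a switch by its tree index and switch index across all trees in the
 * system.
 */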
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

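/* Return the existing routing table entry for the dp -> link_dp hop, or
 * allocate and register a new one if this link is seen for the first time.
 */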
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

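/* Walk the "link" phandles of a DSA port's device tree node and record one
 * routing table entry per destination port. Returns false if a linked port is
 * not (yet) present in the tree, or if an entry cannot be allocated; the
 * routing table is then considered incomplete.
 */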
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
{
	struct device_node *ethernet;
	struct net_device *master;
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
	master = of_find_net_device_by_node(ethernet);
	of_node_put(ethernet);

	return master;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform the initial assignment of CPU ports to user ports and DSA links in
 * the fabric, giving preference to CPU ports local to each switch. Ports with
 * no CPU port on their own switch fall back to the first CPU port in the
 * switch tree.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

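/* Run the driver's optional per-port setup hook, then register the port with
 * devlink, exposing the tree index as the port's physical switch ID.
 */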
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	struct dsa_switch *ds = dp->ds;
	const unsigned char *id;
	unsigned char len;
	int err;

	memset(dlp, 0, sizeof(*dlp));
	devlink_port_init(dl, dlp);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	return 0;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;

	devlink_port_unregister(dlp);

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_fini(dlp);
}

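/* Bring up a single port according to its type: disable unused ports,
 * register links and enable shared (CPU and DSA) ports, and create the slave
 * net_device for user ports. Unwinds its own partial progress on error.
 */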
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (!dp->setup)
		return;

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};

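/* Switch the hardware over to the tree's tagging protocol if it differs from
 * the default one, then let both the tagger and the driver set up any
 * per-switch connection state they need.
 */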
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}

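/* One-time setup of a member switch: register it with devlink and the DSA
 * notifier chain, run the driver's ->setup(), sync the tagging protocol, and
 * register a fallback slave MDIO bus if the driver provides ->phy_read but no
 * bus of its own.
 */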
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct device_node *dn;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values to decide whether to
	 * probe PHY devices.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	if (ds->devlink)
		devlink_unregister(ds->devlink);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;
		bool admin_up = (master->flags & IFF_UP) &&
				!qdisc_tx_is_noop(master);

		err = dsa_master_setup(master, cpu_dp);
		if (err)
			break;

		/* Replay master state event */
		dsa_tree_master_admin_state_change(dst, master, admin_up);
		dsa_tree_master_oper_state_change(dst, master,
						  netif_oper_up(master));
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the master is
		 * currently up and running.
		 */
		dsa_tree_master_admin_state_change(dst, master, false);

		dsa_master_teardown(master);
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

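/* Bring up an entire tree once all member switches have been probed and the
 * routing table is complete: CPU ports first, then switches, ports, masters
 * and finally the LAG array, unwinding in reverse order on failure.
 */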
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

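/* Connect the tree to a new tagger and notify the old one of the
 * disconnection. On error, the tree is rebound to the old tagger.
 */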
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
					 struct net_device *master)
{
	struct dsa_notifier_master_state_info info;
	struct dsa_port *cpu_dp = master->dsa_ptr;

	info.master = master;
	info.operational = dsa_port_master_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}

void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of admin state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (up && cpu_dp->master_oper_up))
		notify = true;

	cpu_dp->master_admin_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of oper state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (cpu_dp->master_admin_up && up))
		notify = true;

	cpu_dp->master_oper_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

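/* Return the dsa_port with the given index on this switch, allocating it and
 * adding it to the tree's port list on first use.
 */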
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another. When that
	 * happens, the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

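/* Resolve the tagging protocol for a CPU port: take the switch driver's
 * preference, honor a "dsa-tag-protocol" override from the device tree when
 * the driver can change protocols, and enforce a single tagger per tree.
 */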
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

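/* Parse the switch from either device tree or platform data, then attempt to
 * bring up the tree it belongs to. If other member switches have not been
 * probed yet, dsa_tree_setup() returns 0 and setup resumes when the last
 * switch registers.
 */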
static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

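/* Typical driver usage (an illustrative sketch, not lifted from any specific
 * driver): allocate and populate a struct dsa_switch in probe, then hand it
 * over to the framework. MY_NUM_PORTS and my_dsa_switch_ops are hypothetical
 * driver-provided names.
 *
 *	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
 *	if (!ds)
 *		return -ENOMEM;
 *	ds->dev = dev;
 *	ds->num_ports = MY_NUM_PORTS;
 *	ds->ops = &my_dsa_switch_ops;
 *	return dsa_register_switch(ds);
 */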
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dsa_port_to_master(dp);
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);