1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch chip, part of a switch fabric
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8 
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
14 
15 #include "dsa_priv.h"
16 
dsa_switch_fastest_ageing_time(struct dsa_switch * ds,unsigned int ageing_time)17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 						   unsigned int ageing_time)
19 {
20 	struct dsa_port *dp;
21 
22 	dsa_switch_for_each_port(dp, ds)
23 		if (dp->ageing_time && dp->ageing_time < ageing_time)
24 			ageing_time = dp->ageing_time;
25 
26 	return ageing_time;
27 }
28 
dsa_switch_ageing_time(struct dsa_switch * ds,struct dsa_notifier_ageing_time_info * info)29 static int dsa_switch_ageing_time(struct dsa_switch *ds,
30 				  struct dsa_notifier_ageing_time_info *info)
31 {
32 	unsigned int ageing_time = info->ageing_time;
33 
34 	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
35 		return -ERANGE;
36 
37 	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
38 		return -ERANGE;
39 
40 	/* Program the fastest ageing time in case of multiple bridges */
41 	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
42 
43 	if (ds->ops->set_ageing_time)
44 		return ds->ops->set_ageing_time(ds, ageing_time);
45 
46 	return 0;
47 }
48 
dsa_port_mtu_match(struct dsa_port * dp,struct dsa_notifier_mtu_info * info)49 static bool dsa_port_mtu_match(struct dsa_port *dp,
50 			       struct dsa_notifier_mtu_info *info)
51 {
52 	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
53 }
54 
dsa_switch_mtu(struct dsa_switch * ds,struct dsa_notifier_mtu_info * info)55 static int dsa_switch_mtu(struct dsa_switch *ds,
56 			  struct dsa_notifier_mtu_info *info)
57 {
58 	struct dsa_port *dp;
59 	int ret;
60 
61 	if (!ds->ops->port_change_mtu)
62 		return -EOPNOTSUPP;
63 
64 	dsa_switch_for_each_port(dp, ds) {
65 		if (dsa_port_mtu_match(dp, info)) {
66 			ret = ds->ops->port_change_mtu(ds, dp->index,
67 						       info->mtu);
68 			if (ret)
69 				return ret;
70 		}
71 	}
72 
73 	return 0;
74 }
75 
/* Handle DSA_NOTIFIER_BRIDGE_JOIN. When the targeted port belongs to this
 * switch, port_bridge_join support is mandatory. When it belongs to another
 * switch of the tree, cross-chip support is optional: switches without
 * crosschip_bridge_join are silently skipped.
 */
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	/* The join happened on a foreign switch; notify this one so it can
	 * adjust cross-chip forwarding, if supported.
	 */
	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}
106 
dsa_switch_bridge_leave(struct dsa_switch * ds,struct dsa_notifier_bridge_info * info)107 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
108 				   struct dsa_notifier_bridge_info *info)
109 {
110 	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
111 		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
112 
113 	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
114 		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
115 						info->dp->ds->index,
116 						info->dp->index,
117 						info->bridge);
118 
119 	return 0;
120 }
121 
122 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
123  * DSA links) that sit between the targeted port on which the notifier was
124  * emitted and its dedicated CPU port.
125  */
dsa_port_host_address_match(struct dsa_port * dp,const struct dsa_port * targeted_dp)126 static bool dsa_port_host_address_match(struct dsa_port *dp,
127 					const struct dsa_port *targeted_dp)
128 {
129 	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
130 
131 	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
132 		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
133 						     cpu_dp->index);
134 
135 	return false;
136 }
137 
/* Look up the (MAC address, VID, database) tuple in @addr_list.
 * Returns the matching entry, or NULL when none exists.
 */
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *entry;

	list_for_each_entry(entry, addr_list, list) {
		if (!ether_addr_equal(entry->addr, addr))
			continue;

		if (entry->vid == vid && dsa_db_equal(&entry->db, &db))
			return entry;
	}

	return NULL;
}
151 
/* Add an MDB entry on @dp. Entries on shared (CPU and DSA) ports are
 * refcounted under dp->addr_lists_lock, and the driver is only called on
 * the first addition of a given (addr, vid, db) tuple; user ports go
 * straight to the driver.
 *
 * Returns 0 on success or a negative errno from the driver / -ENOMEM.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	/* Already programmed: just take another reference */
	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program the hardware before publishing the tracking entry */
	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
196 
/* Delete an MDB entry from @dp. On shared (CPU and DSA) ports the entry is
 * refcounted; the hardware entry is only removed on the last deletion. If
 * the driver fails, the reference is restored so the caller may retry.
 *
 * Returns 0 on success, -ENOENT if the entry was never added on a shared
 * port, or a negative errno from the driver.
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry; keep the hardware state */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		/* Deletion failed: resurrect the reference we just dropped */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
235 
/* Add an FDB entry on @dp. Mirrors dsa_port_do_mdb_add(): shared (CPU and
 * DSA) ports refcount (addr, vid, db) tuples under dp->addr_lists_lock and
 * only call the driver on the first addition; user ports call it directly.
 *
 * Returns 0 on success or a negative errno from the driver / -ENOMEM.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	/* Already programmed: just take another reference */
	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program the hardware before publishing the tracking entry */
	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
279 
/* Delete an FDB entry from @dp. Mirrors dsa_port_do_mdb_del(): on shared
 * ports the entry is refcounted and the driver only called on the last
 * deletion; on driver failure the reference is restored for a retry.
 *
 * Returns 0 on success, -ENOENT if the entry was never added on a shared
 * port, or a negative errno from the driver.
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry; keep the hardware state */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		/* Deletion failed: resurrect the reference we just dropped */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
317 
/* Add an FDB entry towards LAG @lag on @ds. LAG FDB entries are refcounted
 * per LAG (under lag->fdb_lock) rather than per port; the driver's
 * lag_fdb_add is only called on the first addition of a tuple.
 *
 * Returns 0 on success or a negative errno from the driver / -ENOMEM.
 */
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	/* Already programmed: just take another reference */
	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program the hardware before publishing the tracking entry */
	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
356 
/* Delete an FDB entry towards LAG @lag on @ds. The per-LAG refcount is
 * dropped and the driver's lag_fdb_del only called on the last deletion;
 * on driver failure the reference is restored for a retry.
 *
 * Returns 0 on success, -ENOENT for an unknown entry, or a negative errno
 * from the driver.
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry; keep the hardware state */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		/* Deletion failed: resurrect the reference we just dropped */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
389 
dsa_switch_host_fdb_add(struct dsa_switch * ds,struct dsa_notifier_fdb_info * info)390 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
391 				   struct dsa_notifier_fdb_info *info)
392 {
393 	struct dsa_port *dp;
394 	int err = 0;
395 
396 	if (!ds->ops->port_fdb_add)
397 		return -EOPNOTSUPP;
398 
399 	dsa_switch_for_each_port(dp, ds) {
400 		if (dsa_port_host_address_match(dp, info->dp)) {
401 			err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
402 						  info->db);
403 			if (err)
404 				break;
405 		}
406 	}
407 
408 	return err;
409 }
410 
dsa_switch_host_fdb_del(struct dsa_switch * ds,struct dsa_notifier_fdb_info * info)411 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
412 				   struct dsa_notifier_fdb_info *info)
413 {
414 	struct dsa_port *dp;
415 	int err = 0;
416 
417 	if (!ds->ops->port_fdb_del)
418 		return -EOPNOTSUPP;
419 
420 	dsa_switch_for_each_port(dp, ds) {
421 		if (dsa_port_host_address_match(dp, info->dp)) {
422 			err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
423 						  info->db);
424 			if (err)
425 				break;
426 		}
427 	}
428 
429 	return err;
430 }
431 
dsa_switch_fdb_add(struct dsa_switch * ds,struct dsa_notifier_fdb_info * info)432 static int dsa_switch_fdb_add(struct dsa_switch *ds,
433 			      struct dsa_notifier_fdb_info *info)
434 {
435 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
436 	struct dsa_port *dp = dsa_to_port(ds, port);
437 
438 	if (!ds->ops->port_fdb_add)
439 		return -EOPNOTSUPP;
440 
441 	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
442 }
443 
dsa_switch_fdb_del(struct dsa_switch * ds,struct dsa_notifier_fdb_info * info)444 static int dsa_switch_fdb_del(struct dsa_switch *ds,
445 			      struct dsa_notifier_fdb_info *info)
446 {
447 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
448 	struct dsa_port *dp = dsa_to_port(ds, port);
449 
450 	if (!ds->ops->port_fdb_del)
451 		return -EOPNOTSUPP;
452 
453 	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
454 }
455 
dsa_switch_lag_fdb_add(struct dsa_switch * ds,struct dsa_notifier_lag_fdb_info * info)456 static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
457 				  struct dsa_notifier_lag_fdb_info *info)
458 {
459 	struct dsa_port *dp;
460 
461 	if (!ds->ops->lag_fdb_add)
462 		return -EOPNOTSUPP;
463 
464 	/* Notify switch only if it has a port in this LAG */
465 	dsa_switch_for_each_port(dp, ds)
466 		if (dsa_port_offloads_lag(dp, info->lag))
467 			return dsa_switch_do_lag_fdb_add(ds, info->lag,
468 							 info->addr, info->vid,
469 							 info->db);
470 
471 	return 0;
472 }
473 
dsa_switch_lag_fdb_del(struct dsa_switch * ds,struct dsa_notifier_lag_fdb_info * info)474 static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
475 				  struct dsa_notifier_lag_fdb_info *info)
476 {
477 	struct dsa_port *dp;
478 
479 	if (!ds->ops->lag_fdb_del)
480 		return -EOPNOTSUPP;
481 
482 	/* Notify switch only if it has a port in this LAG */
483 	dsa_switch_for_each_port(dp, ds)
484 		if (dsa_port_offloads_lag(dp, info->lag))
485 			return dsa_switch_do_lag_fdb_del(ds, info->lag,
486 							 info->addr, info->vid,
487 							 info->db);
488 
489 	return 0;
490 }
491 
dsa_switch_lag_change(struct dsa_switch * ds,struct dsa_notifier_lag_info * info)492 static int dsa_switch_lag_change(struct dsa_switch *ds,
493 				 struct dsa_notifier_lag_info *info)
494 {
495 	if (info->dp->ds == ds && ds->ops->port_lag_change)
496 		return ds->ops->port_lag_change(ds, info->dp->index);
497 
498 	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
499 		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
500 						     info->dp->index);
501 
502 	return 0;
503 }
504 
dsa_switch_lag_join(struct dsa_switch * ds,struct dsa_notifier_lag_info * info)505 static int dsa_switch_lag_join(struct dsa_switch *ds,
506 			       struct dsa_notifier_lag_info *info)
507 {
508 	if (info->dp->ds == ds && ds->ops->port_lag_join)
509 		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
510 					      info->info);
511 
512 	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
513 		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
514 						   info->dp->index, info->lag,
515 						   info->info);
516 
517 	return -EOPNOTSUPP;
518 }
519 
dsa_switch_lag_leave(struct dsa_switch * ds,struct dsa_notifier_lag_info * info)520 static int dsa_switch_lag_leave(struct dsa_switch *ds,
521 				struct dsa_notifier_lag_info *info)
522 {
523 	if (info->dp->ds == ds && ds->ops->port_lag_leave)
524 		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
525 
526 	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
527 		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
528 						    info->dp->index, info->lag);
529 
530 	return -EOPNOTSUPP;
531 }
532 
dsa_switch_mdb_add(struct dsa_switch * ds,struct dsa_notifier_mdb_info * info)533 static int dsa_switch_mdb_add(struct dsa_switch *ds,
534 			      struct dsa_notifier_mdb_info *info)
535 {
536 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
537 	struct dsa_port *dp = dsa_to_port(ds, port);
538 
539 	if (!ds->ops->port_mdb_add)
540 		return -EOPNOTSUPP;
541 
542 	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
543 }
544 
dsa_switch_mdb_del(struct dsa_switch * ds,struct dsa_notifier_mdb_info * info)545 static int dsa_switch_mdb_del(struct dsa_switch *ds,
546 			      struct dsa_notifier_mdb_info *info)
547 {
548 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
549 	struct dsa_port *dp = dsa_to_port(ds, port);
550 
551 	if (!ds->ops->port_mdb_del)
552 		return -EOPNOTSUPP;
553 
554 	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
555 }
556 
dsa_switch_host_mdb_add(struct dsa_switch * ds,struct dsa_notifier_mdb_info * info)557 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
558 				   struct dsa_notifier_mdb_info *info)
559 {
560 	struct dsa_port *dp;
561 	int err = 0;
562 
563 	if (!ds->ops->port_mdb_add)
564 		return -EOPNOTSUPP;
565 
566 	dsa_switch_for_each_port(dp, ds) {
567 		if (dsa_port_host_address_match(dp, info->dp)) {
568 			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
569 			if (err)
570 				break;
571 		}
572 	}
573 
574 	return err;
575 }
576 
dsa_switch_host_mdb_del(struct dsa_switch * ds,struct dsa_notifier_mdb_info * info)577 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
578 				   struct dsa_notifier_mdb_info *info)
579 {
580 	struct dsa_port *dp;
581 	int err = 0;
582 
583 	if (!ds->ops->port_mdb_del)
584 		return -EOPNOTSUPP;
585 
586 	dsa_switch_for_each_port(dp, ds) {
587 		if (dsa_port_host_address_match(dp, info->dp)) {
588 			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
589 			if (err)
590 				break;
591 		}
592 	}
593 
594 	return err;
595 }
596 
597 /* Port VLANs match on the targeted port and on all DSA ports */
dsa_port_vlan_match(struct dsa_port * dp,struct dsa_notifier_vlan_info * info)598 static bool dsa_port_vlan_match(struct dsa_port *dp,
599 				struct dsa_notifier_vlan_info *info)
600 {
601 	return dsa_port_is_dsa(dp) || dp == info->dp;
602 }
603 
604 /* Host VLANs match on the targeted port's CPU port, and on all DSA ports
605  * (upstream and downstream) of that switch and its upstream switches.
606  */
dsa_port_host_vlan_match(struct dsa_port * dp,const struct dsa_port * targeted_dp)607 static bool dsa_port_host_vlan_match(struct dsa_port *dp,
608 				     const struct dsa_port *targeted_dp)
609 {
610 	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
611 
612 	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
613 		return dsa_port_is_dsa(dp) || dp == cpu_dp;
614 
615 	return false;
616 }
617 
dsa_vlan_find(struct list_head * vlan_list,const struct switchdev_obj_port_vlan * vlan)618 static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
619 				      const struct switchdev_obj_port_vlan *vlan)
620 {
621 	struct dsa_vlan *v;
622 
623 	list_for_each_entry(v, vlan_list, list)
624 		if (v->vid == vlan->vid)
625 			return v;
626 
627 	return NULL;
628 }
629 
/* Add @vlan on @dp. VLANs on shared (CPU and DSA) ports are refcounted per
 * VID under dp->vlans_lock, and the driver is only called for the first
 * addition of a VID; user ports go straight to the driver.
 *
 * Returns 0 on success or a negative errno from the driver / -ENOMEM.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	/* Already programmed: just take another reference */
	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	/* Program the hardware before publishing the tracking entry */
	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
680 
/* Delete @vlan from @dp. On shared (CPU and DSA) ports the VID is
 * refcounted and the driver only called on the last deletion; on driver
 * failure the reference is restored so the caller may retry.
 *
 * Returns 0 on success, -ENOENT for a VID never added on a shared port,
 * or a negative errno from the driver.
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this VID; keep the hardware state */
	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		/* Deletion failed: resurrect the reference we just dropped */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
718 
dsa_switch_vlan_add(struct dsa_switch * ds,struct dsa_notifier_vlan_info * info)719 static int dsa_switch_vlan_add(struct dsa_switch *ds,
720 			       struct dsa_notifier_vlan_info *info)
721 {
722 	struct dsa_port *dp;
723 	int err;
724 
725 	if (!ds->ops->port_vlan_add)
726 		return -EOPNOTSUPP;
727 
728 	dsa_switch_for_each_port(dp, ds) {
729 		if (dsa_port_vlan_match(dp, info)) {
730 			err = dsa_port_do_vlan_add(dp, info->vlan,
731 						   info->extack);
732 			if (err)
733 				return err;
734 		}
735 	}
736 
737 	return 0;
738 }
739 
dsa_switch_vlan_del(struct dsa_switch * ds,struct dsa_notifier_vlan_info * info)740 static int dsa_switch_vlan_del(struct dsa_switch *ds,
741 			       struct dsa_notifier_vlan_info *info)
742 {
743 	struct dsa_port *dp;
744 	int err;
745 
746 	if (!ds->ops->port_vlan_del)
747 		return -EOPNOTSUPP;
748 
749 	dsa_switch_for_each_port(dp, ds) {
750 		if (dsa_port_vlan_match(dp, info)) {
751 			err = dsa_port_do_vlan_del(dp, info->vlan);
752 			if (err)
753 				return err;
754 		}
755 	}
756 
757 	return 0;
758 }
759 
dsa_switch_host_vlan_add(struct dsa_switch * ds,struct dsa_notifier_vlan_info * info)760 static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
761 				    struct dsa_notifier_vlan_info *info)
762 {
763 	struct dsa_port *dp;
764 	int err;
765 
766 	if (!ds->ops->port_vlan_add)
767 		return -EOPNOTSUPP;
768 
769 	dsa_switch_for_each_port(dp, ds) {
770 		if (dsa_port_host_vlan_match(dp, info->dp)) {
771 			err = dsa_port_do_vlan_add(dp, info->vlan,
772 						   info->extack);
773 			if (err)
774 				return err;
775 		}
776 	}
777 
778 	return 0;
779 }
780 
dsa_switch_host_vlan_del(struct dsa_switch * ds,struct dsa_notifier_vlan_info * info)781 static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
782 				    struct dsa_notifier_vlan_info *info)
783 {
784 	struct dsa_port *dp;
785 	int err;
786 
787 	if (!ds->ops->port_vlan_del)
788 		return -EOPNOTSUPP;
789 
790 	dsa_switch_for_each_port(dp, ds) {
791 		if (dsa_port_host_vlan_match(dp, info->dp)) {
792 			err = dsa_port_do_vlan_del(dp, info->vlan);
793 			if (err)
794 				return err;
795 		}
796 	}
797 
798 	return 0;
799 }
800 
/* Handle DSA_NOTIFIER_TAG_PROTO: migrate the switch to a new tagging
 * protocol. Runs under rtnl_lock (asserted below). The driver call is the
 * only step that may fail; once it succeeds, the CPU ports' tagger and the
 * per-slave tagger state / MTU (which depend on the tagger) are updated
 * unconditionally.
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
835 
/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Not a hard error per the comment above: the tagger side stays
	 * connected even though the switch side cannot use it.
	 */
	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
873 
/* Counterpart of dsa_switch_connect_tag_proto(): only the tagger side is
 * torn down, and only if it had per-switch state (ds->tagger_data) to
 * begin with. Always succeeds.
 */
static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}
889 
890 static int
dsa_switch_master_state_change(struct dsa_switch * ds,struct dsa_notifier_master_state_info * info)891 dsa_switch_master_state_change(struct dsa_switch *ds,
892 			       struct dsa_notifier_master_state_info *info)
893 {
894 	if (!ds->ops->master_state_change)
895 		return 0;
896 
897 	ds->ops->master_state_change(ds, info->master, info->operational);
898 
899 	return 0;
900 }
901 
/* Notifier callback registered for each switch of the tree: dispatch a
 * cross-chip event to the matching handler above. A non-zero handler
 * result stops the notifier chain, which is logged at debug level and
 * propagated via notifier_from_errno().
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
1001 
/* Subscribe @ds to the cross-chip notifier chain of its switch tree, so it
 * receives the DSA_NOTIFIER_* events dispatched by dsa_switch_event().
 */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
1008 
/* Unsubscribe @ds from its tree's cross-chip notifier chain. Failure is
 * only logged, since callers have no way to recover from it.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
1017