1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // Copyright (c) 2020, Nikolay Aleksandrov <nikolay@nvidia.com>
3 #include <linux/err.h>
4 #include <linux/export.h>
5 #include <linux/if_ether.h>
6 #include <linux/igmp.h>
7 #include <linux/in.h>
8 #include <linux/jhash.h>
9 #include <linux/kernel.h>
10 #include <linux/log2.h>
11 #include <linux/netdevice.h>
12 #include <linux/netfilter_bridge.h>
13 #include <linux/random.h>
14 #include <linux/rculist.h>
15 #include <linux/skbuff.h>
16 #include <linux/slab.h>
17 #include <linux/timer.h>
18 #include <linux/inetdevice.h>
19 #include <linux/mroute.h>
20 #include <net/ip.h>
21 #include <net/switchdev.h>
22 #if IS_ENABLED(CONFIG_IPV6)
23 #include <linux/icmpv6.h>
24 #include <net/ipv6.h>
25 #include <net/mld.h>
26 #include <net/ip6_checksum.h>
27 #include <net/addrconf.h>
28 #endif
29
30 #include "br_private.h"
31 #include "br_private_mcast_eht.h"
32
/* Forward declarations: set entry creation/deletion are referenced by the
 * filter-mode conversion helpers before their definitions below.
 */
static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
					   union net_bridge_eht_addr *src_addr,
					   union net_bridge_eht_addr *h_addr);
static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
					      struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src);
42
43 static struct net_bridge_group_eht_host *
br_multicast_eht_host_lookup(struct net_bridge_port_group * pg,union net_bridge_eht_addr * h_addr)44 br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
45 union net_bridge_eht_addr *h_addr)
46 {
47 struct rb_node *node = pg->eht_host_tree.rb_node;
48
49 while (node) {
50 struct net_bridge_group_eht_host *this;
51 int result;
52
53 this = rb_entry(node, struct net_bridge_group_eht_host,
54 rb_node);
55 result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
56 if (result < 0)
57 node = node->rb_left;
58 else if (result > 0)
59 node = node->rb_right;
60 else
61 return this;
62 }
63
64 return NULL;
65 }
66
br_multicast_eht_host_filter_mode(struct net_bridge_port_group * pg,union net_bridge_eht_addr * h_addr)67 static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
68 union net_bridge_eht_addr *h_addr)
69 {
70 struct net_bridge_group_eht_host *eht_host;
71
72 eht_host = br_multicast_eht_host_lookup(pg, h_addr);
73 if (!eht_host)
74 return MCAST_INCLUDE;
75
76 return eht_host->filter_mode;
77 }
78
79 static struct net_bridge_group_eht_set_entry *
br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set * eht_set,union net_bridge_eht_addr * h_addr)80 br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
81 union net_bridge_eht_addr *h_addr)
82 {
83 struct rb_node *node = eht_set->entry_tree.rb_node;
84
85 while (node) {
86 struct net_bridge_group_eht_set_entry *this;
87 int result;
88
89 this = rb_entry(node, struct net_bridge_group_eht_set_entry,
90 rb_node);
91 result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
92 if (result < 0)
93 node = node->rb_left;
94 else if (result > 0)
95 node = node->rb_right;
96 else
97 return this;
98 }
99
100 return NULL;
101 }
102
103 static struct net_bridge_group_eht_set *
br_multicast_eht_set_lookup(struct net_bridge_port_group * pg,union net_bridge_eht_addr * src_addr)104 br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
105 union net_bridge_eht_addr *src_addr)
106 {
107 struct rb_node *node = pg->eht_set_tree.rb_node;
108
109 while (node) {
110 struct net_bridge_group_eht_set *this;
111 int result;
112
113 this = rb_entry(node, struct net_bridge_group_eht_set,
114 rb_node);
115 result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
116 if (result < 0)
117 node = node->rb_left;
118 else if (result > 0)
119 node = node->rb_right;
120 else
121 return this;
122 }
123
124 return NULL;
125 }
126
/* Unlink @eht_host from its port group's host tree and free it.
 * Must only be called once all of the host's set entries are gone.
 */
static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
{
	WARN_ON(!hlist_empty(&eht_host->set_entries));

	/* drop this host from the per-port tracked-hosts count */
	br_multicast_eht_hosts_dec(eht_host->pg);

	rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
	RB_CLEAR_NODE(&eht_host->rb_node);
	kfree(eht_host);
}
137
/* Final destruction of a set entry, run from the bridge's mcast gc work
 * after the entry has been unlinked from its set's rb tree.
 */
static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set_entry *set_h;

	set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));

	/* wait out a concurrently running expiry timer before freeing */
	del_timer_sync(&set_h->timer);
	kfree(set_h);
}
148
/* Final destruction of a source set, run from the bridge's mcast gc work
 * after the set has been unlinked and emptied of entries.
 */
static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set *eht_set;

	eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
	WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));

	/* wait out a concurrently running expiry timer before freeing */
	del_timer_sync(&eht_set->timer);
	kfree(eht_set);
}
160
/* Unlink @set_h from its source set and owning host, queue it for
 * deferred destruction via the mcast gc work, and destroy the host if
 * this was its last entry.
 */
static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
{
	struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
	union net_bridge_eht_addr zero_addr;

	rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
	RB_CLEAR_NODE(&set_h->rb_node);
	hlist_del_init(&set_h->host_list);
	memset(&zero_addr, 0, sizeof(zero_addr));
	/* entries with an all-zero host address aren't counted in
	 * num_entries - NOTE(review): creation skips counting based on
	 * allow_zero_src (source addr) while this checks h_addr; presumably
	 * zero host addresses occur only for auto-created entries - verify
	 */
	if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
		eht_host->num_entries--;
	hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
	queue_work(system_long_wq, &set_h->br->mcast_gc_work);

	/* last entry for this host - tear the host down as well */
	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
}
178
/* Delete every entry of @eht_set, unlink the set from its port group and
 * queue the set itself for deferred destruction via the mcast gc work.
 */
static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct rb_node *node;

	/* always take the leftmost node since entries unlink themselves */
	while ((node = rb_first(&eht_set->entry_tree))) {
		set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
				 rb_node);
		__eht_del_set_entry(set_h);
	}

	rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
	RB_CLEAR_NODE(&eht_set->rb_node);
	hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
	queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
}
195
/* Flush all EHT source sets (and their entries) tracked for @pg. */
void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_eht_set *eht_set;
	struct rb_node *node;

	/* always take the leftmost node since sets unlink themselves */
	while ((node = rb_first(&pg->eht_set_tree))) {
		eht_set = rb_entry(node, struct net_bridge_group_eht_set,
				   rb_node);
		br_multicast_del_eht_set(eht_set);
	}
}
207
/* Expiry timer of a single set entry: delete the entry unless it was
 * already unlinked (RB_EMPTY_NODE) or the timer got re-armed
 * (timer_pending) before we took the multicast lock.
 */
static void br_multicast_eht_set_entry_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer);
	struct net_bridge *br = set_h->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
		goto out;

	br_multicast_del_eht_set_entry(set_h->eht_set->pg,
				       &set_h->eht_set->src_addr,
				       &set_h->h_addr);
out:
	spin_unlock(&br->multicast_lock);
}
223
/* Expiry timer of a whole source set: delete the set unless it was
 * already unlinked or its timer got re-armed before we took the lock.
 */
static void br_multicast_eht_set_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t,
							      timer);
	struct net_bridge *br = eht_set->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
		goto out;

	br_multicast_del_eht_set(eht_set);
out:
	spin_unlock(&br->multicast_lock);
}
238
/* Look up the host entry for @h_addr in @pg's host tree, creating and
 * inserting a new one with @filter_mode if it doesn't exist yet.
 * Returns NULL on allocation failure or when the tracked-hosts limit
 * has been reached.
 */
static struct net_bridge_group_eht_host *
__eht_lookup_create_host(struct net_bridge_port_group *pg,
			 union net_bridge_eht_addr *h_addr,
			 unsigned char filter_mode)
{
	struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_host *eht_host;

	/* standard rb-tree insertion walk, remembering the link to attach
	 * a new node to; an exact match returns the existing host
	 */
	while (*link) {
		struct net_bridge_group_eht_host *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_host,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	if (br_multicast_eht_hosts_over_limit(pg))
		return NULL;

	/* GFP_ATOMIC: creation can run in atomic (softirq/lock held) context */
	eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
	if (!eht_host)
		return NULL;

	memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
	INIT_HLIST_HEAD(&eht_host->set_entries);
	eht_host->pg = pg;
	eht_host->filter_mode = filter_mode;

	rb_link_node(&eht_host->rb_node, parent, link);
	rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);

	br_multicast_eht_hosts_inc(pg);

	return eht_host;
}
282
/* Look up @eht_host's entry in @eht_set, creating and inserting a new
 * one if it doesn't exist yet. Returns NULL on allocation failure or
 * when the host already holds PG_SRC_ENT_LIMIT entries (unless the
 * entry is the auto-created zero-source one, @allow_zero_src).
 */
static struct net_bridge_group_eht_set_entry *
__eht_lookup_create_set_entry(struct net_bridge *br,
			      struct net_bridge_group_eht_set *eht_set,
			      struct net_bridge_group_eht_host *eht_host,
			      bool allow_zero_src)
{
	struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set_entry *set_h;

	/* rb-tree insertion walk keyed by the host's address */
	while (*link) {
		struct net_bridge_group_eht_set_entry *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
				rb_node);
		result = memcmp(&eht_host->h_addr, &this->h_addr,
				sizeof(union net_bridge_eht_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	/* always allow auto-created zero entry */
	if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
		return NULL;

	/* GFP_ATOMIC: creation can run in atomic (softirq/lock held) context */
	set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
	if (!set_h)
		return NULL;

	memcpy(&set_h->h_addr, &eht_host->h_addr,
	       sizeof(union net_bridge_eht_addr));
	set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
	set_h->eht_set = eht_set;
	set_h->h_parent = eht_host;
	set_h->br = br;
	timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);

	hlist_add_head(&set_h->host_list, &eht_host->set_entries);
	rb_link_node(&set_h->rb_node, parent, link);
	rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
	/* we must not count the auto-created zero entry otherwise we won't be
	 * able to track the full list of PG_SRC_ENT_LIMIT entries
	 */
	if (!allow_zero_src)
		eht_host->num_entries++;

	return set_h;
}
336
/* Look up the source set for @src_addr in @pg's set tree, creating and
 * inserting a new (empty) one if it doesn't exist yet. Returns NULL on
 * allocation failure.
 */
static struct net_bridge_group_eht_set *
__eht_lookup_create_set(struct net_bridge_port_group *pg,
			union net_bridge_eht_addr *src_addr)
{
	struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set *eht_set;

	/* rb-tree insertion walk keyed by source address */
	while (*link) {
		struct net_bridge_group_eht_set *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set,
				rb_node);
		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	/* GFP_ATOMIC: creation can run in atomic (softirq/lock held) context */
	eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
	if (!eht_set)
		return NULL;

	memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
	eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
	eht_set->pg = pg;
	eht_set->br = pg->key.port->br;
	eht_set->entry_tree = RB_ROOT;
	timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);

	rb_link_node(&eht_set->rb_node, parent, link);
	rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);

	return eht_set;
}
376
br_multicast_ip_src_to_eht_addr(const struct br_ip * src,union net_bridge_eht_addr * dest)377 static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
378 union net_bridge_eht_addr *dest)
379 {
380 switch (src->proto) {
381 case htons(ETH_P_IP):
382 dest->ip4 = src->src.ip4;
383 break;
384 #if IS_ENABLED(CONFIG_IPV6)
385 case htons(ETH_P_IPV6):
386 memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
387 break;
388 #endif
389 }
390 }
391
/* Switch @h_addr's tracked filter mode and maintain the catch-all
 * zero-source set entry that goes with it: an EXCLUDE-mode host keeps an
 * auto-created zero-source entry, an INCLUDE-mode host must not have one.
 */
static void br_eht_convert_host_filter_mode(const struct net_bridge_mcast *brmctx,
					    struct net_bridge_port_group *pg,
					    union net_bridge_eht_addr *h_addr,
					    int filter_mode)
{
	struct net_bridge_group_eht_host *eht_host;
	union net_bridge_eht_addr zero_addr;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (eht_host)
		eht_host->filter_mode = filter_mode;

	memset(&zero_addr, 0, sizeof(zero_addr));
	switch (filter_mode) {
	case MCAST_INCLUDE:
		/* drop the zero-source entry, if any */
		br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
		break;
	case MCAST_EXCLUDE:
		/* (re)create the zero-source entry */
		br_multicast_create_eht_set_entry(brmctx, pg, &zero_addr,
						  h_addr, MCAST_EXCLUDE,
						  true);
		break;
	}
}
416
/* Create (or refresh) the set entry for (@src_addr, @h_addr) in @pg and
 * arm both the entry's and the set's timers with the group membership
 * interval. A zero source address is only accepted when @allow_zero_src
 * is set (the auto-created catch-all entry). On partial failure the
 * host/set created along the way are torn down again if they ended up
 * empty.
 */
static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
					      struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_eht_set *eht_set;
	union net_bridge_eht_addr zero_addr;

	memset(&zero_addr, 0, sizeof(zero_addr));
	if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
		return;

	eht_set = __eht_lookup_create_set(pg, src_addr);
	if (!eht_set)
		return;

	eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
	if (!eht_host)
		goto fail_host;

	set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
					      allow_zero_src);
	if (!set_h)
		goto fail_set_entry;

	mod_timer(&set_h->timer, jiffies + br_multicast_gmi(brmctx));
	mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(brmctx));

	return;

fail_set_entry:
	/* only destroy what we may have just created and left empty */
	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
fail_host:
	if (RB_EMPTY_ROOT(&eht_set->entry_tree))
		br_multicast_del_eht_set(eht_set);
}
459
br_multicast_del_eht_set_entry(struct net_bridge_port_group * pg,union net_bridge_eht_addr * src_addr,union net_bridge_eht_addr * h_addr)460 static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
461 union net_bridge_eht_addr *src_addr,
462 union net_bridge_eht_addr *h_addr)
463 {
464 struct net_bridge_group_eht_set_entry *set_h;
465 struct net_bridge_group_eht_set *eht_set;
466 bool set_deleted = false;
467
468 eht_set = br_multicast_eht_set_lookup(pg, src_addr);
469 if (!eht_set)
470 goto out;
471
472 set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
473 if (!set_h)
474 goto out;
475
476 __eht_del_set_entry(set_h);
477
478 if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
479 br_multicast_del_eht_set(eht_set);
480 set_deleted = true;
481 }
482
483 out:
484 return set_deleted;
485 }
486
/* Delete all set entries belonging to host @h_addr. Deleting the last
 * entry also destroys the eht_host itself (via __eht_del_set_entry),
 * which is why the _safe list iterator is required here.
 */
static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
				      union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct hlist_node *tmp;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (!eht_host)
		return;

	hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
		br_multicast_del_eht_set_entry(set_h->eht_set->pg,
					       &set_h->eht_set->src_addr,
					       &set_h->h_addr);
}
503
504 /* create new set entries from reports */
__eht_create_set_entries(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,union net_bridge_eht_addr * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int filter_mode)505 static void __eht_create_set_entries(const struct net_bridge_mcast *brmctx,
506 struct net_bridge_port_group *pg,
507 union net_bridge_eht_addr *h_addr,
508 void *srcs,
509 u32 nsrcs,
510 size_t addr_size,
511 int filter_mode)
512 {
513 union net_bridge_eht_addr eht_src_addr;
514 u32 src_idx;
515
516 memset(&eht_src_addr, 0, sizeof(eht_src_addr));
517 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
518 memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
519 br_multicast_create_eht_set_entry(brmctx, pg, &eht_src_addr,
520 h_addr, filter_mode,
521 false);
522 }
523 }
524
/* delete existing set entries and their (S,G) entries if they were the last */
static bool __eht_del_set_entries(struct net_bridge_port_group *pg,
				  union net_bridge_eht_addr *h_addr,
				  void *srcs,
				  u32 nsrcs,
				  size_t addr_size)
{
	union net_bridge_eht_addr eht_src_addr;
	struct net_bridge_group_src *src_ent;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		/* false means other hosts still use this source set */
		if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
			continue;
		/* the whole set went away - drop the matching (S,G) source.
		 * NOTE(review): this memcpy assumes the src address union is
		 * the first member of struct br_ip - verify against its
		 * definition
		 */
		memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
		src_ent = br_multicast_find_group_src(pg, &src_ip);
		if (!src_ent)
			continue;
		br_multicast_del_group_src(src_ent, true);
		changed = true;
	}

	return changed;
}
555
br_multicast_eht_allow(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,union net_bridge_eht_addr * h_addr,void * srcs,u32 nsrcs,size_t addr_size)556 static bool br_multicast_eht_allow(const struct net_bridge_mcast *brmctx,
557 struct net_bridge_port_group *pg,
558 union net_bridge_eht_addr *h_addr,
559 void *srcs,
560 u32 nsrcs,
561 size_t addr_size)
562 {
563 bool changed = false;
564
565 switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
566 case MCAST_INCLUDE:
567 __eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs,
568 addr_size, MCAST_INCLUDE);
569 break;
570 case MCAST_EXCLUDE:
571 changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
572 addr_size);
573 break;
574 }
575
576 return changed;
577 }
578
br_multicast_eht_block(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,union net_bridge_eht_addr * h_addr,void * srcs,u32 nsrcs,size_t addr_size)579 static bool br_multicast_eht_block(const struct net_bridge_mcast *brmctx,
580 struct net_bridge_port_group *pg,
581 union net_bridge_eht_addr *h_addr,
582 void *srcs,
583 u32 nsrcs,
584 size_t addr_size)
585 {
586 bool changed = false;
587
588 switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
589 case MCAST_INCLUDE:
590 changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
591 addr_size);
592 break;
593 case MCAST_EXCLUDE:
594 __eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
595 MCAST_EXCLUDE);
596 break;
597 }
598
599 return changed;
600 }
601
/* Core INCLUDE/EXCLUDE report processing for one host:
 * - if the host is changing filter mode, or the record is a TO_* report
 *   (@to_report), flush the host's existing entries first
 * - (re)create entries for all reported sources
 * - after a flush, delete (S,G) entries whose source set disappeared
 * Returns true if an (S,G) entry was deleted.
 */
/* flush_entries is true when changing mode */
static bool __eht_inc_exc(const struct net_bridge_mcast *brmctx,
			  struct net_bridge_port_group *pg,
			  union net_bridge_eht_addr *h_addr,
			  void *srcs,
			  u32 nsrcs,
			  size_t addr_size,
			  unsigned char filter_mode,
			  bool to_report)
{
	bool changed = false, flush_entries = to_report;
	union net_bridge_eht_addr eht_src_addr;

	if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
		flush_entries = true;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	/* if we're changing mode del host and its entries */
	if (flush_entries)
		br_multicast_del_eht_host(pg, h_addr);
	__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				 filter_mode);
	/* we can be missing sets only if we've deleted some entries */
	if (flush_entries) {
		struct net_bridge_group_eht_set *eht_set;
		struct net_bridge_group_src *src_ent;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
			br_multicast_ip_src_to_eht_addr(&src_ent->addr,
							&eht_src_addr);
			if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
				br_multicast_del_group_src(src_ent, true);
				changed = true;
				continue;
			}
			/* this is an optimization for TO_INCLUDE where we lower
			 * the set's timeout to LMQT to catch timeout hosts:
			 * - host A (timing out): set entries X, Y
			 * - host B: set entry Z (new from current TO_INCLUDE)
			 *   sends BLOCK Z after LMQT but host A's EHT
			 *   entries still exist (unless lowered to LMQT
			 *   so they can timeout with the S,Gs)
			 * => we wait another LMQT, when we can just delete the
			 *    group immediately
			 */
			if (!(src_ent->flags & BR_SGRP_F_SEND) ||
			    filter_mode != MCAST_INCLUDE ||
			    !to_report)
				continue;
			eht_set = br_multicast_eht_set_lookup(pg,
							      &eht_src_addr);
			if (!eht_set)
				continue;
			mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(brmctx));
		}
	}

	return changed;
}
662
/* Process an INCLUDE-mode record for host @h_addr, then switch the
 * host's tracked filter mode to MCAST_INCLUDE.
 * Returns true if an (S,G) entry was deleted.
 */
static bool br_multicast_eht_inc(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool ret;

	ret = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
			    MCAST_INCLUDE, to_report);
	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_INCLUDE);

	return ret;
}
679
/* Process an EXCLUDE-mode record for host @h_addr, then switch the
 * host's tracked filter mode to MCAST_EXCLUDE.
 * Returns true if an (S,G) entry was deleted.
 */
static bool br_multicast_eht_exc(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool ret;

	ret = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
			    MCAST_EXCLUDE, to_report);
	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_EXCLUDE);

	return ret;
}
696
__eht_ip4_handle(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,union net_bridge_eht_addr * h_addr,void * srcs,u32 nsrcs,int grec_type)697 static bool __eht_ip4_handle(const struct net_bridge_mcast *brmctx,
698 struct net_bridge_port_group *pg,
699 union net_bridge_eht_addr *h_addr,
700 void *srcs,
701 u32 nsrcs,
702 int grec_type)
703 {
704 bool changed = false, to_report = false;
705
706 switch (grec_type) {
707 case IGMPV3_ALLOW_NEW_SOURCES:
708 br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
709 sizeof(__be32));
710 break;
711 case IGMPV3_BLOCK_OLD_SOURCES:
712 changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
713 sizeof(__be32));
714 break;
715 case IGMPV3_CHANGE_TO_INCLUDE:
716 to_report = true;
717 fallthrough;
718 case IGMPV3_MODE_IS_INCLUDE:
719 changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
720 sizeof(__be32), to_report);
721 break;
722 case IGMPV3_CHANGE_TO_EXCLUDE:
723 to_report = true;
724 fallthrough;
725 case IGMPV3_MODE_IS_EXCLUDE:
726 changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
727 sizeof(__be32), to_report);
728 break;
729 }
730
731 return changed;
732 }
733
734 #if IS_ENABLED(CONFIG_IPV6)
__eht_ip6_handle(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,union net_bridge_eht_addr * h_addr,void * srcs,u32 nsrcs,int grec_type)735 static bool __eht_ip6_handle(const struct net_bridge_mcast *brmctx,
736 struct net_bridge_port_group *pg,
737 union net_bridge_eht_addr *h_addr,
738 void *srcs,
739 u32 nsrcs,
740 int grec_type)
741 {
742 bool changed = false, to_report = false;
743
744 switch (grec_type) {
745 case MLD2_ALLOW_NEW_SOURCES:
746 br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
747 sizeof(struct in6_addr));
748 break;
749 case MLD2_BLOCK_OLD_SOURCES:
750 changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
751 sizeof(struct in6_addr));
752 break;
753 case MLD2_CHANGE_TO_INCLUDE:
754 to_report = true;
755 fallthrough;
756 case MLD2_MODE_IS_INCLUDE:
757 changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
758 sizeof(struct in6_addr),
759 to_report);
760 break;
761 case MLD2_CHANGE_TO_EXCLUDE:
762 to_report = true;
763 fallthrough;
764 case MLD2_MODE_IS_EXCLUDE:
765 changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
766 sizeof(struct in6_addr),
767 to_report);
768 break;
769 }
770
771 return changed;
772 }
773 #endif
774
/* Entry point for EHT processing of an IGMPv3/MLDv2 group record.
 * EHT tracking is only active when the port has fast-leave enabled.
 * @addr_size selects IPv4 (sizeof __be32) vs IPv6 handling.
 * Returns true if an (S,G) entry was deleted.
 */
bool br_multicast_eht_handle(const struct net_bridge_mcast *brmctx,
			     struct net_bridge_port_group *pg,
			     void *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size,
			     int grec_type)
{
	bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
	union net_bridge_eht_addr eht_host_addr;
	bool changed = false;

	if (!eht_enabled)
		goto out;

	/* copy only addr_size bytes; the rest of the union stays zeroed */
	memset(&eht_host_addr, 0, sizeof(eht_host_addr));
	memcpy(&eht_host_addr, h_addr, addr_size);
	if (addr_size == sizeof(__be32))
		changed = __eht_ip4_handle(brmctx, pg, &eht_host_addr, srcs,
					   nsrcs, grec_type);
#if IS_ENABLED(CONFIG_IPV6)
	else
		changed = __eht_ip6_handle(brmctx, pg, &eht_host_addr, srcs,
					   nsrcs, grec_type);
#endif

out:
	return changed;
}
805
/* Set the maximum number of tracked EHT hosts for port @p under the
 * bridge's multicast lock. A limit of zero is rejected with -EINVAL.
 */
int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
				     u32 eht_hosts_limit)
{
	if (!eht_hosts_limit)
		return -EINVAL;

	spin_lock_bh(&p->br->multicast_lock);
	p->multicast_eht_hosts_limit = eht_hosts_limit;
	spin_unlock_bh(&p->br->multicast_lock);

	return 0;
}
820