/*
 * IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *     Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 * Michael Chastain : Incorrect size of copying.
 * Alan Cox : Added the cache manager code
 * Alan Cox : Fixed the clone/copy bug and device race.
 * Mike McLagan : Routing by source
 * Malcolm Beattie : Buffer handling fixes.
 * Alexey Kuznetsov : Double buffer free and other fixes.
 * SVR Anand : Fixed several multicast bugs and problems.
 * Alexey Kuznetsov : Status, optimisations and more.
 * Brad Parker : Better behaviour on mrouted upcall
 *     overflow.
 * Carlos Picoto : PIMv1 Support
 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 *     Relax this requirement to work with older peers.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif

struct mr_table {
	struct list_head list;
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
	u32 id;
	struct sock __rcu *mroute_sk;
	struct timer_list ipmr_expire_timer;
	struct list_head mfc_unres_queue;
	struct list_head mfc_cache_array[MFC_LINES];
	struct vif_device vif_table[MAXVIFS];
	int maxvif;
	atomic_t cache_resolve_queue_len;
	int mroute_do_assert;
	int mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule common;
};

struct ipmr_result {
	struct mr_table *mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 * Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is free of exclusive locks entirely.
 */
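
/* A minimal sketch (not part of the original file) of the read-side
 * pattern this scheme implies: resolved-cache lookups on the data path
 * need only an RCU read-side section, e.g.
 *
 *	rcu_read_lock();
 *	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 *	if (cache)
 *		... forward using the entry ...
 *	rcu_read_unlock();
 *
 * while writers run in process context under mrt_lock and publish or
 * retire entries with list_add_rcu()/list_del_rcu(), as done below.
 */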

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
	.family = RTNL_FAMILY_IPMR,
	.rule_size = sizeof(struct ipmr_rule),
	.addr_size = sizeof(u32),
	.action = ipmr_rule_action,
	.match = ipmr_rule_match,
	.configure = ipmr_rule_configure,
	.compare = ipmr_rule_compare,
	.default_pref = fib_default_rule_pref,
	.fill = ipmr_rule_fill,
	.nlgroup = RTNLGRP_IPV4_RULE,
	.policy = ipmr_rule_policy,
	.owner = THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	ipmr_free_table(net->ipv4.mrt);
}
#endif

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	mrt = ipmr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_iif = skb->skb_iif,
		.flowi4_mark = skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit = reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type = ARPHRD_PIMREG;
	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags = IFF_NOARP;
	dev->netdev_ops = &reg_vif_netdev_ops;
	dev->destructor = free_netdev;
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);

	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 * Delete a VIF entry
 * @notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}


/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;

	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && __in_dev_get_rtnl(dev) == NULL) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}

/*
 * Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c)
		c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}

/*
 * A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}

/*
 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */

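/* For reference (layout reproduced from <linux/mroute.h>; this note is
 * not part of the original file): the upcall overlays struct igmpmsg on
 * the copied IP header,
 *
 *	struct igmpmsg {
 *		__u32 unused1, unused2;
 *		unsigned char im_msgtype;	-- IGMPMSG_*
 *		unsigned char im_mbz;		-- must be zero
 *		unsigned char im_vif;
 *		unsigned char unused3;
 *		struct in_addr im_src, im_dst;
 *	};
 *
 * so im_mbz aliases the IP header's protocol byte; clearing it below
 * (ip_hdr(skb)->protocol = 0) is what marks the message as an upcall.
 */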
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sock *mroute_sk;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

		/* Copy the IP header */

		skb->network_header = skb->tail;
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));

		/* Add our header */

		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (mroute_sk == NULL) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */

	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		if (net_ratelimit())
			pr_warn("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 * Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */

		c->mfc_parent = -1;
		c->mfc_origin = iph->saddr;
		c->mfc_mcastgrp = iph->daddr;

		/* Reflect first query at mrouted. */

		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */

	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 * MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			list_del_rcu(&c->list);

			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/*
	 * Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	return 0;
}

/*
 * Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */

	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * so we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);
		}
	}
	rtnl_unlock();
}

/*
 * Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */
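
/* Typical userspace usage (illustrative sketch, not part of this file):
 * a routing daemon such as mrouted or pimd drives this interface on a
 * raw IGMP socket roughly as
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *	struct vifctl vc = { .vifc_vifi = 0, ... };
 *	struct mfcctl mc = { .mfcc_origin = ..., .mfcc_mcastgrp = ..., ... };
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * and reads the IGMPMSG_* upcalls built by ipmr_cache_report() above
 * from the same socket.
 */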

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (rtnl_dereference(mrt->mroute_sk)) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk))
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		rtnl_unlock();
		return ret;

	/*
	 * Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(mrt, &mfc);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;
	/*
	 * Control PIM assert.
	 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = (v) ? 1 : 0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = (v) ? 1 : 0;

		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	case MRT_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;

		rtnl_lock();
		ret = 0;
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			if (!ipmr_new_table(net, v))
				ret = -ENOMEM;
			raw_sk(sk)->ipmr_table = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 * Spurious command, or MRT_VERSION which you cannot
	 * set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 * Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
#endif
	else
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 * The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t vifi;	/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 * Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */

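/* Resulting on-wire layout (sketch, not part of the original file):
 *
 *	before:	[ inner IP | payload ]
 *	after:	[ outer IP, proto = IPPROTO_IPIP | inner IP | payload ]
 *
 * The outer header copies tos/ttl from the inner one and, at the call
 * site below, is addressed from the vif's local to its remote endpoint,
 * so the far end can decapsulate it like any other IPIP packet.
 */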
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->tos = old_iph->tos;
	iph->ttl = old_iph->ttl;
	iph->frag_off = 0;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->protocol = IPPROTO_IPIP;
	iph->ihl = 5;
	iph->tot_len = htons(skb->len);
	ip_select_ident(skb, skb_dst(skb), NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 * Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}
#endif

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so such packets will
		 * disappear into a blackhole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicast
	 * application, it should receive packets regardless of which
	 * interface the application joined on; otherwise the application
	 * would have to join on all interfaces. On the other hand, a
	 * multihomed host (or router, but not mrouter) cannot join on more
	 * than one interface - that would result in receiving duplicate
	 * packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for (S,G)
			 * entries whose default multicast route points to
			 * the wrong oif. In any case, it is not a good
			 * idea to run multicast applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 * Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = rt->rt_oif,
		.flowi4_iif = rt->rt_iif,
		.flowi4_mark = rt->rt_mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/*
 * Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations, such
			 * as Cisco IOS <= 11.2(8)) do not put the router
			 * alert option into IGMP packets destined to
			 * routable groups. That is very bad, because it
			 * means we could forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 * No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	 * Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#endif

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
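	/* A Register is accepted if it has the right type/version, is not
	 * a NULL-Register, and checksums correctly: per the PIM-SM spec
	 * the checksum covers only the PIM header, and the full-packet
	 * checksum is tried as a fallback for older peers (see the "Relax
	 * this requirement" note in the header comment).
	 */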
2000 if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
2001 (pim->flags & PIM_NULL_REGISTER) ||
2002 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2003 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2004 goto drop;
2005
2006 mrt = ipmr_rt_fib_lookup(net, skb);
2007 if (IS_ERR(mrt))
2008 goto drop;
2009 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2010 drop:
2011 kfree_skb(skb);
2012 }
2013 return 0;
2014 }
2015 #endif
2016
__ipmr_fill_mroute(struct mr_table * mrt,struct sk_buff * skb,struct mfc_cache * c,struct rtmsg * rtm)2017 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2018 struct mfc_cache *c, struct rtmsg *rtm)
2019 {
2020 int ct;
2021 struct rtnexthop *nhp;
2022 u8 *b = skb_tail_pointer(skb);
2023 struct rtattr *mp_head;
2024
2025 /* If cache is unresolved, don't try to parse IIF and OIF */
2026 if (c->mfc_parent >= MAXVIFS)
2027 return -ENOENT;
2028
2029 if (VIF_EXISTS(mrt, c->mfc_parent))
2030 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
2031
2032 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2033
2034 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2035 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2036 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
2037 goto rtattr_failure;
2038 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
2039 nhp->rtnh_flags = 0;
2040 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2041 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2042 nhp->rtnh_len = sizeof(*nhp);
2043 }
2044 }
2045 mp_head->rta_type = RTA_MULTIPATH;
2046 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
2047 rtm->rtm_type = RTN_MULTICAST;
2048 return 1;
2049
2050 rtattr_failure:
2051 nlmsg_trim(skb, b);
2052 return -EMSGSIZE;
2053 }
2054
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

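		/* Build a minimal pseudo IP header on the clone; its
		 * version field stays 0 so ipmr_cache_resolve() can
		 * distinguish this netlink query from a real packet.
		 */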
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

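/*
 * Encode one mfc_cache entry as an RTM_NEWROUTE message (family
 * RTNL_FAMILY_IPMR, /32 source and group) for the dump callback
 * below.
 */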
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 pid, u32 seq, struct mfc_cache *c)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len = 32;
	rtm->rtm_src_len = 32;
	rtm->rtm_tos = 0;
	rtm->rtm_table = mrt->id;
	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = RTPROT_UNSPEC;
	rtm->rtm_flags = 0;

	NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
	NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);

	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

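/*
 * Dump all resolved mfc entries to netlink.  cb->args[0..2] store
 * the table, hash line and entry reached so far, so an interrupted
 * dump resumes where the previous read stopped.
 */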
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     mfc) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}

#ifdef CONFIG_PROC_FS
/*
 * The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
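/*
 * Illustrative ip_mr_vif line (values are made up; see
 * ipmr_vif_seq_show() for the exact format):
 *
 *  0 pimreg            0       0         0       0 00004 00000000 00000000
 */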
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

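/*
 * seq_file iterator over the vif table.  The walk runs entirely
 * under mrt_lock, taken in ->start and released in ->stop.
 */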
static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next = ipmr_vif_seq_next,
	.stop = ipmr_vif_seq_stop,
	.show = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner = THIS_MODULE,
	.open = ipmr_vif_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

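/*
 * The mfc iterator spans two lists with different locking: the
 * resolved hash lines are walked under rcu_read_lock() and the
 * unresolved queue under mfc_unres_lock.  it->cache records which
 * list (and therefore which lock) is currently held, so ->stop can
 * release the right one.
 */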
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next = ipmr_mfc_seq_next,
	.stop = ipmr_mfc_seq_stop,
	.show = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner = THIS_MODULE,
	.open = ipmr_mfc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

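/*
 * PIMv2 runs directly over IP as protocol 103 (IPPROTO_PIM); the
 * handler below is attached to the inet protocol table by
 * ip_mr_init().
 */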
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif

/*
 * Setup for IP multicast routing
 */
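/*
 * Per-namespace setup: create the multicast routing table(s) via
 * ipmr_rules_init() and register the /proc entries; ipmr_net_exit()
 * undoes both on namespace teardown.
 */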
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

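/*
 * Module initialisation.  Each error label below undoes the
 * registrations that succeeded before the failure, in reverse
 * order: netdevice notifier, pernet subsystem, then the slab cache.
 */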
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}