1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IPv6 tunneling device
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Ville Nuorvala <vnuorval@tcs.hut.fi>
8 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
9 *
10 * Based on:
11 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
12 *
13 * RFC 2473
14 */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/module.h>
19 #include <linux/capability.h>
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/sockios.h>
23 #include <linux/icmp.h>
24 #include <linux/if.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/net.h>
28 #include <linux/in6.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/icmpv6.h>
32 #include <linux/init.h>
33 #include <linux/route.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/netfilter_ipv6.h>
36 #include <linux/slab.h>
37 #include <linux/hash.h>
38 #include <linux/etherdevice.h>
39
40 #include <linux/uaccess.h>
41 #include <linux/atomic.h>
42
43 #include <net/icmp.h>
44 #include <net/ip.h>
45 #include <net/ip_tunnels.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_route.h>
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/xfrm.h>
51 #include <net/dsfield.h>
52 #include <net/inet_ecn.h>
53 #include <net/net_namespace.h>
54 #include <net/netns/generic.h>
55 #include <net/dst_metadata.h>
56
57 MODULE_AUTHOR("Ville Nuorvala");
58 MODULE_DESCRIPTION("IPv6 tunneling device");
59 MODULE_LICENSE("GPL");
60 MODULE_ALIAS_RTNL_LINK("ip6tnl");
61 MODULE_ALIAS_NETDEV("ip6tnl0");
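/* Illustrative userspace usage (assumes iproute2; device name and addresses
 * are examples only):
 *
 *   ip -6 tunnel add ip6tnl1 mode ip6ip6 remote 2001:db8::2 local 2001:db8::1
 *   ip link set ip6tnl1 up
 *
 * The fallback device "ip6tnl0" is registered when the module loads and
 * receives packets for which no configured tunnel matches.
 */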
62
63 #define IP6_TUNNEL_HASH_SIZE_SHIFT 5
64 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
65
66 static bool log_ecn_error = true;
67 module_param(log_ecn_error, bool, 0644);
68 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
69
70 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
71 {
72 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
73
74 return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
75 }
76
77 static int ip6_tnl_dev_init(struct net_device *dev);
78 static void ip6_tnl_dev_setup(struct net_device *dev);
79 static struct rtnl_link_ops ip6_link_ops __read_mostly;
80
81 static unsigned int ip6_tnl_net_id __read_mostly;
82 struct ip6_tnl_net {
83 /* the IPv6 tunnel fallback device */
84 struct net_device *fb_tnl_dev;
85 /* lists for storing tunnels in use */
86 struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
87 struct ip6_tnl __rcu *tnls_wc[1];
88 struct ip6_tnl __rcu **tnls[2];
89 struct ip6_tnl __rcu *collect_md_tun;
90 };
91
92 static inline int ip6_tnl_mpls_supported(void)
93 {
94 return IS_ENABLED(CONFIG_MPLS);
95 }
96
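/* Walk one hash chain under rcu_read_lock(); relies on a local variable 't'
 * of type struct ip6_tnl * being declared in the calling scope.
 */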
97 #define for_each_ip6_tunnel_rcu(start) \
98 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
99
100 /**
101 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
102 * @net: network namespace
103 * @link: ifindex of underlying interface
104 * @remote: the address of the tunnel exit-point
105 * @local: the address of the tunnel entry-point
106 *
107 * Return:
108 * tunnel matching given end-points if found,
109 * else fallback tunnel if its device is up,
110 * else %NULL
111 **/
112
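/* Lookup order: exact (remote, local) match, then (any, local), then
 * (remote, any), preferring a tunnel bound to the same underlying link;
 * finally the collect_md tunnel and the fallback device.
 */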
113 static struct ip6_tnl *
114 ip6_tnl_lookup(struct net *net, int link,
115 const struct in6_addr *remote, const struct in6_addr *local)
116 {
117 unsigned int hash = HASH(remote, local);
118 struct ip6_tnl *t, *cand = NULL;
119 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
120 struct in6_addr any;
121
122 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
123 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
124 !ipv6_addr_equal(remote, &t->parms.raddr) ||
125 !(t->dev->flags & IFF_UP))
126 continue;
127
128 if (link == t->parms.link)
129 return t;
130 else
131 cand = t;
132 }
133
134 memset(&any, 0, sizeof(any));
135 hash = HASH(&any, local);
136 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
137 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
138 !ipv6_addr_any(&t->parms.raddr) ||
139 !(t->dev->flags & IFF_UP))
140 continue;
141
142 if (link == t->parms.link)
143 return t;
144 else if (!cand)
145 cand = t;
146 }
147
148 hash = HASH(remote, &any);
149 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
151 !ipv6_addr_any(&t->parms.laddr) ||
152 !(t->dev->flags & IFF_UP))
153 continue;
154
155 if (link == t->parms.link)
156 return t;
157 else if (!cand)
158 cand = t;
159 }
160
161 if (cand)
162 return cand;
163
164 t = rcu_dereference(ip6n->collect_md_tun);
165 if (t && t->dev->flags & IFF_UP)
166 return t;
167
168 t = rcu_dereference(ip6n->tnls_wc[0]);
169 if (t && (t->dev->flags & IFF_UP))
170 return t;
171
172 return NULL;
173 }
174
175 /**
176 * ip6_tnl_bucket - get head of list matching given tunnel parameters
177 * @ip6n: the private data for ip6_tnl in the netns
178 * @p: parameters containing tunnel end-points
179 *
180 * Description:
181 * ip6_tnl_bucket() returns the head of the list matching the
182 * &struct in6_addr entries laddr and raddr in @p.
183 *
184 * Return: head of IPv6 tunnel list
185 **/
186
187 static struct ip6_tnl __rcu **
188 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
189 {
190 const struct in6_addr *remote = &p->raddr;
191 const struct in6_addr *local = &p->laddr;
192 unsigned int h = 0;
193 int prio = 0;
194
195 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
196 prio = 1;
197 h = HASH(remote, local);
198 }
199 return &ip6n->tnls[prio][h];
200 }
201
202 /**
203 * ip6_tnl_link - add tunnel to hash table
204 * @ip6n: the private data for ip6_tnl in the netns
205 * @t: tunnel to be added
206 **/
207
208 static void
209 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
210 {
211 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
212
213 if (t->parms.collect_md)
214 rcu_assign_pointer(ip6n->collect_md_tun, t);
215 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
216 rcu_assign_pointer(*tp, t);
217 }
218
219 /**
220 * ip6_tnl_unlink - remove tunnel from hash table
221 * @ip6n: the private data for ip6_tnl in the netns
222 * @t: tunnel to be removed
223 **/
224
225 static void
226 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
227 {
228 struct ip6_tnl __rcu **tp;
229 struct ip6_tnl *iter;
230
231 if (t->parms.collect_md)
232 rcu_assign_pointer(ip6n->collect_md_tun, NULL);
233
234 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
235 (iter = rtnl_dereference(*tp)) != NULL;
236 tp = &iter->next) {
237 if (t == iter) {
238 rcu_assign_pointer(*tp, t->next);
239 break;
240 }
241 }
242 }
243
244 static void ip6_dev_free(struct net_device *dev)
245 {
246 struct ip6_tnl *t = netdev_priv(dev);
247
248 gro_cells_destroy(&t->gro_cells);
249 dst_cache_destroy(&t->dst_cache);
250 free_percpu(dev->tstats);
251 }
252
253 static int ip6_tnl_create2(struct net_device *dev)
254 {
255 struct ip6_tnl *t = netdev_priv(dev);
256 struct net *net = dev_net(dev);
257 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
258 int err;
259
260 dev->rtnl_link_ops = &ip6_link_ops;
261 err = register_netdevice(dev);
262 if (err < 0)
263 goto out;
264
265 strcpy(t->parms.name, dev->name);
266
267 ip6_tnl_link(ip6n, t);
268 return 0;
269
270 out:
271 return err;
272 }
273
274 /**
275 * ip6_tnl_create - create a new tunnel
276 * @net: network namespace
277 * @p: tunnel parameters
278 *
279 * Description:
280 * Create tunnel matching given parameters.
281 *
282 * Return:
283 * created tunnel or error pointer
284 **/
285
286 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
287 {
288 struct net_device *dev;
289 struct ip6_tnl *t;
290 char name[IFNAMSIZ];
291 int err = -E2BIG;
292
293 if (p->name[0]) {
294 if (!dev_valid_name(p->name))
295 goto failed;
296 strlcpy(name, p->name, IFNAMSIZ);
297 } else {
298 sprintf(name, "ip6tnl%%d");
299 }
300 err = -ENOMEM;
301 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
302 ip6_tnl_dev_setup);
303 if (!dev)
304 goto failed;
305
306 dev_net_set(dev, net);
307
308 t = netdev_priv(dev);
309 t->parms = *p;
310 t->net = dev_net(dev);
311 err = ip6_tnl_create2(dev);
312 if (err < 0)
313 goto failed_free;
314
315 return t;
316
317 failed_free:
318 free_netdev(dev);
319 failed:
320 return ERR_PTR(err);
321 }
322
323 /**
324 * ip6_tnl_locate - find or create tunnel matching given parameters
325 * @net: network namespace
326 * @p: tunnel parameters
327 * @create: != 0 if allowed to create a new tunnel when no match is found
328 *
329 * Description:
330 * ip6_tnl_locate() first tries to locate an existing tunnel
331 * based on @p. If this is unsuccessful, but @create is set, a new
332 * tunnel device is created and registered for use.
333 *
334 * Return:
335 * matching tunnel or error pointer
336 **/
337
338 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
339 struct __ip6_tnl_parm *p, int create)
340 {
341 const struct in6_addr *remote = &p->raddr;
342 const struct in6_addr *local = &p->laddr;
343 struct ip6_tnl __rcu **tp;
344 struct ip6_tnl *t;
345 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
346
347 for (tp = ip6_tnl_bucket(ip6n, p);
348 (t = rtnl_dereference(*tp)) != NULL;
349 tp = &t->next) {
350 if (ipv6_addr_equal(local, &t->parms.laddr) &&
351 ipv6_addr_equal(remote, &t->parms.raddr) &&
352 p->link == t->parms.link) {
353 if (create)
354 return ERR_PTR(-EEXIST);
355
356 return t;
357 }
358 }
359 if (!create)
360 return ERR_PTR(-ENODEV);
361 return ip6_tnl_create(net, p);
362 }
363
364 /**
365 * ip6_tnl_dev_uninit - tunnel device uninitializer
366 * @dev: the device to be destroyed
367 *
368 * Description:
369 * ip6_tnl_dev_uninit() removes tunnel from its list
370 **/
371
372 static void
373 ip6_tnl_dev_uninit(struct net_device *dev)
374 {
375 struct ip6_tnl *t = netdev_priv(dev);
376 struct net *net = t->net;
377 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
378
379 if (dev == ip6n->fb_tnl_dev)
380 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
381 else
382 ip6_tnl_unlink(ip6n, t);
383 dst_cache_reset(&t->dst_cache);
384 dev_put_track(dev, &t->dev_tracker);
385 }
386
387 /**
388 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
389 * @skb: received socket buffer
390 * @raw: the ICMPv6 error message data
391 *
392 * Return:
393 * 0 if no encapsulation limit option was found,
394 * else offset of the option relative to @raw
395 **/
396
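/* The Tunnel Encapsulation Limit option (RFC 2473, sec. 4.1.1) is a
 * destination option of type IPV6_TLV_TNL_ENCAP_LIMIT with a one-byte
 * payload holding the number of further encapsulations still allowed.
 */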
397 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
398 {
399 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
400 unsigned int nhoff = raw - skb->data;
401 unsigned int off = nhoff + sizeof(*ipv6h);
402 u8 next, nexthdr = ipv6h->nexthdr;
403
404 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
405 struct ipv6_opt_hdr *hdr;
406 u16 optlen;
407
408 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
409 break;
410
411 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
412 if (nexthdr == NEXTHDR_FRAGMENT) {
413 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
414 if (frag_hdr->frag_off)
415 break;
416 optlen = 8;
417 } else if (nexthdr == NEXTHDR_AUTH) {
418 optlen = ipv6_authlen(hdr);
419 } else {
420 optlen = ipv6_optlen(hdr);
421 }
422 /* cache hdr->nexthdr, since pskb_may_pull() might
423 * invalidate hdr
424 */
425 next = hdr->nexthdr;
426 if (nexthdr == NEXTHDR_DEST) {
427 u16 i = 2;
428
429 /* Remember: hdr is no longer valid at this point. */
430 if (!pskb_may_pull(skb, off + optlen))
431 break;
432
433 while (1) {
434 struct ipv6_tlv_tnl_enc_lim *tel;
435
436 /* No more room for encapsulation limit */
437 if (i + sizeof(*tel) > optlen)
438 break;
439
440 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
441 /* return index of option if found and valid */
442 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
443 tel->length == 1)
444 return i + off - nhoff;
445 /* else jump to next option */
446 if (tel->type)
447 i += tel->length + 2;
448 else
449 i++;
450 }
451 }
452 nexthdr = next;
453 off += optlen;
454 }
455 return 0;
456 }
457 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
458
459 /* ip6_tnl_err() should handle errors in the tunnel according to the
460 * specifications in RFC 2473.
461 */
462 static int
463 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
464 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
465 {
466 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
467 struct net *net = dev_net(skb->dev);
468 u8 rel_type = ICMPV6_DEST_UNREACH;
469 u8 rel_code = ICMPV6_ADDR_UNREACH;
470 __u32 rel_info = 0;
471 struct ip6_tnl *t;
472 int err = -ENOENT;
473 int rel_msg = 0;
474 u8 tproto;
475 __u16 len;
476
477 /* If the packet doesn't contain the original IPv6 header we are
478 in trouble since we might need the source address for further
479 processing of the error. */
480
481 rcu_read_lock();
482 t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
483 if (!t)
484 goto out;
485
486 tproto = READ_ONCE(t->parms.proto);
487 if (tproto != ipproto && tproto != 0)
488 goto out;
489
490 err = 0;
491
492 switch (*type) {
493 case ICMPV6_DEST_UNREACH:
494 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
495 t->parms.name);
496 rel_msg = 1;
497 break;
498 case ICMPV6_TIME_EXCEED:
499 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
500 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
501 t->parms.name);
502 rel_msg = 1;
503 }
504 break;
505 case ICMPV6_PARAMPROB: {
506 struct ipv6_tlv_tnl_enc_lim *tel;
507 __u32 teli;
508
509 teli = 0;
510 if ((*code) == ICMPV6_HDR_FIELD)
511 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
512
513 if (teli && teli == *info - 2) {
514 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
515 if (tel->encap_limit == 0) {
516 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
517 t->parms.name);
518 rel_msg = 1;
519 }
520 } else {
521 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
522 t->parms.name);
523 }
524 break;
525 }
526 case ICMPV6_PKT_TOOBIG: {
527 __u32 mtu;
528
529 ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
530 sock_net_uid(net, NULL));
531 mtu = *info - offset;
532 if (mtu < IPV6_MIN_MTU)
533 mtu = IPV6_MIN_MTU;
534 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
535 if (len > mtu) {
536 rel_type = ICMPV6_PKT_TOOBIG;
537 rel_code = 0;
538 rel_info = mtu;
539 rel_msg = 1;
540 }
541 break;
542 }
543 case NDISC_REDIRECT:
544 ip6_redirect(skb, net, skb->dev->ifindex, 0,
545 sock_net_uid(net, NULL));
546 break;
547 }
548
549 *type = rel_type;
550 *code = rel_code;
551 *info = rel_info;
552 *msg = rel_msg;
553
554 out:
555 rcu_read_unlock();
556 return err;
557 }
558
559 static int
560 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
561 u8 type, u8 code, int offset, __be32 info)
562 {
563 __u32 rel_info = ntohl(info);
564 const struct iphdr *eiph;
565 struct sk_buff *skb2;
566 int err, rel_msg = 0;
567 u8 rel_type = type;
568 u8 rel_code = code;
569 struct rtable *rt;
570 struct flowi4 fl4;
571
572 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
573 &rel_msg, &rel_info, offset);
574 if (err < 0)
575 return err;
576
577 if (rel_msg == 0)
578 return 0;
579
580 switch (rel_type) {
581 case ICMPV6_DEST_UNREACH:
582 if (rel_code != ICMPV6_ADDR_UNREACH)
583 return 0;
584 rel_type = ICMP_DEST_UNREACH;
585 rel_code = ICMP_HOST_UNREACH;
586 break;
587 case ICMPV6_PKT_TOOBIG:
588 if (rel_code != 0)
589 return 0;
590 rel_type = ICMP_DEST_UNREACH;
591 rel_code = ICMP_FRAG_NEEDED;
592 break;
593 default:
594 return 0;
595 }
596
597 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
598 return 0;
599
600 skb2 = skb_clone(skb, GFP_ATOMIC);
601 if (!skb2)
602 return 0;
603
604 skb_dst_drop(skb2);
605
606 skb_pull(skb2, offset);
607 skb_reset_network_header(skb2);
608 eiph = ip_hdr(skb2);
609
610 /* Try to guess incoming interface */
611 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
612 0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
613 if (IS_ERR(rt))
614 goto out;
615
616 skb2->dev = rt->dst.dev;
617 ip_rt_put(rt);
618
619 /* route "incoming" packet */
620 if (rt->rt_flags & RTCF_LOCAL) {
621 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
622 eiph->daddr, eiph->saddr, 0, 0,
623 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
624 if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
625 if (!IS_ERR(rt))
626 ip_rt_put(rt);
627 goto out;
628 }
629 skb_dst_set(skb2, &rt->dst);
630 } else {
631 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
632 skb2->dev) ||
633 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
634 goto out;
635 }
636
637 /* change mtu on this route */
638 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
639 if (rel_info > dst_mtu(skb_dst(skb2)))
640 goto out;
641
642 skb_dst_update_pmtu_no_confirm(skb2, rel_info);
643 }
644
645 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
646
647 out:
648 kfree_skb(skb2);
649 return 0;
650 }
651
652 static int
653 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
654 u8 type, u8 code, int offset, __be32 info)
655 {
656 __u32 rel_info = ntohl(info);
657 int err, rel_msg = 0;
658 u8 rel_type = type;
659 u8 rel_code = code;
660
661 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
662 &rel_msg, &rel_info, offset);
663 if (err < 0)
664 return err;
665
666 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
667 struct rt6_info *rt;
668 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
669
670 if (!skb2)
671 return 0;
672
673 skb_dst_drop(skb2);
674 skb_pull(skb2, offset);
675 skb_reset_network_header(skb2);
676
677 /* Try to guess incoming interface */
678 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
679 NULL, 0, skb2, 0);
680
681 if (rt && rt->dst.dev)
682 skb2->dev = rt->dst.dev;
683
684 icmpv6_send(skb2, rel_type, rel_code, rel_info);
685
686 ip6_rt_put(rt);
687
688 kfree_skb(skb2);
689 }
690
691 return 0;
692 }
693
694 static int
695 mplsip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
696 u8 type, u8 code, int offset, __be32 info)
697 {
698 __u32 rel_info = ntohl(info);
699 int err, rel_msg = 0;
700 u8 rel_type = type;
701 u8 rel_code = code;
702
703 err = ip6_tnl_err(skb, IPPROTO_MPLS, opt, &rel_type, &rel_code,
704 &rel_msg, &rel_info, offset);
705 return err;
706 }
707
708 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
709 const struct ipv6hdr *ipv6h,
710 struct sk_buff *skb)
711 {
712 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
713
714 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
715 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
716
717 return IP6_ECN_decapsulate(ipv6h, skb);
718 }
719
720 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
721 const struct ipv6hdr *ipv6h,
722 struct sk_buff *skb)
723 {
724 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
725 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
726
727 return IP6_ECN_decapsulate(ipv6h, skb);
728 }
729
730 static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
731 const struct ipv6hdr *ipv6h,
732 struct sk_buff *skb)
733 {
734 /* ECN is not supported in AF_MPLS */
735 return 0;
736 }
737
738 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
739 const struct in6_addr *laddr,
740 const struct in6_addr *raddr)
741 {
742 struct __ip6_tnl_parm *p = &t->parms;
743 int ltype = ipv6_addr_type(laddr);
744 int rtype = ipv6_addr_type(raddr);
745 __u32 flags = 0;
746
747 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
748 flags = IP6_TNL_F_CAP_PER_PACKET;
749 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
750 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
751 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
752 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
753 if (ltype&IPV6_ADDR_UNICAST)
754 flags |= IP6_TNL_F_CAP_XMIT;
755 if (rtype&IPV6_ADDR_UNICAST)
756 flags |= IP6_TNL_F_CAP_RCV;
757 }
758 return flags;
759 }
760 EXPORT_SYMBOL(ip6_tnl_get_cap);
761
762 /* called with rcu_read_lock() */
763 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
764 const struct in6_addr *laddr,
765 const struct in6_addr *raddr)
766 {
767 struct __ip6_tnl_parm *p = &t->parms;
768 int ret = 0;
769 struct net *net = t->net;
770
771 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
772 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
773 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
774 struct net_device *ldev = NULL;
775
776 if (p->link)
777 ldev = dev_get_by_index_rcu(net, p->link);
778
779 if ((ipv6_addr_is_multicast(laddr) ||
780 likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
781 0, IFA_F_TENTATIVE))) &&
782 ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
783 likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
784 0, IFA_F_TENTATIVE))))
785 ret = 1;
786 }
787 return ret;
788 }
789 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
790
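/* dscp_ecn_decapsulate() returns 0 on success; any non-zero value is an
 * ECN mismatch that may be logged, and values above 1 cause the packet to
 * be dropped (see the err handling below).
 */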
791 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
792 const struct tnl_ptk_info *tpi,
793 struct metadata_dst *tun_dst,
794 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
795 const struct ipv6hdr *ipv6h,
796 struct sk_buff *skb),
797 bool log_ecn_err)
798 {
799 struct pcpu_sw_netstats *tstats;
800 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
801 int err;
802
803 if ((!(tpi->flags & TUNNEL_CSUM) &&
804 (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
805 ((tpi->flags & TUNNEL_CSUM) &&
806 !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
807 tunnel->dev->stats.rx_crc_errors++;
808 tunnel->dev->stats.rx_errors++;
809 goto drop;
810 }
811
812 if (tunnel->parms.i_flags & TUNNEL_SEQ) {
813 if (!(tpi->flags & TUNNEL_SEQ) ||
814 (tunnel->i_seqno &&
815 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
816 tunnel->dev->stats.rx_fifo_errors++;
817 tunnel->dev->stats.rx_errors++;
818 goto drop;
819 }
820 tunnel->i_seqno = ntohl(tpi->seq) + 1;
821 }
822
823 skb->protocol = tpi->proto;
824
825 /* Warning: All skb pointers will be invalidated! */
826 if (tunnel->dev->type == ARPHRD_ETHER) {
827 if (!pskb_may_pull(skb, ETH_HLEN)) {
828 tunnel->dev->stats.rx_length_errors++;
829 tunnel->dev->stats.rx_errors++;
830 goto drop;
831 }
832
833 ipv6h = ipv6_hdr(skb);
834 skb->protocol = eth_type_trans(skb, tunnel->dev);
835 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
836 } else {
837 skb->dev = tunnel->dev;
838 skb_reset_mac_header(skb);
839 }
840
841 skb_reset_network_header(skb);
842 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
843
844 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
845
846 err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
847 if (unlikely(err)) {
848 if (log_ecn_err)
849 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
850 &ipv6h->saddr,
851 ipv6_get_dsfield(ipv6h));
852 if (err > 1) {
853 ++tunnel->dev->stats.rx_frame_errors;
854 ++tunnel->dev->stats.rx_errors;
855 goto drop;
856 }
857 }
858
859 tstats = this_cpu_ptr(tunnel->dev->tstats);
860 u64_stats_update_begin(&tstats->syncp);
861 tstats->rx_packets++;
862 tstats->rx_bytes += skb->len;
863 u64_stats_update_end(&tstats->syncp);
864
865 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
866
867 if (tun_dst)
868 skb_dst_set(skb, (struct dst_entry *)tun_dst);
869
870 gro_cells_receive(&tunnel->gro_cells, skb);
871 return 0;
872
873 drop:
874 if (tun_dst)
875 dst_release((struct dst_entry *)tun_dst);
876 kfree_skb(skb);
877 return 0;
878 }
879
880 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
881 const struct tnl_ptk_info *tpi,
882 struct metadata_dst *tun_dst,
883 bool log_ecn_err)
884 {
885 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
886 const struct ipv6hdr *ipv6h,
887 struct sk_buff *skb);
888
889 dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
890 if (tpi->proto == htons(ETH_P_IP))
891 dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
892
893 return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
894 log_ecn_err);
895 }
896 EXPORT_SYMBOL(ip6_tnl_rcv);
897
898 static const struct tnl_ptk_info tpi_v6 = {
899 /* no tunnel info required for ipxip6. */
900 .proto = htons(ETH_P_IPV6),
901 };
902
903 static const struct tnl_ptk_info tpi_v4 = {
904 /* no tunnel info required for ipxip6. */
905 .proto = htons(ETH_P_IP),
906 };
907
908 static const struct tnl_ptk_info tpi_mpls = {
909 /* no tunnel info required for mplsip6. */
910 .proto = htons(ETH_P_MPLS_UC),
911 };
912
913 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
914 const struct tnl_ptk_info *tpi,
915 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
916 const struct ipv6hdr *ipv6h,
917 struct sk_buff *skb))
918 {
919 struct ip6_tnl *t;
920 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
921 struct metadata_dst *tun_dst = NULL;
922 int ret = -1;
923
924 rcu_read_lock();
925 t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);
926
927 if (t) {
928 u8 tproto = READ_ONCE(t->parms.proto);
929
930 if (tproto != ipproto && tproto != 0)
931 goto drop;
932 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
933 goto drop;
934 ipv6h = ipv6_hdr(skb);
935 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
936 goto drop;
937 if (iptunnel_pull_header(skb, 0, tpi->proto, false))
938 goto drop;
939 if (t->parms.collect_md) {
940 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
941 if (!tun_dst)
942 goto drop;
943 }
944 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
945 log_ecn_error);
946 }
947
948 rcu_read_unlock();
949
950 return ret;
951
952 drop:
953 rcu_read_unlock();
954 kfree_skb(skb);
955 return 0;
956 }
957
958 static int ip4ip6_rcv(struct sk_buff *skb)
959 {
960 return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
961 ip4ip6_dscp_ecn_decapsulate);
962 }
963
964 static int ip6ip6_rcv(struct sk_buff *skb)
965 {
966 return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
967 ip6ip6_dscp_ecn_decapsulate);
968 }
969
970 static int mplsip6_rcv(struct sk_buff *skb)
971 {
972 return ipxip6_rcv(skb, IPPROTO_MPLS, &tpi_mpls,
973 mplsip6_dscp_ecn_decapsulate);
974 }
975
976 struct ipv6_tel_txoption {
977 struct ipv6_txoptions ops;
978 __u8 dst_opt[8];
979 };
980
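/* Build an 8-byte destination options header carrying the tunnel
 * encapsulation limit: bytes 0-1 (next header, Hdr Ext Len = 0) are filled
 * in when the header is pushed, bytes 2-4 hold the limit TLV and bytes 5-7
 * a PadN option padding the header to 8 octets.
 */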
981 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
982 {
983 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
984
985 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
986 opt->dst_opt[3] = 1;
987 opt->dst_opt[4] = encap_limit;
988 opt->dst_opt[5] = IPV6_TLV_PADN;
989 opt->dst_opt[6] = 1;
990
991 opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
992 opt->ops.opt_nflen = 8;
993 }
994
995 /**
996 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
997 * @t: the outgoing tunnel device
998 * @hdr: IPv6 header from the incoming packet
999 *
1000 * Description:
1001 * Avoid trivial tunneling loop by checking that tunnel exit-point
1002 * doesn't match source of incoming packet.
1003 *
1004 * Return:
1005 * 1 if conflict,
1006 * 0 else
1007 **/
1008
1009 static inline bool
1010 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
1011 {
1012 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
1013 }
1014
1015 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
1016 const struct in6_addr *laddr,
1017 const struct in6_addr *raddr)
1018 {
1019 struct __ip6_tnl_parm *p = &t->parms;
1020 int ret = 0;
1021 struct net *net = t->net;
1022
1023 if (t->parms.collect_md)
1024 return 1;
1025
1026 if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
1027 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
1028 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
1029 struct net_device *ldev = NULL;
1030
1031 rcu_read_lock();
1032 if (p->link)
1033 ldev = dev_get_by_index_rcu(net, p->link);
1034
1035 if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
1036 0, IFA_F_TENTATIVE)))
1037 pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
1038 p->name);
1039 else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
1040 !ipv6_addr_is_multicast(raddr) &&
1041 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
1042 true, 0, IFA_F_TENTATIVE)))
1043 pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
1044 p->name);
1045 else
1046 ret = 1;
1047 rcu_read_unlock();
1048 }
1049 return ret;
1050 }
1051 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1052
1053 /**
1054 * ip6_tnl_xmit - encapsulate packet and send
1055 * @skb: the outgoing socket buffer
1056 * @dev: the outgoing tunnel device
1057 * @dsfield: dscp code for outer header
1058 * @fl6: flow of tunneled packet
1059 * @encap_limit: encapsulation limit
1060 * @pmtu: Path MTU is stored if packet is too big
1061 * @proto: next header value
1062 *
1063 * Description:
1064 * Build new header and do some sanity checks on the packet before sending
1065 * it.
1066 *
1067 * Return:
1068 * 0 on success
1069 * -1 on failure
1070 * %-EMSGSIZE if the packet is too big; the link MTU is returned via @pmtu
1071 **/
1072
1073 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1074 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1075 __u8 proto)
1076 {
1077 struct ip6_tnl *t = netdev_priv(dev);
1078 struct net *net = t->net;
1079 struct net_device_stats *stats = &t->dev->stats;
1080 struct ipv6hdr *ipv6h;
1081 struct ipv6_tel_txoption opt;
1082 struct dst_entry *dst = NULL, *ndst = NULL;
1083 struct net_device *tdev;
1084 int mtu;
1085 unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1086 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1087 unsigned int max_headroom = psh_hlen;
1088 bool use_cache = false;
1089 u8 hop_limit;
1090 int err = -1;
1091
1092 if (t->parms.collect_md) {
1093 hop_limit = skb_tunnel_info(skb)->key.ttl;
1094 goto route_lookup;
1095 } else {
1096 hop_limit = t->parms.hop_limit;
1097 }
1098
1099 /* NBMA tunnel */
1100 if (ipv6_addr_any(&t->parms.raddr)) {
1101 if (skb->protocol == htons(ETH_P_IPV6)) {
1102 struct in6_addr *addr6;
1103 struct neighbour *neigh;
1104 int addr_type;
1105
1106 if (!skb_dst(skb))
1107 goto tx_err_link_failure;
1108
1109 neigh = dst_neigh_lookup(skb_dst(skb),
1110 &ipv6_hdr(skb)->daddr);
1111 if (!neigh)
1112 goto tx_err_link_failure;
1113
1114 addr6 = (struct in6_addr *)&neigh->primary_key;
1115 addr_type = ipv6_addr_type(addr6);
1116
1117 if (addr_type == IPV6_ADDR_ANY)
1118 addr6 = &ipv6_hdr(skb)->daddr;
1119
1120 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1121 neigh_release(neigh);
1122 } else if (skb->protocol == htons(ETH_P_IP)) {
1123 const struct rtable *rt = skb_rtable(skb);
1124
1125 if (!rt)
1126 goto tx_err_link_failure;
1127
1128 if (rt->rt_gw_family == AF_INET6)
1129 memcpy(&fl6->daddr, &rt->rt_gw6, sizeof(fl6->daddr));
1130 }
1131 } else if (t->parms.proto != 0 && !(t->parms.flags &
1132 (IP6_TNL_F_USE_ORIG_TCLASS |
1133 IP6_TNL_F_USE_ORIG_FWMARK))) {
1134 /* enable the cache only if neither the outer protocol nor the
1135 * routing decision depends on the current inner header value
1136 */
1137 use_cache = true;
1138 }
1139
1140 if (use_cache)
1141 dst = dst_cache_get(&t->dst_cache);
1142
1143 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1144 goto tx_err_link_failure;
1145
1146 if (!dst) {
1147 route_lookup:
1148 /* add dsfield to flowlabel for route lookup */
1149 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1150
1151 dst = ip6_route_output(net, NULL, fl6);
1152
1153 if (dst->error)
1154 goto tx_err_link_failure;
1155 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1156 if (IS_ERR(dst)) {
1157 err = PTR_ERR(dst);
1158 dst = NULL;
1159 goto tx_err_link_failure;
1160 }
1161 if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
1162 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1163 &fl6->daddr, 0, &fl6->saddr))
1164 goto tx_err_link_failure;
1165 ndst = dst;
1166 }
1167
1168 tdev = dst->dev;
1169
1170 if (tdev == dev) {
1171 stats->collisions++;
1172 net_warn_ratelimited("%s: Local routing loop detected!\n",
1173 t->parms.name);
1174 goto tx_err_dst_release;
1175 }
1176 mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
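/* Reserve 8 bytes for the destination options header built by
 * init_tel_txopt() when an encapsulation limit is in use.
 */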
1177 if (encap_limit >= 0) {
1178 max_headroom += 8;
1179 mtu -= 8;
1180 }
1181 mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
1182 IPV6_MIN_MTU : IPV4_MIN_MTU);
1183
1184 skb_dst_update_pmtu_no_confirm(skb, mtu);
1185 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1186 *pmtu = mtu;
1187 err = -EMSGSIZE;
1188 goto tx_err_dst_release;
1189 }
1190
1191 if (t->err_count > 0) {
1192 if (time_before(jiffies,
1193 t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1194 t->err_count--;
1195
1196 dst_link_failure(skb);
1197 } else {
1198 t->err_count = 0;
1199 }
1200 }
1201
1202 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1203
1204 /*
1205 * Okay, now see if we can stuff it in the buffer as-is.
1206 */
1207 max_headroom += LL_RESERVED_SPACE(tdev);
1208
1209 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1210 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1211 struct sk_buff *new_skb;
1212
1213 new_skb = skb_realloc_headroom(skb, max_headroom);
1214 if (!new_skb)
1215 goto tx_err_dst_release;
1216
1217 if (skb->sk)
1218 skb_set_owner_w(new_skb, skb->sk);
1219 consume_skb(skb);
1220 skb = new_skb;
1221 }
1222
1223 if (t->parms.collect_md) {
1224 if (t->encap.type != TUNNEL_ENCAP_NONE)
1225 goto tx_err_dst_release;
1226 } else {
1227 if (use_cache && ndst)
1228 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1229 }
1230 skb_dst_set(skb, dst);
1231
1232 if (hop_limit == 0) {
1233 if (skb->protocol == htons(ETH_P_IP))
1234 hop_limit = ip_hdr(skb)->ttl;
1235 else if (skb->protocol == htons(ETH_P_IPV6))
1236 hop_limit = ipv6_hdr(skb)->hop_limit;
1237 else
1238 hop_limit = ip6_dst_hoplimit(dst);
1239 }
1240
1241 /* Calculate max headroom for all the headers and adjust
1242 * needed_headroom if necessary.
1243 */
1244 max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1245 + dst->header_len + t->hlen;
1246 if (max_headroom > dev->needed_headroom)
1247 dev->needed_headroom = max_headroom;
1248
1249 err = ip6_tnl_encap(skb, t, &proto, fl6);
1250 if (err)
1251 return err;
1252
1253 if (encap_limit >= 0) {
1254 init_tel_txopt(&opt, encap_limit);
1255 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1256 }
1257
1258 skb_push(skb, sizeof(struct ipv6hdr));
1259 skb_reset_network_header(skb);
1260 ipv6h = ipv6_hdr(skb);
1261 ip6_flow_hdr(ipv6h, dsfield,
1262 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1263 ipv6h->hop_limit = hop_limit;
1264 ipv6h->nexthdr = proto;
1265 ipv6h->saddr = fl6->saddr;
1266 ipv6h->daddr = fl6->daddr;
1267 ip6tunnel_xmit(NULL, skb, dev);
1268 return 0;
1269 tx_err_link_failure:
1270 stats->tx_carrier_errors++;
1271 dst_link_failure(skb);
1272 tx_err_dst_release:
1273 dst_release(dst);
1274 return err;
1275 }
1276 EXPORT_SYMBOL(ip6_tnl_xmit);
1277
1278 static inline int
1279 ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
1280 u8 protocol)
1281 {
1282 struct ip6_tnl *t = netdev_priv(dev);
1283 struct ipv6hdr *ipv6h;
1284 const struct iphdr *iph;
1285 int encap_limit = -1;
1286 __u16 offset;
1287 struct flowi6 fl6;
1288 __u8 dsfield, orig_dsfield;
1289 __u32 mtu;
1290 u8 tproto;
1291 int err;
1292
1293 tproto = READ_ONCE(t->parms.proto);
1294 if (tproto != protocol && tproto != 0)
1295 return -1;
1296
1297 if (t->parms.collect_md) {
1298 struct ip_tunnel_info *tun_info;
1299 const struct ip_tunnel_key *key;
1300
1301 tun_info = skb_tunnel_info(skb);
1302 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1303 ip_tunnel_info_af(tun_info) != AF_INET6))
1304 return -1;
1305 key = &tun_info->key;
1306 memset(&fl6, 0, sizeof(fl6));
1307 fl6.flowi6_proto = protocol;
1308 fl6.saddr = key->u.ipv6.src;
1309 fl6.daddr = key->u.ipv6.dst;
1310 fl6.flowlabel = key->label;
1311 dsfield = key->tos;
1312 switch (protocol) {
1313 case IPPROTO_IPIP:
1314 iph = ip_hdr(skb);
1315 orig_dsfield = ipv4_get_dsfield(iph);
1316 break;
1317 case IPPROTO_IPV6:
1318 ipv6h = ipv6_hdr(skb);
1319 orig_dsfield = ipv6_get_dsfield(ipv6h);
1320 break;
1321 default:
1322 orig_dsfield = dsfield;
1323 break;
1324 }
1325 } else {
1326 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1327 encap_limit = t->parms.encap_limit;
1328 if (protocol == IPPROTO_IPV6) {
1329 offset = ip6_tnl_parse_tlv_enc_lim(skb,
1330 skb_network_header(skb));
1331 /* ip6_tnl_parse_tlv_enc_lim() might have
1332 * reallocated skb->head
1333 */
1334 if (offset > 0) {
1335 struct ipv6_tlv_tnl_enc_lim *tel;
1336
1337 tel = (void *)&skb_network_header(skb)[offset];
1338 if (tel->encap_limit == 0) {
1339 icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
1340 ICMPV6_HDR_FIELD, offset + 2);
1341 return -1;
1342 }
1343 encap_limit = tel->encap_limit - 1;
1344 }
1345 }
1346
1347 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1348 fl6.flowi6_proto = protocol;
1349
1350 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1351 fl6.flowi6_mark = skb->mark;
1352 else
1353 fl6.flowi6_mark = t->parms.fwmark;
1354 switch (protocol) {
1355 case IPPROTO_IPIP:
1356 iph = ip_hdr(skb);
1357 orig_dsfield = ipv4_get_dsfield(iph);
1358 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1359 dsfield = orig_dsfield;
1360 else
1361 dsfield = ip6_tclass(t->parms.flowinfo);
1362 break;
1363 case IPPROTO_IPV6:
1364 ipv6h = ipv6_hdr(skb);
1365 orig_dsfield = ipv6_get_dsfield(ipv6h);
1366 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1367 dsfield = orig_dsfield;
1368 else
1369 dsfield = ip6_tclass(t->parms.flowinfo);
1370 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1371 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1372 break;
1373 default:
1374 orig_dsfield = dsfield = ip6_tclass(t->parms.flowinfo);
1375 break;
1376 }
1377 }
1378
1379 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1380 dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);
1381
1382 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1383 return -1;
1384
1385 skb_set_inner_ipproto(skb, protocol);
1386
1387 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1388 protocol);
1389 if (err != 0) {
1390 /* XXX: send ICMP error even if DF is not set. */
1391 if (err == -EMSGSIZE)
1392 switch (protocol) {
1393 case IPPROTO_IPIP:
1394 icmp_ndo_send(skb, ICMP_DEST_UNREACH,
1395 ICMP_FRAG_NEEDED, htonl(mtu));
1396 break;
1397 case IPPROTO_IPV6:
1398 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1399 break;
1400 default:
1401 break;
1402 }
1403 return -1;
1404 }
1405
1406 return 0;
1407 }
1408
1409 static netdev_tx_t
1410 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1411 {
1412 struct ip6_tnl *t = netdev_priv(dev);
1413 struct net_device_stats *stats = &t->dev->stats;
1414 u8 ipproto;
1415 int ret;
1416
1417 if (!pskb_inet_may_pull(skb))
1418 goto tx_err;
1419
1420 switch (skb->protocol) {
1421 case htons(ETH_P_IP):
1422 ipproto = IPPROTO_IPIP;
1423 break;
1424 case htons(ETH_P_IPV6):
1425 if (ip6_tnl_addr_conflict(t, ipv6_hdr(skb)))
1426 goto tx_err;
1427 ipproto = IPPROTO_IPV6;
1428 break;
1429 case htons(ETH_P_MPLS_UC):
1430 ipproto = IPPROTO_MPLS;
1431 break;
1432 default:
1433 goto tx_err;
1434 }
1435
1436 ret = ipxip6_tnl_xmit(skb, dev, ipproto);
1437 if (ret < 0)
1438 goto tx_err;
1439
1440 return NETDEV_TX_OK;
1441
1442 tx_err:
1443 stats->tx_errors++;
1444 stats->tx_dropped++;
1445 kfree_skb(skb);
1446 return NETDEV_TX_OK;
1447 }
1448
1449 static void ip6_tnl_link_config(struct ip6_tnl *t)
1450 {
1451 struct net_device *dev = t->dev;
1452 struct net_device *tdev = NULL;
1453 struct __ip6_tnl_parm *p = &t->parms;
1454 struct flowi6 *fl6 = &t->fl.u.ip6;
1455 unsigned int mtu;
1456 int t_hlen;
1457
1458 __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
1459 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1460
1461 /* Set up flowi template */
1462 fl6->saddr = p->laddr;
1463 fl6->daddr = p->raddr;
1464 fl6->flowi6_oif = p->link;
1465 fl6->flowlabel = 0;
1466
1467 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1468 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1469 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1470 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1471
1472 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1473 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1474
1475 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1476 dev->flags |= IFF_POINTOPOINT;
1477 else
1478 dev->flags &= ~IFF_POINTOPOINT;
1479
1480 t->tun_hlen = 0;
1481 t->hlen = t->encap_hlen + t->tun_hlen;
1482 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1483
1484 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1485 int strict = (ipv6_addr_type(&p->raddr) &
1486 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1487
1488 struct rt6_info *rt = rt6_lookup(t->net,
1489 &p->raddr, &p->laddr,
1490 p->link, NULL, strict);
1491 if (rt) {
1492 tdev = rt->dst.dev;
1493 ip6_rt_put(rt);
1494 }
1495
1496 if (!tdev && p->link)
1497 tdev = __dev_get_by_index(t->net, p->link);
1498
1499 if (tdev) {
1500 dev->hard_header_len = tdev->hard_header_len + t_hlen;
1501 mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
1502
1503 dev->mtu = mtu - t_hlen;
1504 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1505 dev->mtu -= 8;
1506
1507 if (dev->mtu < IPV6_MIN_MTU)
1508 dev->mtu = IPV6_MIN_MTU;
1509 }
1510 }
1511 }
1512
1513 /**
1514 * ip6_tnl_change - update the tunnel parameters
1515 * @t: tunnel to be changed
1516 * @p: tunnel configuration parameters
1517 *
1518 * Description:
1519 * ip6_tnl_change() updates the tunnel parameters
1520 **/
1521
1522 static int
1523 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1524 {
1525 t->parms.laddr = p->laddr;
1526 t->parms.raddr = p->raddr;
1527 t->parms.flags = p->flags;
1528 t->parms.hop_limit = p->hop_limit;
1529 t->parms.encap_limit = p->encap_limit;
1530 t->parms.flowinfo = p->flowinfo;
1531 t->parms.link = p->link;
1532 t->parms.proto = p->proto;
1533 t->parms.fwmark = p->fwmark;
1534 dst_cache_reset(&t->dst_cache);
1535 ip6_tnl_link_config(t);
1536 return 0;
1537 }
1538
1539 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1540 {
1541 struct net *net = t->net;
1542 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1543 int err;
1544
1545 ip6_tnl_unlink(ip6n, t);
1546 synchronize_net();
1547 err = ip6_tnl_change(t, p);
1548 ip6_tnl_link(ip6n, t);
1549 netdev_state_change(t->dev);
1550 return err;
1551 }
1552
1553 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1554 {
1555 /* for the default tnl0 device, only the proto may be changed */
1556 t->parms.proto = p->proto;
1557 netdev_state_change(t->dev);
1558 return 0;
1559 }
1560
1561 static void
1562 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1563 {
1564 p->laddr = u->laddr;
1565 p->raddr = u->raddr;
1566 p->flags = u->flags;
1567 p->hop_limit = u->hop_limit;
1568 p->encap_limit = u->encap_limit;
1569 p->flowinfo = u->flowinfo;
1570 p->link = u->link;
1571 p->proto = u->proto;
1572 memcpy(p->name, u->name, sizeof(u->name));
1573 }
1574
1575 static void
1576 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1577 {
1578 u->laddr = p->laddr;
1579 u->raddr = p->raddr;
1580 u->flags = p->flags;
1581 u->hop_limit = p->hop_limit;
1582 u->encap_limit = p->encap_limit;
1583 u->flowinfo = p->flowinfo;
1584 u->link = p->link;
1585 u->proto = p->proto;
1586 memcpy(u->name, p->name, sizeof(u->name));
1587 }
1588
1589 /**
1590 * ip6_tnl_siocdevprivate - configure ipv6 tunnels from userspace
1591 * @dev: virtual device associated with tunnel
1592 * @ifr: unused
1593 * @data: parameters passed from userspace
1594 * @cmd: command to be performed
1595 *
1596 * Description:
1597 * ip6_tnl_siocdevprivate() is used for managing IPv6 tunnels
1598 * from userspace.
1599 *
1600 * The possible commands are the following:
1601 * %SIOCGETTUNNEL: get tunnel parameters for device
1602 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1603 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1604 * %SIOCDELTUNNEL: delete tunnel
1605 *
1606 * The fallback device "ip6tnl0", created during module
1607 * initialization, can be used for creating other tunnel devices.
1608 *
1609 * Return:
1610 * 0 on success,
1611 * %-EFAULT if unable to copy data to or from userspace,
1612 * %-EPERM if current process lacks the %CAP_NET_ADMIN capability,
1613 * %-EINVAL if passed tunnel parameters are invalid,
1614 * %-EEXIST if changing a tunnel's parameters would cause a conflict
1615 * %-ENODEV if attempting to change or delete a nonexistent device
1616 **/
1617
1618 static int
1619 ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
1620 void __user *data, int cmd)
1621 {
1622 int err = 0;
1623 struct ip6_tnl_parm p;
1624 struct __ip6_tnl_parm p1;
1625 struct ip6_tnl *t = netdev_priv(dev);
1626 struct net *net = t->net;
1627 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1628
1629 memset(&p1, 0, sizeof(p1));
1630
1631 switch (cmd) {
1632 case SIOCGETTUNNEL:
1633 if (dev == ip6n->fb_tnl_dev) {
1634 if (copy_from_user(&p, data, sizeof(p))) {
1635 err = -EFAULT;
1636 break;
1637 }
1638 ip6_tnl_parm_from_user(&p1, &p);
1639 t = ip6_tnl_locate(net, &p1, 0);
1640 if (IS_ERR(t))
1641 t = netdev_priv(dev);
1642 } else {
1643 memset(&p, 0, sizeof(p));
1644 }
1645 ip6_tnl_parm_to_user(&p, &t->parms);
1646 if (copy_to_user(data, &p, sizeof(p)))
1647 err = -EFAULT;
1648 break;
1649 case SIOCADDTUNNEL:
1650 case SIOCCHGTUNNEL:
1651 err = -EPERM;
1652 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1653 break;
1654 err = -EFAULT;
1655 if (copy_from_user(&p, data, sizeof(p)))
1656 break;
1657 err = -EINVAL;
1658 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1659 p.proto != 0)
1660 break;
1661 ip6_tnl_parm_from_user(&p1, &p);
1662 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1663 if (cmd == SIOCCHGTUNNEL) {
1664 if (!IS_ERR(t)) {
1665 if (t->dev != dev) {
1666 err = -EEXIST;
1667 break;
1668 }
1669 } else
1670 t = netdev_priv(dev);
1671 if (dev == ip6n->fb_tnl_dev)
1672 err = ip6_tnl0_update(t, &p1);
1673 else
1674 err = ip6_tnl_update(t, &p1);
1675 }
1676 if (!IS_ERR(t)) {
1677 err = 0;
1678 ip6_tnl_parm_to_user(&p, &t->parms);
1679 if (copy_to_user(data, &p, sizeof(p)))
1680 err = -EFAULT;
1681
1682 } else {
1683 err = PTR_ERR(t);
1684 }
1685 break;
1686 case SIOCDELTUNNEL:
1687 err = -EPERM;
1688 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1689 break;
1690
1691 if (dev == ip6n->fb_tnl_dev) {
1692 err = -EFAULT;
1693 if (copy_from_user(&p, data, sizeof(p)))
1694 break;
1695 err = -ENOENT;
1696 ip6_tnl_parm_from_user(&p1, &p);
1697 t = ip6_tnl_locate(net, &p1, 0);
1698 if (IS_ERR(t))
1699 break;
1700 err = -EPERM;
1701 if (t->dev == ip6n->fb_tnl_dev)
1702 break;
1703 dev = t->dev;
1704 }
1705 err = 0;
1706 unregister_netdevice(dev);
1707 break;
1708 default:
1709 err = -EINVAL;
1710 }
1711 return err;
1712 }
1713
1714 /**
1715 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1716 * @dev: virtual device associated with tunnel
1717 * @new_mtu: the new mtu
1718 *
1719 * Return:
1720 * 0 on success,
1721 * %-EINVAL if mtu too small
1722 **/
1723
1724 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1725 {
1726 struct ip6_tnl *tnl = netdev_priv(dev);
1727
1728 if (tnl->parms.proto == IPPROTO_IPV6) {
1729 if (new_mtu < IPV6_MIN_MTU)
1730 return -EINVAL;
1731 } else {
1732 if (new_mtu < ETH_MIN_MTU)
1733 return -EINVAL;
1734 }
1735 if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1736 if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1737 return -EINVAL;
1738 } else {
1739 if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1740 return -EINVAL;
1741 }
1742 dev->mtu = new_mtu;
1743 return 0;
1744 }
1745 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1746
1747 int ip6_tnl_get_iflink(const struct net_device *dev)
1748 {
1749 struct ip6_tnl *t = netdev_priv(dev);
1750
1751 return t->parms.link;
1752 }
1753 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1754
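/* Register/unregister an outer encapsulation handler (e.g. FOU/GUE from
 * fou6) in the global ip6tun_encaps[] array; -1 is returned if the slot is
 * already in use (add) or does not hold the expected ops (del).
 */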
1755 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1756 unsigned int num)
1757 {
1758 if (num >= MAX_IPTUN_ENCAP_OPS)
1759 return -ERANGE;
1760
1761 return !cmpxchg((const struct ip6_tnl_encap_ops **)
1762 &ip6tun_encaps[num],
1763 NULL, ops) ? 0 : -1;
1764 }
1765 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1766
1767 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1768 unsigned int num)
1769 {
1770 int ret;
1771
1772 if (num >= MAX_IPTUN_ENCAP_OPS)
1773 return -ERANGE;
1774
1775 ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1776 &ip6tun_encaps[num],
1777 ops, NULL) == ops) ? 0 : -1;
1778
1779 synchronize_net();
1780
1781 return ret;
1782 }
1783 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1784
1785 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1786 struct ip_tunnel_encap *ipencap)
1787 {
1788 int hlen;
1789
1790 memset(&t->encap, 0, sizeof(t->encap));
1791
1792 hlen = ip6_encap_hlen(ipencap);
1793 if (hlen < 0)
1794 return hlen;
1795
1796 t->encap.type = ipencap->type;
1797 t->encap.sport = ipencap->sport;
1798 t->encap.dport = ipencap->dport;
1799 t->encap.flags = ipencap->flags;
1800
1801 t->encap_hlen = hlen;
1802 t->hlen = t->encap_hlen + t->tun_hlen;
1803
1804 return 0;
1805 }
1806 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1807
1808 static const struct net_device_ops ip6_tnl_netdev_ops = {
1809 .ndo_init = ip6_tnl_dev_init,
1810 .ndo_uninit = ip6_tnl_dev_uninit,
1811 .ndo_start_xmit = ip6_tnl_start_xmit,
1812 .ndo_siocdevprivate = ip6_tnl_siocdevprivate,
1813 .ndo_change_mtu = ip6_tnl_change_mtu,
1814 .ndo_get_stats64 = dev_get_tstats64,
1815 .ndo_get_iflink = ip6_tnl_get_iflink,
1816 };
1817
1818 #define IPXIPX_FEATURES (NETIF_F_SG | \
1819 NETIF_F_FRAGLIST | \
1820 NETIF_F_HIGHDMA | \
1821 NETIF_F_GSO_SOFTWARE | \
1822 NETIF_F_HW_CSUM)
1823
1824 /**
1825 * ip6_tnl_dev_setup - setup virtual tunnel device
1826 * @dev: virtual device associated with tunnel
1827 *
1828 * Description:
1829 * Initialize function pointers and device parameters
1830 **/
1831
1832 static void ip6_tnl_dev_setup(struct net_device *dev)
1833 {
1834 dev->netdev_ops = &ip6_tnl_netdev_ops;
1835 dev->header_ops = &ip_tunnel_header_ops;
1836 dev->needs_free_netdev = true;
1837 dev->priv_destructor = ip6_dev_free;
1838
1839 dev->type = ARPHRD_TUNNEL6;
1840 dev->flags |= IFF_NOARP;
1841 dev->addr_len = sizeof(struct in6_addr);
1842 dev->features |= NETIF_F_LLTX;
1843 netif_keep_dst(dev);
1844
1845 dev->features |= IPXIPX_FEATURES;
1846 dev->hw_features |= IPXIPX_FEATURES;
1847
1848 /* This perm addr will be used as interface identifier by IPv6 */
1849 dev->addr_assign_type = NET_ADDR_RANDOM;
1850 eth_random_addr(dev->perm_addr);
1851 }
1852
1853
1854 /**
1855 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1856 * @dev: virtual device associated with tunnel
1857 **/
1858
1859 static inline int
1860 ip6_tnl_dev_init_gen(struct net_device *dev)
1861 {
1862 struct ip6_tnl *t = netdev_priv(dev);
1863 int ret;
1864 int t_hlen;
1865
1866 t->dev = dev;
1867 t->net = dev_net(dev);
1868 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1869 if (!dev->tstats)
1870 return -ENOMEM;
1871
1872 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1873 if (ret)
1874 goto free_stats;
1875
1876 ret = gro_cells_init(&t->gro_cells, dev);
1877 if (ret)
1878 goto destroy_dst;
1879
1880 t->tun_hlen = 0;
1881 t->hlen = t->encap_hlen + t->tun_hlen;
1882 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1883
1884 dev->type = ARPHRD_TUNNEL6;
1885 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1886 dev->mtu = ETH_DATA_LEN - t_hlen;
1887 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1888 dev->mtu -= 8;
1889 dev->min_mtu = ETH_MIN_MTU;
1890 dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
1891
1892 dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL);
1893 return 0;
1894
1895 destroy_dst:
1896 dst_cache_destroy(&t->dst_cache);
1897 free_stats:
1898 free_percpu(dev->tstats);
1899 dev->tstats = NULL;
1900
1901 return ret;
1902 }
1903
1904 /**
1905 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1906 * @dev: virtual device associated with tunnel
1907 **/
1908
1909 static int ip6_tnl_dev_init(struct net_device *dev)
1910 {
1911 struct ip6_tnl *t = netdev_priv(dev);
1912 int err = ip6_tnl_dev_init_gen(dev);
1913
1914 if (err)
1915 return err;
1916 ip6_tnl_link_config(t);
1917 if (t->parms.collect_md)
1918 netif_keep_dst(dev);
1919 return 0;
1920 }
1921
1922 /**
1923 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1924 * @dev: fallback device
1925 *
1926 * Return: 0
1927 **/
1928
1929 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1930 {
1931 struct ip6_tnl *t = netdev_priv(dev);
1932 struct net *net = dev_net(dev);
1933 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1934
1935 t->parms.proto = IPPROTO_IPV6;
1936
1937 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1938 return 0;
1939 }
1940
1941 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1942 struct netlink_ext_ack *extack)
1943 {
1944 u8 proto;
1945
1946 if (!data || !data[IFLA_IPTUN_PROTO])
1947 return 0;
1948
1949 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1950 if (proto != IPPROTO_IPV6 &&
1951 proto != IPPROTO_IPIP &&
1952 proto != 0)
1953 return -EINVAL;
1954
1955 return 0;
1956 }
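
/* Editorial example, assuming the iproute2 front end: the three protocol
 * values accepted above correspond to the "mode" keyword of
 * "ip link add ... type ip6tnl", roughly:
 *
 *   ip link add ip6tnl1 type ip6tnl mode ip6ip6 \
 *           local 2001:db8::1 remote 2001:db8::2     -> IPPROTO_IPV6
 *   ip link add ip6tnl1 type ip6tnl mode ipip6 ...   -> IPPROTO_IPIP
 *   ip link add ip6tnl1 type ip6tnl mode any ...     -> 0 (accept both)
 *
 * Any other IFLA_IPTUN_PROTO value is rejected with -EINVAL.
 */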
1957
1958 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1959 struct __ip6_tnl_parm *parms)
1960 {
1961 memset(parms, 0, sizeof(*parms));
1962
1963 if (!data)
1964 return;
1965
1966 if (data[IFLA_IPTUN_LINK])
1967 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1968
1969 if (data[IFLA_IPTUN_LOCAL])
1970 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1971
1972 if (data[IFLA_IPTUN_REMOTE])
1973 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1974
1975 if (data[IFLA_IPTUN_TTL])
1976 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1977
1978 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1979 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1980
1981 if (data[IFLA_IPTUN_FLOWINFO])
1982 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1983
1984 if (data[IFLA_IPTUN_FLAGS])
1985 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1986
1987 if (data[IFLA_IPTUN_PROTO])
1988 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1989
1990 if (data[IFLA_IPTUN_COLLECT_METADATA])
1991 parms->collect_md = true;
1992
1993 if (data[IFLA_IPTUN_FWMARK])
1994 parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1995 }
1996
1997 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1998 struct ip_tunnel_encap *ipencap)
1999 {
2000 bool ret = false;
2001
2002 memset(ipencap, 0, sizeof(*ipencap));
2003
2004 if (!data)
2005 return ret;
2006
2007 if (data[IFLA_IPTUN_ENCAP_TYPE]) {
2008 ret = true;
2009 ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
2010 }
2011
2012 if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
2013 ret = true;
2014 ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
2015 }
2016
2017 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
2018 ret = true;
2019 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
2020 }
2021
2022 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
2023 ret = true;
2024 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
2025 }
2026
2027 return ret;
2028 }
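
/* Editorial note (an assumption about typical usage, not stated in the
 * original source): the IFLA_IPTUN_ENCAP_* attributes parsed above
 * describe an optional outer UDP encapsulation, e.g. ipencap->type set
 * to TUNNEL_ENCAP_FOU or TUNNEL_ENCAP_GUE together with the UDP ports.
 * Returning true merely tells the callers below that
 * ip6_tnl_encap_setup() needs to be called.
 */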
2029
2030 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
2031 struct nlattr *tb[], struct nlattr *data[],
2032 struct netlink_ext_ack *extack)
2033 {
2034 struct net *net = dev_net(dev);
2035 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2036 struct ip_tunnel_encap ipencap;
2037 struct ip6_tnl *nt, *t;
2038 int err;
2039
2040 nt = netdev_priv(dev);
2041
2042 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2043 err = ip6_tnl_encap_setup(nt, &ipencap);
2044 if (err < 0)
2045 return err;
2046 }
2047
2048 ip6_tnl_netlink_parms(data, &nt->parms);
2049
2050 if (nt->parms.collect_md) {
2051 if (rtnl_dereference(ip6n->collect_md_tun))
2052 return -EEXIST;
2053 } else {
2054 t = ip6_tnl_locate(net, &nt->parms, 0);
2055 if (!IS_ERR(t))
2056 return -EEXIST;
2057 }
2058
2059 err = ip6_tnl_create2(dev);
2060 if (!err && tb[IFLA_MTU])
2061 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2062
2063 return err;
2064 }
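
/* Editorial example, assuming the iproute2 front end: a metadata-based
 * tunnel is requested with "ip link add ip6tnl1 type ip6tnl external",
 * which sets IFLA_IPTUN_COLLECT_METADATA.  As enforced above, at most
 * one such device can exist per network namespace; a second attempt
 * fails with -EEXIST, as does re-creating an ordinary tunnel whose
 * endpoints already match an existing device.
 */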
2065
2066 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2067 struct nlattr *data[],
2068 struct netlink_ext_ack *extack)
2069 {
2070 struct ip6_tnl *t = netdev_priv(dev);
2071 struct __ip6_tnl_parm p;
2072 struct net *net = t->net;
2073 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2074 struct ip_tunnel_encap ipencap;
2075
2076 if (dev == ip6n->fb_tnl_dev)
2077 return -EINVAL;
2078
2079 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2080 int err = ip6_tnl_encap_setup(t, &ipencap);
2081
2082 if (err < 0)
2083 return err;
2084 }
2085 ip6_tnl_netlink_parms(data, &p);
2086 if (p.collect_md)
2087 return -EINVAL;
2088
2089 t = ip6_tnl_locate(net, &p, 0);
2090 if (!IS_ERR(t)) {
2091 if (t->dev != dev)
2092 return -EEXIST;
2093 } else
2094 t = netdev_priv(dev);
2095
2096 return ip6_tnl_update(t, &p);
2097 }
2098
2099 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2100 {
2101 struct net *net = dev_net(dev);
2102 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2103
2104 if (dev != ip6n->fb_tnl_dev)
2105 unregister_netdevice_queue(dev, head);
2106 }
2107
2108 static size_t ip6_tnl_get_size(const struct net_device *dev)
2109 {
2110 return
2111 /* IFLA_IPTUN_LINK */
2112 nla_total_size(4) +
2113 /* IFLA_IPTUN_LOCAL */
2114 nla_total_size(sizeof(struct in6_addr)) +
2115 /* IFLA_IPTUN_REMOTE */
2116 nla_total_size(sizeof(struct in6_addr)) +
2117 /* IFLA_IPTUN_TTL */
2118 nla_total_size(1) +
2119 /* IFLA_IPTUN_ENCAP_LIMIT */
2120 nla_total_size(1) +
2121 /* IFLA_IPTUN_FLOWINFO */
2122 nla_total_size(4) +
2123 /* IFLA_IPTUN_FLAGS */
2124 nla_total_size(4) +
2125 /* IFLA_IPTUN_PROTO */
2126 nla_total_size(1) +
2127 /* IFLA_IPTUN_ENCAP_TYPE */
2128 nla_total_size(2) +
2129 /* IFLA_IPTUN_ENCAP_FLAGS */
2130 nla_total_size(2) +
2131 /* IFLA_IPTUN_ENCAP_SPORT */
2132 nla_total_size(2) +
2133 /* IFLA_IPTUN_ENCAP_DPORT */
2134 nla_total_size(2) +
2135 /* IFLA_IPTUN_COLLECT_METADATA */
2136 nla_total_size(0) +
2137 /* IFLA_IPTUN_FWMARK */
2138 nla_total_size(4) +
2139 0;
2140 }
2141
2142 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2143 {
2144 struct ip6_tnl *tunnel = netdev_priv(dev);
2145 struct __ip6_tnl_parm *parm = &tunnel->parms;
2146
2147 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2148 nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2149 nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2150 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2151 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2152 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2153 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2154 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2155 nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2156 goto nla_put_failure;
2157
2158 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2159 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2160 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2161 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2162 goto nla_put_failure;
2163
2164 if (parm->collect_md)
2165 if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2166 goto nla_put_failure;
2167
2168 return 0;
2169
2170 nla_put_failure:
2171 return -EMSGSIZE;
2172 }
2173
2174 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2175 {
2176 struct ip6_tnl *tunnel = netdev_priv(dev);
2177
2178 return tunnel->net;
2179 }
2180 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2181
2182 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2183 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
2184 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
2185 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
2186 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
2187 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
2188 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
2189 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
2190 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
2191 [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
2192 [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
2193 [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
2194 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
2195 [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
2196 [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
2197 };
2198
2199 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2200 .kind = "ip6tnl",
2201 .maxtype = IFLA_IPTUN_MAX,
2202 .policy = ip6_tnl_policy,
2203 .priv_size = sizeof(struct ip6_tnl),
2204 .setup = ip6_tnl_dev_setup,
2205 .validate = ip6_tnl_validate,
2206 .newlink = ip6_tnl_newlink,
2207 .changelink = ip6_tnl_changelink,
2208 .dellink = ip6_tnl_dellink,
2209 .get_size = ip6_tnl_get_size,
2210 .fill_info = ip6_tnl_fill_info,
2211 .get_link_net = ip6_tnl_get_link_net,
2212 };
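
/* Editorial note: .kind is the string matched against the "type"
 * argument of "ip link add", and .policy/.maxtype bound the
 * IFLA_IPTUN_* attributes that are validated before .newlink or
 * .changelink above is invoked.
 */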
2213
2214 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2215 .handler = ip4ip6_rcv,
2216 .err_handler = ip4ip6_err,
2217 .priority = 1,
2218 };
2219
2220 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2221 .handler = ip6ip6_rcv,
2222 .err_handler = ip6ip6_err,
2223 .priority = 1,
2224 };
2225
2226 static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
2227 .handler = mplsip6_rcv,
2228 .err_handler = mplsip6_err,
2229 .priority = 1,
2230 };
2231
2232 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2233 {
2234 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2235 struct net_device *dev, *aux;
2236 int h;
2237 struct ip6_tnl *t;
2238
2239 for_each_netdev_safe(net, dev, aux)
2240 if (dev->rtnl_link_ops == &ip6_link_ops)
2241 unregister_netdevice_queue(dev, list);
2242
2243 for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2244 t = rtnl_dereference(ip6n->tnls_r_l[h]);
2245 while (t) {
2246 /* If dev is in the same netns, it has already
2247 * been added to the list by the previous loop.
2248 */
2249 if (!net_eq(dev_net(t->dev), net))
2250 unregister_netdevice_queue(t->dev, list);
2251 t = rtnl_dereference(t->next);
2252 }
2253 }
2254
2255 t = rtnl_dereference(ip6n->tnls_wc[0]);
2256 while (t) {
2257 /* If dev is in the same netns, it has already
2258 * been added to the list by the previous loop.
2259 */
2260 if (!net_eq(dev_net(t->dev), net))
2261 unregister_netdevice_queue(t->dev, list);
2262 t = rtnl_dereference(t->next);
2263 }
2264 }
2265
2266 static int __net_init ip6_tnl_init_net(struct net *net)
2267 {
2268 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2269 struct ip6_tnl *t = NULL;
2270 int err;
2271
2272 ip6n->tnls[0] = ip6n->tnls_wc;
2273 ip6n->tnls[1] = ip6n->tnls_r_l;
2274
2275 if (!net_has_fallback_tunnels(net))
2276 return 0;
2277 err = -ENOMEM;
2278 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2279 NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2280
2281 if (!ip6n->fb_tnl_dev)
2282 goto err_alloc_dev;
2283 dev_net_set(ip6n->fb_tnl_dev, net);
2284 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2285 /* FB netdevice is special: we have one, and only one per netns.
2286 * Allowing it to be moved to another netns is clearly unsafe.
2287 */
2288 ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2289
2290 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2291 if (err < 0)
2292 goto err_register;
2293
2294 err = register_netdev(ip6n->fb_tnl_dev);
2295 if (err < 0)
2296 goto err_register;
2297
2298 t = netdev_priv(ip6n->fb_tnl_dev);
2299
2300 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2301 return 0;
2302
2303 err_register:
2304 free_netdev(ip6n->fb_tnl_dev);
2305 err_alloc_dev:
2306 return err;
2307 }
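
/* Editorial note (an assumption about the wider configuration): whether
 * the per-namespace fallback device "ip6tnl0" is created at all is
 * governed by net_has_fallback_tunnels(), i.e. by the
 * net.core.fb_tunnels_only_for_init_net sysctl, so depending on that
 * setting newly created namespaces may skip the allocation above and
 * carry only explicitly configured tunnels.
 */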
2308
2309 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2310 {
2311 struct net *net;
2312 LIST_HEAD(list);
2313
2314 rtnl_lock();
2315 list_for_each_entry(net, net_list, exit_list)
2316 ip6_tnl_destroy_tunnels(net, &list);
2317 unregister_netdevice_many(&list);
2318 rtnl_unlock();
2319 }
2320
2321 static struct pernet_operations ip6_tnl_net_ops = {
2322 .init = ip6_tnl_init_net,
2323 .exit_batch = ip6_tnl_exit_batch_net,
2324 .id = &ip6_tnl_net_id,
2325 .size = sizeof(struct ip6_tnl_net),
2326 };
2327
2328 /**
2329 * ip6_tunnel_init - register protocol and reserve needed resources
2330 *
2331 * Return: 0 on success
2332 **/
2333
2334 static int __init ip6_tunnel_init(void)
2335 {
2336 int err;
2337
2338 if (!ipv6_mod_enabled())
2339 return -EOPNOTSUPP;
2340
2341 err = register_pernet_device(&ip6_tnl_net_ops);
2342 if (err < 0)
2343 goto out_pernet;
2344
2345 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2346 if (err < 0) {
2347 pr_err("%s: can't register ip4ip6\n", __func__);
2348 goto out_ip4ip6;
2349 }
2350
2351 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2352 if (err < 0) {
2353 pr_err("%s: can't register ip6ip6\n", __func__);
2354 goto out_ip6ip6;
2355 }
2356
2357 if (ip6_tnl_mpls_supported()) {
2358 err = xfrm6_tunnel_register(&mplsip6_handler, AF_MPLS);
2359 if (err < 0) {
2360 pr_err("%s: can't register mplsip6\n", __func__);
2361 goto out_mplsip6;
2362 }
2363 }
2364
2365 err = rtnl_link_register(&ip6_link_ops);
2366 if (err < 0)
2367 goto rtnl_link_failed;
2368
2369 return 0;
2370
2371 rtnl_link_failed:
2372 if (ip6_tnl_mpls_supported())
2373 xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS);
2374 out_mplsip6:
2375 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2376 out_ip6ip6:
2377 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2378 out_ip4ip6:
2379 unregister_pernet_device(&ip6_tnl_net_ops);
2380 out_pernet:
2381 return err;
2382 }
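
/* Editorial note: the error labels above unwind in the reverse order of
 * registration, so a failure at any step leaves no tunnel handler or
 * pernet state registered.
 */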
2383
2384 /**
2385 * ip6_tunnel_cleanup - free resources and unregister protocol
2386 **/
2387
2388 static void __exit ip6_tunnel_cleanup(void)
2389 {
2390 rtnl_link_unregister(&ip6_link_ops);
2391 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2392 pr_info("%s: can't deregister ip4ip6\n", __func__);
2393
2394 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2395 pr_info("%s: can't deregister ip6ip6\n", __func__);
2396
2397 if (ip6_tnl_mpls_supported() &&
2398 xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS))
2399 pr_info("%s: can't deregister mplsip6\n", __func__);
2400 unregister_pernet_device(&ip6_tnl_net_ops);
2401 }
2402
2403 module_init(ip6_tunnel_init);
2404 module_exit(ip6_tunnel_cleanup);
2405