/*
 *	Internet Control Message Protocol (ICMPv6)
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on net/ipv4/icmp.c
 *
 *	RFC 1885
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Changes:
 *
 *	Andi Kleen		:	exception handling
 *	Andi Kleen		:	add rate limits. never reply to an icmp.
 *					add more length checks and other fixes.
 *	yoshfuji		:	ensure to send parameter problem for
 *					fragments.
 *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
 *	Randy Dunlap and
 *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
 *	Kazunori MIYAZAWA @USAGI:	change output process to use ip6_append_data
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/netfilter.h>
#include <linux/slab.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>

#include <net/ip.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>
#include <asm/system.h>

/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
static inline struct sock *icmpv6_sk(struct net *net)
{
	return net->ipv6.icmp_sk[smp_processor_id()];
}

static int icmpv6_rcv(struct sk_buff *skb);

static const struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

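/*
 * Grab the per-CPU ICMPv6 socket for this namespace with bottom halves
 * disabled.  Returns NULL (and re-enables BHs) if the socket is already
 * locked on this CPU, in which case the caller must give up sending.
 */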
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
	struct sock *sk;

	local_bh_disable();

	sk = icmpv6_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (e.g. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return NULL;
	}
	return sk;
}

static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	spin_unlock_bh(&sk->sk_lock.slock);
}

/*
 * Slightly more convenient version of icmpv6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
	kfree_skb(skb);
}

/*
 * Figure out whether we may reply to this packet with an icmp error.
 *
 * We do not reply if:
 *	- it was an icmp error message.
 *	- it is truncated, so that it is known that the protocol is ICMPV6
 *	  (i.e. in the middle of some exthdr).
 *
 *	--ANK (980726)
 */

static int is_ineligible(struct sk_buff *skb)
{
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	if (len < 0)
		return 1;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
	if (ptr < 0)
		return 0;
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;
		tp = skb_header_pointer(skb,
			ptr+offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);
		if (tp == NULL ||
		    !(*tp & ICMPV6_INFOMSG_MASK))
			return 1;
	}
	return 0;
}

/*
 * Check the ICMP output rate limit
 */
static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
				      struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct net *net = sock_net(sk);
	bool res = false;

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)
		return true;

	/* Do not limit pmtu discovery, it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)
		return true;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(net, sk, fl6);
	if (dst->error) {
		IP6_INC_STATS(net, ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
		res = true;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = net->ipv6.sysctl.icmpv6_time;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
	}
	dst_release(dst);
	return res;
}

/*
 *	an inline helper for the "simple" if statement below;
 *	checks whether a parameter problem report is caused by an
 *	unrecognized IPv6 option whose Option Type has its two
 *	highest-order bits set to 10
 */

static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (op == NULL)
		return 1;
	return (*op & 0xC0) == 0x80;
}

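/*
 * Fill in the ICMPv6 header on the first queued skb, compute the checksum
 * over all pending fragments and push the queued frames out via
 * ip6_push_pending_frames().
 */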
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;
	int err = 0;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		skb->csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      skb->csum);
	} else {
		__wsum tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      tmp_csum);
	}
	ip6_push_pending_frames(sk);
out:
	return err;
}

struct icmpv6_msg {
	struct sk_buff	*skb;
	int		offset;
	uint8_t		type;
};

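/*
 * getfrag callback for ip6_append_data(): copy a chunk of the original
 * packet into the new skb, fold it into the running checksum, and attach
 * the conntrack of the original packet to error replies.
 */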
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__wsum csum = 0;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len, csum);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
	return 0;
}

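/*
 * Mobile IPv6: if the offending packet carried a Home Address destination
 * option, swap the source address with the home address carried in the
 * option, so that the subsequent error is addressed to the home address.
 */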
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static void mip6_addr_swap(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6_destopt_hao *hao;
	struct in6_addr tmp;
	int off;

	if (opt->dsthao) {
		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
			ipv6_addr_copy(&tmp, &iph->saddr);
			ipv6_addr_copy(&iph->saddr, &hao->addr);
			ipv6_addr_copy(&hao->addr, &tmp);
		}
	}
}
#else
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif

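/*
 * Route and IPsec lookup for an outgoing ICMPv6 error.  Refuses anycast
 * destinations and, if the forward xfrm lookup is blocked by policy,
 * retries with a reverse-decoded flow and XFRM_LOOKUP_ICMP so errors can
 * still be returned across IPsec.
 */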
static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
					     struct sock *sk, struct flowi6 *fl6)
{
	struct dst_entry *dst, *dst2;
	struct flowi6 fl2;
	int err;

	err = ip6_dst_lookup(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);

	/*
	 * We won't send an icmp error if the destination is a known
	 * anycast address.
	 */
	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
		dst_release(dst);
		return ERR_PTR(-EINVAL);
	}

	/* No need to clone since we're just using its address. */
	dst2 = dst;

	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
	if (!IS_ERR(dst)) {
		if (dst != dst2)
			return dst;
	} else {
		if (PTR_ERR(dst) == -EPERM)
			dst = NULL;
		else
			return dst;
	}

	err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
	if (err)
		goto relookup_failed;

	err = ip6_dst_lookup(sk, &dst2, &fl2);
	if (err)
		goto relookup_failed;

	dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
	if (!IS_ERR(dst2)) {
		dst_release(dst);
		dst = dst2;
	} else {
		err = PTR_ERR(dst2);
		if (err == -EPERM) {
			dst_release(dst);
			return dst2;
		} else
			goto relookup_failed;
	}

relookup_failed:
	if (dst)
		return dst;
	return ERR_PTR(err);
}

/*
 *	Send an ICMP message in response to a packet in error
 */
void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct sock *sk;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	int iif = 0;
	int addr_type = 0;
	int len;
	int hlimit;
	int err = 0;

	if ((u8 *)hdr < skb->head ||
	    (skb->network_header + sizeof(*hdr)) > skb->tail)
		return;

	/*
	 *	Make sure we respect the rules,
	 *	i.e. RFC 1885 2.4(e).
	 *	Rule (e.1) is enforced by not using icmpv6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
		saddr = &hdr->daddr;

	/*
	 *	Dest addr check
	 */

	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	/*
	 *	Source addr check
	 */

	if (addr_type & IPV6_ADDR_LINKLOCAL)
		iif = skb->dev->ifindex;

	/*
	 *	Must not send an error if the source does not uniquely
	 *	identify a single node (RFC2463 Section 2.4).
	 *	We check unspecified / multicast addresses here,
	 *	and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
		return;
	}

	/*
	 *	Never reply to an ICMP error.
	 */
	if (is_ineligible(skb)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
		return;
	}

	mip6_addr_swap(skb);

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl6.daddr, &hdr->saddr);
	if (saddr)
		ipv6_addr_copy(&fl6.saddr, saddr);
	fl6.flowi6_oif = iif;
	fl6.fl6_icmp_type = type;
	fl6.fl6_icmp_code = code;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	sk = icmpv6_xmit_lock(net);
	if (sk == NULL)
		return;
	np = inet6_sk(sk);

	if (!icmpv6_xrlim_allow(sk, type, &fl6))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;

	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
	if (IS_ERR(dst))
		goto out;

	if (ipv6_addr_is_multicast(&fl6.daddr))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	msg.skb = skb;
	msg.offset = skb_network_offset(skb);
	msg.type = type;

	len = skb->len - msg.offset;
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
	if (len < 0) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
		goto out_dst_release;
	}

	idev = in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr), hlimit,
			      np->tclass, NULL, &fl6, (struct rt6_info *)dst,
			      MSG_DONTWAIT, np->dontfrag);
	if (err) {
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr));

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
}

EXPORT_SYMBOL(icmpv6_send);

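/*
 * Reply to an ICMPv6 Echo Request: reuse the incoming ICMPv6 header (with
 * the type flipped to ECHO_REPLY) and echo the payload back to the sender.
 */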
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	int err = 0;
	int hlimit;

	saddr = &ipv6_hdr(skb)->daddr;

	if (!ipv6_unicast_destination(skb))
		saddr = NULL;

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	if (saddr)
		ipv6_addr_copy(&fl6.saddr, saddr);
	fl6.flowi6_oif = skb->dev->ifindex;
	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	sk = icmpv6_xmit_lock(net);
	if (sk == NULL)
		return;
	np = inet6_sk(sk);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl6);
	if (err)
		goto out;
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
	if (IS_ERR(dst))
		goto out;

	if (ipv6_addr_is_multicast(&fl6.daddr))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	idev = in6_dev_get(skb->dev);

	msg.skb = skb;
	msg.offset = 0;
	msg.type = ICMPV6_ECHO_REPLY;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
				sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
				(struct rt6_info *)dst, MSG_DONTWAIT,
				np->dontfrag);

	if (err) {
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
}

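/*
 * Hand an ICMPv6 error to the error handler of the embedded upper-layer
 * protocol and to any matching raw sockets.
 */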
static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
	const struct inet6_protocol *ipprot;
	int inner_offset;
	int hash;
	u8 nexthdr;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return;

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		if (inner_offset < 0)
			return;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Check the header, including 8 bytes of the inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset + 8))
		return;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not be able, e.g., to do source-routed
	   pmtu discovery.
	   Corresponding argument (opt) to notifiers is already added.
	   --ANK (980726)
	 */

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	rcu_read_lock();
	ipprot = rcu_dereference(inet6_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
	rcu_read_unlock();

	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
}

/*
 *	Handle incoming ICMPv6 messages
 */

static int icmpv6_rcv(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	struct in6_addr *saddr, *daddr;
	struct ipv6hdr *orig_hdr;
	struct icmp6hdr *hdr;
	u8 type;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP))
			goto drop_no_count;

		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))
			goto drop_no_count;

		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop_no_count;

		skb_set_network_header(skb, nh);
	}

	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	/* Perform checksum. */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
				     skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
					     IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb)) {
			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
				       saddr, daddr);
			goto discard_it;
		}
	}

	if (!pskb_pull(skb, sizeof(*hdr)))
		goto discard_it;

	hdr = icmp6_hdr(skb);

	type = hdr->icmp6_type;

	ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);

	switch (type) {
	case ICMPV6_ECHO_REQUEST:
		icmpv6_echo_reply(skb);
		break;

	case ICMPV6_ECHO_REPLY:
		/* we couldn't care less */
		break;

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
		   the standard destination cache. It seems only an "advanced"
		   destination cache will allow us to solve this problem.
		   --ANK (980726)
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto discard_it;
		hdr = icmp6_hdr(skb);
		orig_hdr = (struct ipv6hdr *) (hdr + 1);
		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
				   ntohl(hdr->icmp6_mtu));

		/*
		 *	Fall through to notify
		 */

	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
		break;

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:
	case NDISC_REDIRECT:
		ndisc_rcv(skb);
		break;

	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);
		break;

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);
		break;

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

		/* informational */
		if (type & ICMPV6_INFOMSG_MASK)
			break;

		/*
		 * error of unknown type.
		 * must pass to upper level
		 */

		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
	}

	kfree_skb(skb);
	return 0;

discard_it:
	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
	kfree_skb(skb);
	return 0;
}

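/*
 * Initialise a flowi6 for an ICMPv6 message of the given type between
 * saddr and daddr on interface oif, and classify it for LSM purposes.
 */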
void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
		      u8 type,
		      const struct in6_addr *saddr,
		      const struct in6_addr *daddr,
		      int oif)
{
	memset(fl6, 0, sizeof(*fl6));
	ipv6_addr_copy(&fl6->saddr, saddr);
	ipv6_addr_copy(&fl6->daddr, daddr);
	fl6->flowi6_proto	= IPPROTO_ICMPV6;
	fl6->fl6_icmp_type	= type;
	fl6->fl6_icmp_code	= 0;
	fl6->flowi6_oif		= oif;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
}

/*
 * Special lock-class for __icmpv6_sk:
 */
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;

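/*
 * Per-namespace setup: create one ICMPv6 control socket for each possible
 * CPU, used to send replies and errors from softirq context.
 */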
static int __net_init icmpv6_sk_init(struct net *net)
{
	struct sock *sk;
	int err, i, j;

	net->ipv6.icmp_sk =
		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
	if (net->ipv6.icmp_sk == NULL)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		err = inet_ctl_sock_create(&sk, PF_INET6,
					   SOCK_RAW, IPPROTO_ICMPV6, net);
		if (err < 0) {
			printk(KERN_ERR
			       "Failed to initialize the ICMP6 control socket "
			       "(err %d).\n",
			       err);
			goto fail;
		}

		net->ipv6.icmp_sk[i] = sk;

		/*
		 * Split off their lock-class, because sk->sk_dst_lock
		 * gets used from softirqs, which is safe for
		 * __icmpv6_sk (because those never get directly used
		 * via userspace syscalls), but unsafe for normal sockets.
		 */
		lockdep_set_class(&sk->sk_dst_lock,
				  &icmpv6_socket_sk_dst_lock_key);

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
		sk->sk_sndbuf =
			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
	}
	return 0;

 fail:
	for (j = 0; j < i; j++)
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
	kfree(net->ipv6.icmp_sk);
	return err;
}

static void __net_exit icmpv6_sk_exit(struct net *net)
{
	int i;

	for_each_possible_cpu(i) {
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
	}
	kfree(net->ipv6.icmp_sk);
}

static struct pernet_operations icmpv6_sk_ops = {
       .init = icmpv6_sk_init,
       .exit = icmpv6_sk_exit,
};

int __init icmpv6_init(void)
{
	int err;

	err = register_pernet_subsys(&icmpv6_sk_ops);
	if (err < 0)
		return err;

	err = -EAGAIN;
	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
		goto fail;
	return 0;

fail:
	printk(KERN_ERR "Failed to register ICMP6 protocol\n");
	unregister_pernet_subsys(&icmpv6_sk_ops);
	return err;
}

void icmpv6_cleanup(void)
{
	unregister_pernet_subsys(&icmpv6_sk_ops);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}


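/*
 * Map ICMPv6 destination-unreachable codes to errno values and note
 * whether the error is fatal for the connection.
 */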
static const struct icmp6_err {
	int err;
	int fatal;
} tab_unreach[] = {
	{	/* NOROUTE */
		.err	= ENETUNREACH,
		.fatal	= 0,
	},
	{	/* ADM_PROHIBITED */
		.err	= EACCES,
		.fatal	= 1,
	},
	{	/* Was NOT_NEIGHBOUR, now reserved */
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* ADDR_UNREACH	*/
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* PORT_UNREACH	*/
		.err	= ECONNREFUSED,
		.fatal	= 1,
	},
};

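/*
 * Convert an ICMPv6 error type/code into an errno (returned via *err).
 * Returns non-zero if the error is fatal.
 */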
int icmpv6_err_convert(u8 type, u8 code, int *err)
{
	int fatal = 0;

	*err = EPROTO;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		fatal = 1;
		if (code <= ICMPV6_PORT_UNREACH) {
			*err = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;
		}
		break;

	case ICMPV6_PKT_TOOBIG:
		*err = EMSGSIZE;
		break;

	case ICMPV6_PARAMPROB:
		*err = EPROTO;
		fatal = 1;
		break;

	case ICMPV6_TIME_EXCEED:
		*err = EHOSTUNREACH;
		break;
	}

	return fatal;
}

EXPORT_SYMBOL(icmpv6_err_convert);

#ifdef CONFIG_SYSCTL
ctl_table ipv6_icmp_table_template[] = {
	{
		.procname	= "ratelimit",
		.data		= &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ },
};

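/*
 * Duplicate the sysctl template for a namespace and point the "ratelimit"
 * entry at that namespace's icmpv6_time value.
 */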
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_icmp_table_template,
			sizeof(ipv6_icmp_table_template),
			GFP_KERNEL);

	if (table)
		table[0].data = &net->ipv6.sysctl.icmpv6_time;

	return table;
}
#endif
