/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

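/* Hash the socket into the established table.  A v4-mapped socket
 * (icsk_af_ops == &ipv6_mapped) is handed to the IPv4 hash routine;
 * native IPv6 sockets go through __inet6_hash() with BHs disabled.
 */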
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

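/* Compute the TCP checksum over the IPv6 pseudo-header (source and
 * destination addresses, payload length, next header = TCP) plus the
 * partial sum of the segment itself, as required by RFC 2460.
 */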
static __inline__ __sum16 tcp_v6_check(int len,
				   const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

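/* Active open: validate the destination, handle flow labels and
 * link-local scope, fall back to tcp_v4_connect() for v4-mapped
 * addresses, route the flow, pick a source address, and finally
 * send the SYN via tcp_connect().
 */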
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table when entering
		 * TIME-WAIT state, and initialize rx_opt.ts_recent
		 * from it when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

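/* ICMPv6 error handler.  Locates the socket the error refers to,
 * validates the echoed sequence number, adjusts the path MTU on
 * ICMPV6_PKT_TOOBIG, and reports other errors to the socket or the
 * matching request_sock.
 */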
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

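/* Build and transmit a SYN+ACK for the given request_sock: route the
 * reply flow, let tcp_make_synack() construct the segment, checksum it
 * against the request's addresses, and push it out with ip6_xmit().
 */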
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6.daddr = treq->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

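/* Feed the IPv6 pseudo-header (addresses, upper-layer length, protocol,
 * per RFC 2460) into the MD5 hash state before the TCP header and
 * payload are mixed in.
 */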
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

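/* Verify the TCP MD5 signature option on an incoming segment.
 * Returns 0 when the segment is acceptable (option and expected key
 * agree, or both are absent) and 1 when it must be dropped.
 */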
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

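/* Construct and send a bare control segment (RST or ACK) in reply to
 * @skb, swapping the addresses and ports of the received segment and
 * transmitting via the per-namespace TCP control socket.
 */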
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow even when this
	 * is for an RST; the underlying function uses it to retrieve
	 * the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

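/* Send a RST in response to @skb.  No RST is ever sent in reply to
 * another RST, and the sequence/ack numbers are derived from the
 * offending segment.  With MD5 enabled, a key is looked up (via a
 * listener when no socket is given) so the RST carries a valid
 * signature.
 */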
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is gone.  Try to find the listening
		 * socket via the source port, then look the MD5 key up
		 * through it.  This does not loosen security: the
		 * incoming packet is checked against the key we find,
		 * and no RST is generated if the hash does not match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * TIME-WAIT state, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing the timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive.  It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

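/* Create the child socket for a completed handshake.  A v4-mapped
 * request is delegated to tcp_v4_syn_recv_sock() and then patched to
 * use the mapped ops; the native path clones the listener, copies the
 * request's addresses and IPv6 options, sizes the MSS from the route,
 * and inherits any matching MD5 key.
 */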
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but
	   we do one more thing here: reattach the optmem to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

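/* Validate or set up the receive checksum.  A CHECKSUM_COMPLETE skb is
 * verified against the pseudo-header; otherwise the pseudo-header sum
 * is seeded and short packets (<= 76 bytes) are checksummed in full
 * right away, leaving longer ones for copy-and-checksum later.
 */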
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6, but an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* What is happening here?

	   1. skb was enqueued by tcp.
	   2. skb was added to the tail of the read queue, not out of order.
	   3. the socket is not in a passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

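/* Main IPv6 receive entry point.  Validates header lengths and the
 * checksum, fills in the TCP control block, looks the socket up, and
 * either processes the segment directly, prequeues it, or backlogs it
 * when the socket is owned by user context.
 */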
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

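/* Return the inet_peer entry for the connection's destination, taking
 * it from the cached route when the route still matches np->daddr and
 * creating (and flagging for release) a fresh entry otherwise.
 */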
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

1832 /* NOTE: A lot of things set to zero explicitly by call to
1833  *       sk_alloc() so need not be done here.
1834  */
tcp_v6_init_sock(struct sock * sk)1835 static int tcp_v6_init_sock(struct sock *sk)
1836 {
1837 	struct inet_connection_sock *icsk = inet_csk(sk);
1838 	struct tcp_sock *tp = tcp_sk(sk);
1839 
1840 	skb_queue_head_init(&tp->out_of_order_queue);
1841 	tcp_init_xmit_timers(sk);
1842 	tcp_prequeue_init(tp);
1843 
1844 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
1845 	tp->mdev = TCP_TIMEOUT_INIT;
1846 
1847 	/* So many TCP implementations out there (incorrectly) count the
1848 	 * initial SYN frame in their delayed-ACK and congestion control
1849 	 * algorithms that we must have the following bandaid to talk
1850 	 * efficiently to them.  -DaveM
1851 	 */
1852 	tp->snd_cwnd = 2;
1853 
1854 	/* See draft-stevens-tcpca-spec-01 for discussion of the
1855 	 * initialization of these values.
1856 	 */
1857 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1858 	tp->snd_cwnd_clamp = ~0;
1859 	tp->mss_cache = TCP_MSS_DEFAULT;
1860 
1861 	tp->reordering = sysctl_tcp_reordering;
1862 
1863 	sk->sk_state = TCP_CLOSE;
1864 
1865 	icsk->icsk_af_ops = &ipv6_specific;
1866 	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1867 	icsk->icsk_sync_mss = tcp_sync_mss;
1868 	sk->sk_write_space = sk_stream_write_space;
1869 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1870 
1871 #ifdef CONFIG_TCP_MD5SIG
1872 	tp->af_specific = &tcp_sock_ipv6_specific;
1873 #endif
1874 
1875 	/* TCP Cookie Transactions */
1876 	if (sysctl_tcp_cookie_size > 0) {
1877 		/* Default, cookies without s_data_payload. */
1878 		tp->cookie_values =
1879 			kzalloc(sizeof(*tp->cookie_values),
1880 				sk->sk_allocation);
1881 		if (tp->cookie_values != NULL)
1882 			kref_init(&tp->cookie_values->kref);
1883 	}
1884 	/* Presumed zeroed, in order of appearance:
1885 	 *	cookie_in_always, cookie_out_never,
1886 	 *	s_data_constant, s_data_in, s_data_out
1887 	 */
1888 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
1889 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1890 
1891 	local_bh_disable();
1892 	sock_update_memcg(sk);
1893 	sk_sockets_allocated_inc(sk);
1894 	local_bh_enable();
1895 
1896 	return 0;
1897 }
1898 
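/* Release the protocol-generic TCP state (shared with IPv4, hence the
 * tcp_v4_ name) before tearing down the IPv6-specific socket state.
 */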
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
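/* Format one SYN_RECV request socket as a /proc/net/tcp6 row. */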
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

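/* Format one full TCP socket, listening or connected, as a
 * /proc/net/tcp6 row.
 */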
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

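	/* Encode the pending timer the way /proc/net/tcp does:
	 * 1 = retransmit, 4 = zero-window probe, 2 = sk_timer
	 * (keepalive or SYN-ACK retransmit), 0 = none.
	 */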
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

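/* Format one TIME-WAIT socket as a /proc/net/tcp6 row; most columns
 * are constant because a timewait entry keeps no full TCP state.
 */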
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

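/* seq_file ->show() callback for /proc/net/tcp6: emit the header for
 * the start token, then dispatch on the iterator state to the matching
 * row formatter above.
 */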
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

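/* Zero a socket being recycled through its SLAB_DESTROY_BY_RCU cache.
 * The clear is done in two chunks around pinet6 so that a concurrent
 * RCU lookup never observes a NULL pinet6 pointer.
 */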
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

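/* The TCPv6 protocol handed to the socket layer.  Most operations are
 * shared with IPv4 TCP; connect, hashing, init/destroy and backlog
 * receive are the IPv6-specific pieces.
 */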
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

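/* Layer-4 handler registered with the IPv6 stack for IPPROTO_TCP,
 * including the GSO/GRO callbacks used for segmentation offload.
 */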
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

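/* Create the per-namespace control socket (net->ipv6.tcp_sk) used to
 * transmit RST and ACK segments when no local socket owns the packet.
 */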
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

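/* Register the protocol handler, the socket-switch entry, and the
 * per-namespace hooks; on failure, unwind in reverse order.
 */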
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}