/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.14.2.1 2002/03/05 04:30:08 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <net/tcp.h>
#include <net/inet_common.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_tw_recycle = 0;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow = 0;

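/* Check whether the segment [seq, end_seq) intersects the receive
 * window [s_win, e_win).  A segment starting exactly at the left edge
 * always counts, and a zero-length segment sitting at the right edge
 * is accepted as well (the final test below).
 */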
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}

/* New-style handling of TIME_WAIT sockets. */

int tcp_tw_count = 0;


/* Must be called with locally disabled BHs. */
void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead;
	struct tcp_bind_hashbucket *bhead;
	struct tcp_bind_bucket *tb;

	/* Unlink from established hashes. */
	ehead = &tcp_ehash[tw->hashent];
	write_lock(&ehead->lock);
	if (!tw->pprev) {
		write_unlock(&ehead->lock);
		return;
	}
	if (tw->next)
		tw->next->pprev = tw->pprev;
	*(tw->pprev) = tw->next;
	tw->pprev = NULL;
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &tcp_bhash[tcp_bhashfn(tw->num)];
	spin_lock(&bhead->lock);
	tb = tw->tb;
	if (tw->bind_next)
		tw->bind_next->bind_pprev = tw->bind_pprev;
	*(tw->bind_pprev) = tw->bind_next;
	tw->tb = NULL;
	if (tb->owners == NULL) {
		if (tb->next)
			tb->next->pprev = tb->pprev;
		*(tb->pprev) = tb->next;
		kmem_cache_free(tcp_bucket_cachep, tb);
	}
	spin_unlock(&bhead->lock);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&tw->refcnt) != 1) {
		printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw, atomic_read(&tw->refcnt));
	}
#endif
	tcp_tw_put(tw);
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
			   struct tcphdr *th, unsigned len)
{
	struct tcp_opt tp;
	int paws_reject = 0;

	tp.saw_tstamp = 0;
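	/* Options are present only if the header is longer than a bare
	 * TCP header; run the RFC 1323 PAWS check against the timestamp
	 * we remembered for this connection, if we have one.
	 */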
	if (th->doff > (sizeof(struct tcphdr)>>2) && tw->ts_recent_stamp) {
		tcp_parse_options(skb, &tp, 0);

		if (tp.saw_tstamp) {
			tp.ts_recent = tw->ts_recent;
			tp.ts_recent_stamp = tw->ts_recent_stamp;
			paws_reject = tcp_paws_check(&tp, th->rst);
		}
	}

	if (tw->substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tw->rcv_nxt, tw->rcv_nxt + tw->rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			tcp_tw_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex close,
		 * reset.
		 */
		if (!th->fin || TCP_SKB_CB(skb)->end_seq != tw->rcv_nxt+1) {
kill_with_rst:
			tcp_tw_deschedule(tw);
			tcp_timewait_kill(tw);
			tcp_tw_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->substate = TCP_TIME_WAIT;
		tw->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tp.saw_tstamp) {
			tw->ts_recent_stamp = xtime.tv_sec;
			tw->ts_recent = tp.rcv_tsval;
		}
		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->family == AF_INET &&
		    sysctl_tcp_tw_recycle && tw->ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			tcp_tw_schedule(tw, tw->timeout);
		else
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tw->rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment, it may be only a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				tcp_tw_deschedule(tw);
				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tp.saw_tstamp) {
			tw->ts_recent = tp.rcv_tsval;
			tw->ts_recent_stamp = xtime.tv_sec;
		}

		tcp_tw_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that the SYN must carry
	   a newer sequence number, works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns out
	   to be an old duplicate (i.e. we receive RST in reply to SYN-ACK),
	   we must return the socket to time-wait state. It is not good,
	   but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tw->rcv_nxt) ||
	     (tp.saw_tstamp && (s32)(tw->ts_recent - tp.rcv_tsval) < 0))) {
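		/* Choose an ISN safely above anything the old incarnation
		 * could have used: its snd_nxt plus the maximal window.
		 * An ISN of 0 means "none" to the caller, so skip it.
		 */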
		u32 isn = tw->snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(PAWSEstabRejected);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	tcp_tw_put(tw);
	return TCP_TW_SUCCESS;
}

/* Enter the time wait state.  This is called with locally disabled BHs.
 * Essentially we whip up a timewait bucket, copy the
 * relevant info into it from the SK, and mess with hash chains
 * and list linkage.
 */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->hashent];
	struct tcp_bind_hashbucket *bhead;
	struct sock **head, *sktw;

	/* Step 1: Put TW into the bind hash. The original socket stays there too.
	   Note that any socket with sk->num != 0 MUST be bound in the binding
	   cache, even if it is closed.
	 */
	bhead = &tcp_bhash[tcp_bhashfn(sk->num)];
	spin_lock(&bhead->lock);
	tw->tb = (struct tcp_bind_bucket *)sk->prev;
	BUG_TRAP(sk->prev != NULL);
	if ((tw->bind_next = tw->tb->owners) != NULL)
		tw->tb->owners->bind_pprev = &tw->bind_next;
	tw->tb->owners = (struct sock *)tw;
	tw->bind_pprev = &tw->tb->owners;
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from the established hash. */
	if (sk->pprev) {
		if (sk->next)
			sk->next->pprev = sk->pprev;
		*sk->pprev = sk->next;
		sk->pprev = NULL;
		sock_prot_dec_use(sk->prot);
	}

	/* Step 3: Hash TW into the TIMEWAIT half of the established hash table. */
	head = &(ehead + tcp_ehash_size)->chain;
	sktw = (struct sock *)tw;
	if ((sktw->next = *head) != NULL)
		(*head)->pprev = &sktw->next;
	*head = sktw;
	sktw->pprev = head;
	atomic_inc(&tw->refcnt);

	write_unlock(&ehead->lock);
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct tcp_tw_bucket *tw = NULL;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int recycle_ok = 0;

	if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
		tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);

	if (tw != NULL) {
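		/* 3.5 * RTO in jiffies: (rto << 2) is 4*RTO and (rto >> 1)
		 * is RTO/2, so the difference is 3.5*RTO.  See the rationale
		 * in the comment above tcp_tw_schedule().
		 */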
		int rto = (tp->rto << 2) - (tp->rto >> 1);

		/* Give us an identity. */
		tw->daddr	= sk->daddr;
		tw->rcv_saddr	= sk->rcv_saddr;
		tw->bound_dev_if= sk->bound_dev_if;
		tw->num		= sk->num;
		tw->state	= TCP_TIME_WAIT;
		tw->substate	= state;
		tw->sport	= sk->sport;
		tw->dport	= sk->dport;
		tw->family	= sk->family;
		tw->reuse	= sk->reuse;
		tw->rcv_wscale	= tp->rcv_wscale;
		atomic_set(&tw->refcnt, 1);

		tw->hashent	= sk->hashent;
		tw->rcv_nxt	= tp->rcv_nxt;
		tw->snd_nxt	= tp->snd_nxt;
		tw->rcv_wnd	= tcp_receive_window(tp);
		tw->ts_recent	= tp->ts_recent;
		tw->ts_recent_stamp= tp->ts_recent_stamp;
		tw->pprev_death = NULL;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->family == PF_INET6) {
			memcpy(&tw->v6_daddr,
			       &sk->net_pinfo.af_inet6.daddr,
			       sizeof(struct in6_addr));
			memcpy(&tw->v6_rcv_saddr,
			       &sk->net_pinfo.af_inet6.rcv_saddr,
			       sizeof(struct in6_addr));
		}
#endif
		/* Linkage updates. */
		__tcp_tw_hashdance(sk, tw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->timeout = rto;
		} else {
			tw->timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		tcp_tw_schedule(tw, timeo);
		tcp_tw_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot = 0;

static void tcp_twkill(unsigned long);

static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = { function: tcp_twkill };
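
/* The death row is a coarse timer wheel: TCP_TWKILL_SLOTS chains, with
 * the hand (tcp_tw_death_row_slot) advanced by tcp_twkill() each time
 * tcp_tw_timer fires, i.e. every TCP_TWKILL_PERIOD jiffies.
 */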

static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
{
	struct tcp_tw_bucket *tw;
	int killed = 0;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	spin_lock(&tw_death_lock);

	if (tcp_tw_count == 0)
		goto out;

	while ((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
		tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death;
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		tw->pprev_death = NULL;
		spin_unlock(&tw_death_lock);

		tcp_timewait_kill(tw);
		tcp_tw_put(tw);

		killed++;

		spin_lock(&tw_death_lock);
	}
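	/* Advance the hand to the next slot; TCP_TWKILL_SLOTS is a
	 * power of two, so the mask implements the wrap-around.
	 */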
	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));

	if ((tcp_tw_count -= killed) != 0)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
	net_statistics[smp_processor_id()*2].TimeWaited += killed;
out:
	spin_unlock(&tw_death_lock);
}

SMP_TIMER_DEFINE(tcp_twkill, tcp_twkill_task);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
	spin_lock(&tw_death_lock);
	if (tw->pprev_death) {
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		*tw->pprev_death = tw->next_death;
		tw->pprev_death = NULL;
		tcp_tw_put(tw);
		if (--tcp_tw_count == 0)
			del_timer(&tcp_tw_timer);
	}
	spin_unlock(&tw_death_lock);
}

/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer = { function: tcp_twcal_tick };
static struct tcp_tw_bucket *tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];

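/* The calendar is the fine-grained counterpart of the death row above:
 * TCP_TW_RECYCLE_SLOTS chains spaced 2^TCP_TW_RECYCLE_TICK jiffies apart,
 * used for the short timeouts that tw_recycle mode produces.
 */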
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
	struct tcp_tw_bucket **tpp;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost, the probability of that event
	 * is p^(N+1), where p is the probability to lose a single packet, and
	 * the time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff. The normal timewait length is calculated so that we
	 * wait at least for one retransmitted FIN (the maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement, waiting
	 *   only for 60sec; we should wait at least for 240 secs.
	 *   Well, 240 consumes too many resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to the bounds required
	 * by the RTO, rather than the MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the help
	 * of PAWS.
	 */
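	/* Round timeo up to a whole number of recycle ticks (a tick is
	 * 2^TCP_TW_RECYCLE_TICK jiffies) and convert it to a slot count:
	 * slot = ceil(timeo / 2^TCP_TW_RECYCLE_TICK).
	 */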
	slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;

	spin_lock(&tw_death_lock);

	/* Unlink it, if it was scheduled */
	if (tw->pprev_death) {
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		*tw->pprev_death = tw->next_death;
		tw->pprev_death = NULL;
		tcp_tw_count--;
	} else
		atomic_inc(&tw->refcnt);
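	/* Either way we now hold the reference that the death row (or the
	 * calendar) keeps on the bucket: inherited from the old scheduling,
	 * or freshly taken just above.
	 */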

	if (slot >= TCP_TW_RECYCLE_SLOTS) {
		/* Schedule to the slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = TCP_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + TCP_TWKILL_PERIOD - 1) / TCP_TWKILL_PERIOD;
			if (slot >= TCP_TWKILL_SLOTS)
				slot = TCP_TWKILL_SLOTS - 1;
		}
		tw->ttd = jiffies + timeo;
		slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
		tpp = &tcp_tw_death_row[slot];
	} else {
		tw->ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

		if (tcp_twcal_hand < 0) {
			tcp_twcal_hand = 0;
			tcp_twcal_jiffie = jiffies;
			tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot << TCP_TW_RECYCLE_TICK);
			add_timer(&tcp_twcal_timer);
		} else {
			if ((long)(tcp_twcal_timer.expires - jiffies) > (slot << TCP_TW_RECYCLE_TICK))
				mod_timer(&tcp_twcal_timer, jiffies + (slot << TCP_TW_RECYCLE_TICK));
			slot = (tcp_twcal_hand + slot) & (TCP_TW_RECYCLE_SLOTS - 1);
		}
		tpp = &tcp_twcal_row[slot];
	}

	if ((tw->next_death = *tpp) != NULL)
		(*tpp)->pprev_death = &tw->next_death;
	*tpp = tw;
	tw->pprev_death = tpp;

	if (tcp_tw_count++ == 0)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
	spin_unlock(&tw_death_lock);
}

void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy)
{
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	spin_lock(&tw_death_lock);
	if (tcp_twcal_hand < 0)
		goto out;

	slot = tcp_twcal_hand;
	j = tcp_twcal_jiffie;

	for (n = 0; n < TCP_TW_RECYCLE_SLOTS; n++) {
		if ((long)(j - now) <= 0) {
			struct tcp_tw_bucket *tw;

			while ((tw = tcp_twcal_row[slot]) != NULL) {
				tcp_twcal_row[slot] = tw->next_death;
				tw->pprev_death = NULL;

				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				tcp_twcal_jiffie = j;
				tcp_twcal_hand = slot;
			}

			if (tcp_twcal_row[slot] != NULL) {
				mod_timer(&tcp_twcal_timer, j);
				goto out;
			}
		}
		j += (1 << TCP_TW_RECYCLE_TICK);
		slot = (slot + 1) & (TCP_TW_RECYCLE_SLOTS - 1);
	}
	tcp_twcal_hand = -1;

out:
	if ((tcp_tw_count -= killed) == 0)
		del_timer(&tcp_tw_timer);
	net_statistics[smp_processor_id()*2].TimeWaitKilled += killed;
	spin_unlock(&tw_death_lock);
}

SMP_TIMER_DEFINE(tcp_twcal_tick, tcp_twcal_tasklet);


/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid many of the memory writes here: the tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
{
	struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0);

	if (newsk != NULL) {
		struct tcp_opt *newtp;
#ifdef CONFIG_FILTER
		struct sk_filter *filter;
#endif

		memcpy(newsk, sk, sizeof(*newsk));
		newsk->state = TCP_SYN_RECV;

		/* SANITY */
		newsk->pprev = NULL;
		newsk->prev = NULL;

		/* Clone the TCP header template */
		newsk->dport = req->rmt_port;

		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		newsk->dst_lock	= RW_LOCK_UNLOCKED;
		atomic_set(&newsk->rmem_alloc, 0);
		skb_queue_head_init(&newsk->receive_queue);
		atomic_set(&newsk->wmem_alloc, 0);
		skb_queue_head_init(&newsk->write_queue);
		atomic_set(&newsk->omem_alloc, 0);
		newsk->wmem_queued = 0;
		newsk->forward_alloc = 0;

		newsk->done = 0;
		newsk->userlocks = sk->userlocks & ~SOCK_BINDPORT_LOCK;
		newsk->proc = 0;
		newsk->backlog.head = newsk->backlog.tail = NULL;
		newsk->callback_lock = RW_LOCK_UNLOCKED;
		skb_queue_head_init(&newsk->error_queue);
		newsk->write_space = tcp_write_space;
#ifdef CONFIG_FILTER
		if ((filter = newsk->filter) != NULL)
			sk_filter_charge(newsk, filter);
#endif

		/* Now setup tcp_opt */
		newtp = &(newsk->tp_pinfo.af_tcp);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = req->rcv_isn + 1;
		newtp->snd_nxt = req->snt_isn + 1;
		newtp->snd_una = req->snt_isn + 1;
		newtp->snd_sml = req->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);

		newtp->retransmits = 0;
		newtp->backoff = 0;
		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newtp->rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->send_head = NULL;
		newtp->rcv_wup = req->rcv_isn + 1;
		newtp->write_seq = req->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = req->rcv_isn + 1;

		newtp->saw_tstamp = 0;

		newtp->dsack = 0;
		newtp->eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->num_sacks = 0;
		newtp->urg_data = 0;
		newtp->listen_opt = NULL;
		newtp->accept_queue = newtp->accept_queue_tail = NULL;
		/* Deinitialize syn_wait_lock to trap illegal accesses. */
		memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));

		/* Back to base struct sock members. */
		newsk->err = 0;
		newsk->priority = 0;
		atomic_set(&newsk->refcnt, 2);
#ifdef INET_REFCNT_DEBUG
		atomic_inc(&inet_sock_nr);
#endif
		atomic_inc(&tcp_sockets_allocated);

		if (newsk->keepopen)
			tcp_reset_keepalive_timer(newsk, keepalive_time_when(newtp));
		newsk->socket = NULL;
		newsk->sleep = NULL;

		newtp->tstamp_ok = req->tstamp_ok;
		if ((newtp->sack_ok = req->sack_ok) != 0) {
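			/* Bit 1 of sack_ok marks FACK; it is enabled
			 * system-wide by the tcp_fack sysctl.
			 */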
			if (sysctl_tcp_fack)
				newtp->sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->wscale_ok = req->wscale_ok;
		if (newtp->wscale_ok) {
			newtp->snd_wscale = req->snd_wscale;
			newtp->rcv_wscale = req->rcv_wscale;
		} else {
			newtp->snd_wscale = newtp->rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
		newtp->max_window = newtp->snd_wnd;

		if (newtp->tstamp_ok) {
			newtp->ts_recent = req->ts_recent;
			newtp->ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newtp->ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		tcp_ca_init(newtp);
		TCP_INC_STATS_BH(TcpPassiveOpens);
	}
	return newsk;
}

/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as an open_request.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct open_request *req,
			   struct open_request **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_opt ttp;
	struct sock *child;

	ttp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &ttp, 0);

		if (ttp.saw_tstamp) {
			ttp.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not required;
			 * it can be estimated (approximately)
			 * from other data.
			 */
			ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&ttp, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! It was fixed in RFC1122)
		 * on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->class->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further we reproduce the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does the sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If tp->defer_accept, we silently drop this bare ACK.  Otherwise,
	   we create an established connection.  Both ends (listening sockets)
	   accept the new incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   to a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != req->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  req->rcv_isn + 1, req->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->class->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(PAWSEstabRejected);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn + 1))
		req->ts_recent = ttp.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at req->rcv_isn+1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop the bare ACK. */
	if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn + 1) {
		req->acked = 1;
		return NULL;
	}

	/* OK, the ACK is valid: create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	tcp_synq_unlink(tp, req, prev);
	tcp_synq_removed(sk, req);

	tcp_acceptq_queue(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		req->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(EmbryonicRsts);
	if (!(flg & TCP_FLAG_RST))
		req->class->send_reset(skb);

	tcp_synq_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->state;

	if (child->lock.users == 0) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->state != state)
			parent->data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the listening
		 * socket no longer protects us.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}