1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Definitions for the TCP module.
7  *
8  * Version:	@(#)tcp.h	1.0.5	05/23/93
9  *
10  * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *
13  *		This program is free software; you can redistribute it and/or
14  *		modify it under the terms of the GNU General Public License
15  *		as published by the Free Software Foundation; either version
16  *		2 of the License, or (at your option) any later version.
17  */
18 #ifndef _TCP_H
19 #define _TCP_H
20 
21 #define TCP_DEBUG 1
22 #define FASTRETRANS_DEBUG 1
23 
24 /* Cancel timers, when they are not required. */
25 #undef TCP_CLEAR_TIMERS
26 
27 #include <linux/config.h>
28 #include <linux/tcp.h>
29 #include <linux/slab.h>
30 #include <linux/cache.h>
31 #include <net/checksum.h>
32 #include <net/sock.h>
33 #include <net/snmp.h>
34 
35 /* This is for all connections with a full identity, no wildcards.
36  * New scheme, half the table is for TIME_WAIT, the other half is
37  * for the rest.  I'll experiment with dynamic table growth later.
38  */
39 struct tcp_ehash_bucket {
40 	rwlock_t	lock;
41 	struct sock	*chain;
42 } __attribute__((__aligned__(8)));
43 
44 /* This is for listening sockets, thus all sockets which possess wildcards. */
45 #define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
46 
47 /* There are a few simple rules, which allow for local port reuse by
48  * an application.  In essence:
49  *
50  *	1) Sockets bound to different interfaces may share a local port.
51  *	   Failing that, goto test 2.
52  *	2) If all sockets have sk->reuse set, and none of them are in
53  *	   TCP_LISTEN state, the port may be shared.
54  *	   Failing that, goto test 3.
55  *	3) If all sockets are bound to a specific sk->rcv_saddr local
56  *	   address, and none of them are the same, the port may be
57  *	   shared.
58  *	   Failing this, the port cannot be shared.
59  *
60  * The interesting point is test #2.  This is what an FTP server does
61  * all day.  To optimize this case we use a specific flag bit defined
62  * below.  As we add sockets to a bind bucket list, we perform a
63  * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
64  * As long as all sockets added to a bind bucket pass this test,
65  * the flag bit will be set.
66  * The resulting situation is that tcp_v[46]_verify_bind() can just check
67  * for this flag bit: if it is set and the socket trying to bind has
68  * sk->reuse set, we don't even have to walk the owners list at all,
69  * we simply return that it is ok to bind this socket to the requested local port.
70  *
71  * Sounds like a lot of work, but it is worth it.  In a more naive
72  * implementation (ie. current FreeBSD etc.) the entire list of ports
73  * must be walked for each data port opened by an ftp server.  Needless
74  * to say, this does not scale at all.  With a couple thousand FTP
75  * users logged onto your box, isn't it nice to know that new data
76  * ports are created in O(1) time?  I thought so. ;-)	-DaveM
77  */
78 struct tcp_bind_bucket {
79 	unsigned short		port;
80 	signed short		fastreuse;
81 	struct tcp_bind_bucket	*next;
82 	struct sock		*owners;
83 	struct tcp_bind_bucket	**pprev;
84 };
85 
86 struct tcp_bind_hashbucket {
87 	spinlock_t		lock;
88 	struct tcp_bind_bucket	*chain;
89 };
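
/* Illustrative sketch (not part of the original header): how a bind-hash
 * chain is typically searched for a port, relying on the "fastreuse"
 * shortcut described above.  The helper name is made up for illustration;
 * the caller is assumed to hold head->lock.
 */
static inline struct tcp_bind_bucket *
__tcp_bind_bucket_find_example(struct tcp_bind_hashbucket *head,
			       unsigned short snum)
{
	struct tcp_bind_bucket *tb;

	for (tb = head->chain; tb != NULL; tb = tb->next)
		if (tb->port == snum)
			break;
	/* If tb->fastreuse is positive and the new socket has sk->reuse set
	 * (and is not listening), the owners list need not be walked at all.
	 */
	return tb;
}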
90 
91 extern struct tcp_hashinfo {
92 	/* This is for sockets with full identity only.  Sockets here will
93 	 * always be without wildcards and will have the following invariant:
94 	 *
95 	 *          TCP_ESTABLISHED <= sk->state < TCP_CLOSE
96 	 *
97 	 * First half of the table is for sockets not in TIME_WAIT, second half
98 	 * is for TIME_WAIT sockets only.
99 	 */
100 	struct tcp_ehash_bucket *__tcp_ehash;
101 
102 	/* Ok, let's try this, I give up, we do need a local binding
103 	 * TCP hash as well as the others for fast bind/connect.
104 	 */
105 	struct tcp_bind_hashbucket *__tcp_bhash;
106 
107 	int __tcp_bhash_size;
108 	int __tcp_ehash_size;
109 
110 	/* All sockets in TCP_LISTEN state will be in here.  This is the only
111 	 * table where wildcard'd TCP sockets can exist.  Hash function here
112 	 * is just local port number.
113 	 */
114 	struct sock *__tcp_listening_hash[TCP_LHTABLE_SIZE];
115 
116 	/* All the above members are written once at bootup and
117 	 * never written again _or_ are predominantly read-access.
118 	 *
119 	 * Now align to a new cache line as all the following members
120 	 * are often dirty.
121 	 */
122 	rwlock_t __tcp_lhash_lock ____cacheline_aligned;
123 	atomic_t __tcp_lhash_users;
124 	wait_queue_head_t __tcp_lhash_wait;
125 	spinlock_t __tcp_portalloc_lock;
126 } tcp_hashinfo;
127 
128 #define tcp_ehash	(tcp_hashinfo.__tcp_ehash)
129 #define tcp_bhash	(tcp_hashinfo.__tcp_bhash)
130 #define tcp_ehash_size	(tcp_hashinfo.__tcp_ehash_size)
131 #define tcp_bhash_size	(tcp_hashinfo.__tcp_bhash_size)
132 #define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
133 #define tcp_lhash_lock	(tcp_hashinfo.__tcp_lhash_lock)
134 #define tcp_lhash_users	(tcp_hashinfo.__tcp_lhash_users)
135 #define tcp_lhash_wait	(tcp_hashinfo.__tcp_lhash_wait)
136 #define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
137 
138 extern kmem_cache_t *tcp_bucket_cachep;
139 extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
140 						 unsigned short snum);
141 extern void tcp_bucket_unlock(struct sock *sk);
142 extern int tcp_port_rover;
143 extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
144 
145 /* These are AF independent. */
146 static __inline__ int tcp_bhashfn(__u16 lport)
147 {
148 	return (lport & (tcp_bhash_size - 1));
149 }
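
/* Illustrative sketch (not part of the original header): mapping a local
 * port to its bind-hash chain.  The helper name is made up; the mask in
 * tcp_bhashfn() is only equivalent to a modulo because tcp_bhash_size is
 * a power of two.
 */
static inline struct tcp_bind_hashbucket *__tcp_bhash_head_example(__u16 snum)
{
	return &tcp_bhash[tcp_bhashfn(snum)];
}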
150 
151 /* This is a TIME_WAIT bucket.  It works around the memory consumption
152  * problems of sockets in such a state on heavily loaded servers, but
153  * without violating the protocol specification.
154  */
155 struct tcp_tw_bucket {
156 	/* These _must_ match the beginning of struct sock precisely.
157 	 * XXX Yes I know this is gross, but I'd have to edit every single
158 	 * XXX networking file if I created a "struct sock_header". -DaveM
159 	 */
160 	__u32			daddr;
161 	__u32			rcv_saddr;
162 	__u16			dport;
163 	unsigned short		num;
164 	int			bound_dev_if;
165 	struct sock		*next;
166 	struct sock		**pprev;
167 	struct sock		*bind_next;
168 	struct sock		**bind_pprev;
169 	unsigned char		state,
170 				substate; /* "zapped" is replaced with "substate" */
171 	__u16			sport;
172 	unsigned short		family;
173 	unsigned char		reuse,
174 				rcv_wscale; /* It is also TW bucket specific */
175 	atomic_t		refcnt;
176 
177 	/* And these are ours. */
178 	int			hashent;
179 	int			timeout;
180 	__u32			rcv_nxt;
181 	__u32			snd_nxt;
182 	__u32			rcv_wnd;
183         __u32			ts_recent;
184         long			ts_recent_stamp;
185 	unsigned long		ttd;
186 	struct tcp_bind_bucket	*tb;
187 	struct tcp_tw_bucket	*next_death;
188 	struct tcp_tw_bucket	**pprev_death;
189 
190 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
191 	struct in6_addr		v6_daddr;
192 	struct in6_addr		v6_rcv_saddr;
193 #endif
194 };
195 
196 extern kmem_cache_t *tcp_timewait_cachep;
197 
198 static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
199 {
200 	if (atomic_dec_and_test(&tw->refcnt)) {
201 #ifdef INET_REFCNT_DEBUG
202 		printk(KERN_DEBUG "tw_bucket %p released\n", tw);
203 #endif
204 		kmem_cache_free(tcp_timewait_cachep, tw);
205 	}
206 }
207 
208 extern atomic_t tcp_orphan_count;
209 extern int tcp_tw_count;
210 extern void tcp_time_wait(struct sock *sk, int state, int timeo);
211 extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
212 extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
213 extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
214 
215 
216 /* Socket demux engine toys. */
217 #ifdef __BIG_ENDIAN
218 #define TCP_COMBINED_PORTS(__sport, __dport) \
219 	(((__u32)(__sport)<<16) | (__u32)(__dport))
220 #else /* __LITTLE_ENDIAN */
221 #define TCP_COMBINED_PORTS(__sport, __dport) \
222 	(((__u32)(__dport)<<16) | (__u32)(__sport))
223 #endif
224 
225 #if (BITS_PER_LONG == 64)
226 #ifdef __BIG_ENDIAN
227 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
228 	__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
229 #else /* __LITTLE_ENDIAN */
230 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
231 	__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
232 #endif /* __BIG_ENDIAN */
233 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
234 	(((*((__u64 *)&((__sk)->daddr)))== (__cookie))	&&		\
235 	 ((*((__u32 *)&((__sk)->dport)))== (__ports))   &&		\
236 	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
237 #else /* 32-bit arch */
238 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
239 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
240 	(((__sk)->daddr			== (__saddr))	&&		\
241 	 ((__sk)->rcv_saddr		== (__daddr))	&&		\
242 	 ((*((__u32 *)&((__sk)->dport)))== (__ports))   &&		\
243 	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
244 #endif /* 64-bit arch */
245 
246 #define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)			   \
247 	(((*((__u32 *)&((__sk)->dport)))== (__ports))   			&& \
248 	 ((__sk)->family		== AF_INET6)				&& \
249 	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr))		&& \
250 	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr))	&& \
251 	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
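
/* Illustrative example (not part of the original header): the combined-port
 * cookie packs both 16-bit ports into one word, e.g. on little-endian
 *
 *	TCP_COMBINED_PORTS(0x0050, 0x1F90) == 0x1F900050
 *
 * (second argument in the upper half).  The demux macros above compare this
 * cookie against a single 32-bit load of the adjacent 16-bit port fields in
 * struct sock, so two port checks cost one word compare.
 */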
252 
253 /* These can have wildcards, don't try too hard. */
254 static __inline__ int tcp_lhashfn(unsigned short num)
255 {
256 	return num & (TCP_LHTABLE_SIZE - 1);
257 }
258 
259 static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
260 {
261 	return tcp_lhashfn(sk->num);
262 }
263 
264 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
265 
266 /*
267  * Never offer a window over 32767 without using window scaling. Some
268  * poor stacks do signed 16bit maths!
269  */
270 #define MAX_TCP_WINDOW		32767U
271 
272 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
273 #define TCP_MIN_MSS		88U
274 
275 /* Minimal RCV_MSS. */
276 #define TCP_MIN_RCVMSS		536U
277 
278 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
279 #define TCP_FASTRETRANS_THRESH 3
280 
281 /* Maximal reordering. */
282 #define TCP_MAX_REORDERING	127
283 
284 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
285 #define TCP_MAX_QUICKACKS	16U
286 
287 /* urg_data states */
288 #define TCP_URG_VALID	0x0100
289 #define TCP_URG_NOTYET	0x0200
290 #define TCP_URG_READ	0x0400
291 
292 #define TCP_RETR1	3	/*
293 				 * This is how many retries it does before it
294 				 * tries to figure out if the gateway is
295 				 * down. Minimal RFC value is 3; it corresponds
296 				 * to ~3sec-8min depending on RTO.
297 				 */
298 
299 #define TCP_RETR2	15	/*
300 				 * This should take at least
301 				 * 90 minutes to time out.
302 				 * RFC1122 says that the limit is 100 sec.
303 				 * 15 is ~13-30min depending on RTO.
304 				 */
305 
306 #define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
307 				 * connection: ~180sec is RFC minimum	*/
308 
309 #define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
310 				 * connection: ~180sec is RFC minimum	*/
311 
312 
313 #define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
314 				 * socket. 7 is ~50sec-16min.
315 				 */
316 
317 
318 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
319 				  * state, about 60 seconds	*/
320 #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
321                                  /* BSD style FIN_WAIT2 deadlock breaker.
322 				  * It used to be 3min, new value is 60sec,
323 				  * to combine FIN-WAIT-2 timeout with
324 				  * TIME-WAIT timer.
325 				  */
326 
327 #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
328 #if HZ >= 100
329 #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
330 #define TCP_ATO_MIN	((unsigned)(HZ/25))
331 #else
332 #define TCP_DELACK_MIN	4U
333 #define TCP_ATO_MIN	4U
334 #endif
335 #define TCP_RTO_MAX	((unsigned)(120*HZ))
336 #define TCP_RTO_MIN	((unsigned)(HZ/5))
337 #define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/
338 
339 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
340 					                 * for local resources.
341 					                 */
342 
343 #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
344 #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
345 #define TCP_KEEPALIVE_INTVL	(75*HZ)
346 
347 #define MAX_TCP_KEEPIDLE	32767
348 #define MAX_TCP_KEEPINTVL	32767
349 #define MAX_TCP_KEEPCNT		127
350 #define MAX_TCP_SYNCNT		127
351 
352 /* TIME_WAIT reaping mechanism. */
353 #define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
354 #define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
355 
356 #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
357 #define TCP_SYNQ_HSIZE		512	/* Size of SYNACK hash table */
358 
359 #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
360 #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
361 					 * after this time. It should be equal
362 					 * to (or greater than) TCP_TIMEWAIT_LEN
363 					 * to provide reliability equal to that
364 					 * provided by the TIME-WAIT state.
365 					 */
366 #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
367 					 * timestamps. It must be less than
368 					 * minimal timewait lifetime.
369 					 */
370 
371 #define TCP_TW_RECYCLE_SLOTS_LOG	5
372 #define TCP_TW_RECYCLE_SLOTS		(1<<TCP_TW_RECYCLE_SLOTS_LOG)
373 
374 /* If time > 4 sec, it is the "slow" path and no recycling is required,
375    so we select the tick to cover a range of about 4 seconds.
376  */
377 
378 #if HZ <= 16 || HZ > 4096
379 # error Unsupported: HZ <= 16 or HZ > 4096
380 #elif HZ <= 32
381 # define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
382 #elif HZ <= 64
383 # define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
384 #elif HZ <= 128
385 # define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
386 #elif HZ <= 256
387 # define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
388 #elif HZ <= 512
389 # define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
390 #elif HZ <= 1024
391 # define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
392 #elif HZ <= 2048
393 # define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
394 #else
395 # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
396 #endif
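
/* Illustrative arithmetic (not part of the original header): with HZ == 100
 * the "HZ <= 128" branch applies, so TCP_TW_RECYCLE_TICK = 7 + 2 - 5 = 4.
 * Each recycle slot then covers 1 << 4 = 16 jiffies, and the 32 slots
 * together span 512 jiffies, i.e. about 5 seconds at HZ=100 (exactly 4
 * seconds at HZ=128), matching the "about 4 seconds" range noted above.
 */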
397 
398 #define BICTCP_BETA_SCALE    1024	/* Scale factor for the beta calculation:
399 					 * max_cwnd = snd_cwnd * beta
400 					 */
401 #define BICTCP_MAX_INCREMENT 32		/*
402 					 * Limit on the amount of
403 					 * increment allowed during
404 					 * binary search.
405 					 */
406 #define BICTCP_FUNC_OF_MIN_INCR 11	/*
407 					 * log(B/Smin)/log(B/(B-1))+1,
408 					 * Smin:min increment
409 					 * B:log factor
410 					 */
411 #define BICTCP_B		4	 /*
412 					  * In binary search,
413 					  * go to point (max+min)/N
414 					  */
415 
416 /*
417  *	TCP option
418  */
419 
420 #define TCPOPT_NOP		1	/* Padding */
421 #define TCPOPT_EOL		0	/* End of options */
422 #define TCPOPT_MSS		2	/* Segment size negotiating */
423 #define TCPOPT_WINDOW		3	/* Window scaling */
424 #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
425 #define TCPOPT_SACK             5       /* SACK Block */
426 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
427 
428 /*
429  *     TCP option lengths
430  */
431 
432 #define TCPOLEN_MSS            4
433 #define TCPOLEN_WINDOW         3
434 #define TCPOLEN_SACK_PERM      2
435 #define TCPOLEN_TIMESTAMP      10
436 
437 /* But this is what stacks really send out. */
438 #define TCPOLEN_TSTAMP_ALIGNED		12
439 #define TCPOLEN_WSCALE_ALIGNED		4
440 #define TCPOLEN_SACKPERM_ALIGNED	4
441 #define TCPOLEN_SACK_BASE		2
442 #define TCPOLEN_SACK_BASE_ALIGNED	4
443 #define TCPOLEN_SACK_PERBLOCK		8
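
/* Illustrative example (not part of the original header): a SYN built by
 * tcp_syn_build_options() with timestamps, SACK and window scaling carries
 * 4 (MSS) + 12 (TCPOLEN_TSTAMP_ALIGNED, with SACK-permitted folded into the
 * padding slot) + 4 (TCPOLEN_WSCALE_ALIGNED) = 20 bytes of options, for a
 * 40-byte TCP header.
 */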
444 
445 #define TCP_TIME_RETRANS	1	/* Retransmit timer */
446 #define TCP_TIME_DACK		2	/* Delayed ack timer */
447 #define TCP_TIME_PROBE0		3	/* Zero window probe timer */
448 #define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */
449 
450 /* sysctl variables for tcp */
451 extern int sysctl_max_syn_backlog;
452 extern int sysctl_tcp_timestamps;
453 extern int sysctl_tcp_window_scaling;
454 extern int sysctl_tcp_sack;
455 extern int sysctl_tcp_fin_timeout;
456 extern int sysctl_tcp_tw_recycle;
457 extern int sysctl_tcp_keepalive_time;
458 extern int sysctl_tcp_keepalive_probes;
459 extern int sysctl_tcp_keepalive_intvl;
460 extern int sysctl_tcp_syn_retries;
461 extern int sysctl_tcp_synack_retries;
462 extern int sysctl_tcp_retries1;
463 extern int sysctl_tcp_retries2;
464 extern int sysctl_tcp_orphan_retries;
465 extern int sysctl_tcp_syncookies;
466 extern int sysctl_tcp_retrans_collapse;
467 extern int sysctl_tcp_stdurg;
468 extern int sysctl_tcp_rfc1337;
469 extern int sysctl_tcp_abort_on_overflow;
470 extern int sysctl_tcp_max_orphans;
471 extern int sysctl_tcp_max_tw_buckets;
472 extern int sysctl_tcp_fack;
473 extern int sysctl_tcp_reordering;
474 extern int sysctl_tcp_ecn;
475 extern int sysctl_tcp_dsack;
476 extern int sysctl_tcp_mem[3];
477 extern int sysctl_tcp_wmem[3];
478 extern int sysctl_tcp_rmem[3];
479 extern int sysctl_tcp_app_win;
480 extern int sysctl_tcp_adv_win_scale;
481 extern int sysctl_tcp_tw_reuse;
482 extern int sysctl_tcp_frto;
483 extern int sysctl_tcp_low_latency;
484 extern int sysctl_tcp_westwood;
485 extern int sysctl_tcp_vegas_cong_avoid;
486 extern int sysctl_tcp_vegas_alpha;
487 extern int sysctl_tcp_vegas_beta;
488 extern int sysctl_tcp_vegas_gamma;
489 extern int sysctl_tcp_nometrics_save;
490 extern int sysctl_tcp_bic;
491 extern int sysctl_tcp_bic_fast_convergence;
492 extern int sysctl_tcp_bic_low_window;
493 extern int sysctl_tcp_bic_beta;
494 extern int sysctl_tcp_default_win_scale;
495 extern int sysctl_tcp_moderate_rcvbuf;
496 
497 extern atomic_t tcp_memory_allocated;
498 extern atomic_t tcp_sockets_allocated;
499 extern int tcp_memory_pressure;
500 
501 struct open_request;
502 
503 struct or_calltable {
504 	int  family;
505 	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry*);
506 	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
507 	void (*destructor)	(struct open_request *req);
508 	void (*send_reset)	(struct sk_buff *skb);
509 };
510 
511 struct tcp_v4_open_req {
512 	__u32			loc_addr;
513 	__u32			rmt_addr;
514 	struct ip_options	*opt;
515 };
516 
517 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
518 struct tcp_v6_open_req {
519 	struct in6_addr		loc_addr;
520 	struct in6_addr		rmt_addr;
521 	struct sk_buff		*pktopts;
522 	int			iif;
523 };
524 #endif
525 
526 /* this structure is too big */
527 struct open_request {
528 	struct open_request	*dl_next; /* Must be first member! */
529 	__u32			rcv_isn;
530 	__u32			snt_isn;
531 	__u16			rmt_port;
532 	__u16			mss;
533 	__u8			retrans;
534 	__u8			__pad;
535 	__u16	snd_wscale : 4,
536 		rcv_wscale : 4,
537 		tstamp_ok : 1,
538 		sack_ok : 1,
539 		wscale_ok : 1,
540 		ecn_ok : 1,
541 		acked : 1;
542 	/* The following two fields can be easily recomputed I think -AK */
543 	__u32			window_clamp;	/* window clamp at creation time */
544 	__u32			rcv_wnd;	/* rcv_wnd offered first time */
545 	__u32			ts_recent;
546 	unsigned long		expires;
547 	struct or_calltable	*class;
548 	struct sock		*sk;
549 	union {
550 		struct tcp_v4_open_req v4_req;
551 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
552 		struct tcp_v6_open_req v6_req;
553 #endif
554 	} af;
555 };
556 
557 /* SLAB cache for open requests. */
558 extern kmem_cache_t *tcp_openreq_cachep;
559 
560 #define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
561 #define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)
562 
563 static inline void tcp_openreq_free(struct open_request *req)
564 {
565 	req->class->destructor(req);
566 	tcp_openreq_fastfree(req);
567 }
568 
569 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
570 #define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
571 #else
572 #define TCP_INET_FAMILY(fam) 1
573 #endif
574 
575 /*
576  *	Pointers to address related TCP functions
577  *	(i.e. things that depend on the address family)
578  *
579  * 	BUGGG_FUTURE: the whole idea behind this struct is wrong.
580  *	It mixes the socket frontend with transport functions.
581  *	With port sharing between IPv6/v4 its only advantage is that
582  *	poor IPv6 needs to permanently recheck that it
583  *	is still IPv6 8)8) It must be cleaned up as soon as possible.
584  *						--ANK (980802)
585  */
586 
587 struct tcp_func {
588 	int			(*queue_xmit)		(struct sk_buff *skb,
589 							 int ipfragok);
590 
591 	void			(*send_check)		(struct sock *sk,
592 							 struct tcphdr *th,
593 							 int len,
594 							 struct sk_buff *skb);
595 
596 	int			(*rebuild_header)	(struct sock *sk);
597 
598 	int			(*conn_request)		(struct sock *sk,
599 							 struct sk_buff *skb);
600 
601 	struct sock *		(*syn_recv_sock)	(struct sock *sk,
602 							 struct sk_buff *skb,
603 							 struct open_request *req,
604 							 struct dst_entry *dst);
605 
606 	int			(*remember_stamp)	(struct sock *sk);
607 
608 	__u16			net_header_len;
609 
610 	int			(*setsockopt)		(struct sock *sk,
611 							 int level,
612 							 int optname,
613 							 char *optval,
614 							 int optlen);
615 
616 	int			(*getsockopt)		(struct sock *sk,
617 							 int level,
618 							 int optname,
619 							 char *optval,
620 							 int *optlen);
621 
622 
623 	void			(*addr2sockaddr)	(struct sock *sk,
624 							 struct sockaddr *);
625 
626 	int sockaddr_len;
627 };
628 
629 /*
630  * The next routines deal with comparing 32 bit unsigned ints
631  * and worry about wraparound (automatic with unsigned arithmetic).
632  */
633 
634 static inline int before(__u32 seq1, __u32 seq2)
635 {
636         return (__s32)(seq1-seq2) < 0;
637 }
638 
639 static inline int after(__u32 seq1, __u32 seq2)
640 {
641 	return (__s32)(seq2-seq1) < 0;
642 }
643 
644 
645 /* is s2<=s1<=s3 ? */
646 static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
647 {
648 	return seq3 - seq2 >= seq1 - seq2;
649 }
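
/* Illustrative sketch (not part of the original header): the signed
 * difference makes these helpers wrap correctly at 2^32.  The function
 * name is made up for illustration.
 */
static inline int tcp_seq_wrap_example(void)
{
	/* 0xfffffff0 still counts as "before" 0x10 across the wrap point. */
	return before(0xfffffff0, 0x00000010) &&
	       after(0x00000010, 0xfffffff0) &&
	       between(0x00000005, 0xfffffff0, 0x00000010);
}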
650 
651 
652 extern struct proto tcp_prot;
653 
654 extern struct tcp_mib tcp_statistics[NR_CPUS*2];
655 #define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
656 #define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
657 #define TCP_INC_STATS_USER(field) 	SNMP_INC_STATS_USER(tcp_statistics, field)
658 #define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
659 #define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)
660 
661 extern void			tcp_put_port(struct sock *sk);
662 extern void			__tcp_put_port(struct sock *sk);
663 extern void			tcp_inherit_port(struct sock *sk, struct sock *child);
664 
665 extern void			tcp_v4_err(struct sk_buff *skb, u32);
666 
667 extern void			tcp_shutdown (struct sock *sk, int how);
668 
669 extern int			tcp_v4_rcv(struct sk_buff *skb);
670 
671 extern int			tcp_v4_remember_stamp(struct sock *sk);
672 
673 extern int		    	tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
674 
675 extern int			tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);
676 extern ssize_t			tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
677 
678 extern int			tcp_ioctl(struct sock *sk,
679 					  int cmd,
680 					  unsigned long arg);
681 
682 extern int			tcp_rcv_state_process(struct sock *sk,
683 						      struct sk_buff *skb,
684 						      struct tcphdr *th,
685 						      unsigned len);
686 
687 extern int			tcp_rcv_established(struct sock *sk,
688 						    struct sk_buff *skb,
689 						    struct tcphdr *th,
690 						    unsigned len);
691 
692 extern void			tcp_rcv_space_adjust(struct sock *sk);
693 
694 enum tcp_ack_state_t
695 {
696 	TCP_ACK_SCHED = 1,
697 	TCP_ACK_TIMER = 2,
698 	TCP_ACK_PUSHED= 4
699 };
700 
701 static inline void tcp_schedule_ack(struct tcp_opt *tp)
702 {
703 	tp->ack.pending |= TCP_ACK_SCHED;
704 }
705 
706 static inline int tcp_ack_scheduled(struct tcp_opt *tp)
707 {
708 	return tp->ack.pending&TCP_ACK_SCHED;
709 }
710 
711 static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
712 {
713 	if (tp->ack.quick && --tp->ack.quick == 0) {
714 		/* Leaving quickack mode we deflate ATO. */
715 		tp->ack.ato = TCP_ATO_MIN;
716 	}
717 }
718 
719 extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
720 
721 static __inline__ void tcp_delack_init(struct tcp_opt *tp)
722 {
723 	memset(&tp->ack, 0, sizeof(tp->ack));
724 }
725 
726 static inline void tcp_clear_options(struct tcp_opt *tp)
727 {
728  	tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
729 }
730 
731 enum tcp_tw_status
732 {
733 	TCP_TW_SUCCESS = 0,
734 	TCP_TW_RST = 1,
735 	TCP_TW_ACK = 2,
736 	TCP_TW_SYN = 3
737 };
738 
739 
740 extern enum tcp_tw_status	tcp_timewait_state_process(struct tcp_tw_bucket *tw,
741 							   struct sk_buff *skb,
742 							   struct tcphdr *th,
743 							   unsigned len);
744 
745 extern struct sock *		tcp_check_req(struct sock *sk,struct sk_buff *skb,
746 					      struct open_request *req,
747 					      struct open_request **prev);
748 extern int			tcp_child_process(struct sock *parent,
749 						  struct sock *child,
750 						  struct sk_buff *skb);
751 extern void			tcp_enter_frto(struct sock *sk);
752 extern void			tcp_enter_loss(struct sock *sk, int how);
753 extern void			tcp_clear_retrans(struct tcp_opt *tp);
754 extern void			tcp_update_metrics(struct sock *sk);
755 
756 extern void			tcp_close(struct sock *sk,
757 					  long timeout);
758 extern struct sock *		tcp_accept(struct sock *sk, int flags, int *err);
759 extern unsigned int		tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
760 extern void			tcp_write_space(struct sock *sk);
761 
762 extern int			tcp_getsockopt(struct sock *sk, int level,
763 					       int optname, char *optval,
764 					       int *optlen);
765 extern int			tcp_setsockopt(struct sock *sk, int level,
766 					       int optname, char *optval,
767 					       int optlen);
768 extern void			tcp_set_keepalive(struct sock *sk, int val);
769 extern int			tcp_recvmsg(struct sock *sk,
770 					    struct msghdr *msg,
771 					    int len, int nonblock,
772 					    int flags, int *addr_len);
773 
774 extern int			tcp_listen_start(struct sock *sk);
775 
776 extern void			tcp_parse_options(struct sk_buff *skb,
777 						  struct tcp_opt *tp,
778 						  int estab);
779 
780 /*
781  *	TCP v4 functions exported for the inet6 API
782  */
783 
784 extern int		       	tcp_v4_rebuild_header(struct sock *sk);
785 
786 extern int		       	tcp_v4_build_header(struct sock *sk,
787 						    struct sk_buff *skb);
788 
789 extern void		       	tcp_v4_send_check(struct sock *sk,
790 						  struct tcphdr *th, int len,
791 						  struct sk_buff *skb);
792 
793 extern int			tcp_v4_conn_request(struct sock *sk,
794 						    struct sk_buff *skb);
795 
796 extern struct sock *		tcp_create_openreq_child(struct sock *sk,
797 							 struct open_request *req,
798 							 struct sk_buff *skb);
799 
800 extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
801 						     struct sk_buff *skb,
802 						     struct open_request *req,
803 							struct dst_entry *dst);
804 
805 extern int			tcp_v4_do_rcv(struct sock *sk,
806 					      struct sk_buff *skb);
807 
808 extern int			tcp_v4_connect(struct sock *sk,
809 					       struct sockaddr *uaddr,
810 					       int addr_len);
811 
812 extern int			tcp_connect(struct sock *sk);
813 
814 extern struct sk_buff *		tcp_make_synack(struct sock *sk,
815 						struct dst_entry *dst,
816 						struct open_request *req);
817 
818 extern int			tcp_disconnect(struct sock *sk, int flags);
819 
820 extern void			tcp_unhash(struct sock *sk);
821 
822 extern int			tcp_v4_hash_connecting(struct sock *sk);
823 
824 
825 /* From syncookies.c */
826 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
827 				    struct ip_options *opt);
828 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
829 				     __u16 *mss);
830 
831 /* tcp_output.c */
832 
833 extern int tcp_write_xmit(struct sock *, int nonagle);
834 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
835 extern void tcp_xmit_retransmit_queue(struct sock *);
836 extern void tcp_simple_retransmit(struct sock *);
837 
838 extern void tcp_send_probe0(struct sock *);
839 extern void tcp_send_partial(struct sock *);
840 extern int  tcp_write_wakeup(struct sock *);
841 extern void tcp_send_fin(struct sock *sk);
842 extern void tcp_send_active_reset(struct sock *sk, int priority);
843 extern int  tcp_send_synack(struct sock *);
844 extern int  tcp_transmit_skb(struct sock *, struct sk_buff *);
845 extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now);
846 extern void tcp_push_one(struct sock *, unsigned mss_now);
847 extern void tcp_send_ack(struct sock *sk);
848 extern void tcp_send_delayed_ack(struct sock *sk);
849 
850 /* tcp_timer.c */
851 extern void tcp_init_xmit_timers(struct sock *);
852 extern void tcp_clear_xmit_timers(struct sock *);
853 
854 extern void tcp_delete_keepalive_timer (struct sock *);
855 extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
856 extern int tcp_sync_mss(struct sock *sk, u32 pmtu);
857 
858 extern const char timer_bug_msg[];
859 
860 /* Read 'sendfile()'-style from a TCP socket */
861 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
862 				unsigned int, size_t);
863 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
864 			 sk_read_actor_t recv_actor);
865 
866 static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
867 {
868 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
869 
870 	switch (what) {
871 	case TCP_TIME_RETRANS:
872 	case TCP_TIME_PROBE0:
873 		tp->pending = 0;
874 
875 #ifdef TCP_CLEAR_TIMERS
876 		if (timer_pending(&tp->retransmit_timer) &&
877 		    del_timer(&tp->retransmit_timer))
878 			__sock_put(sk);
879 #endif
880 		break;
881 	case TCP_TIME_DACK:
882 		tp->ack.blocked = 0;
883 		tp->ack.pending = 0;
884 
885 #ifdef TCP_CLEAR_TIMERS
886 		if (timer_pending(&tp->delack_timer) &&
887 		    del_timer(&tp->delack_timer))
888 			__sock_put(sk);
889 #endif
890 		break;
891 	default:
892 		printk(timer_bug_msg);
893 		return;
894 	};
895 
896 }
897 
898 /*
899  *	Reset the retransmission timer
900  */
901 static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
902 {
903 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
904 
905 	if (when > TCP_RTO_MAX) {
906 #ifdef TCP_DEBUG
907 		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
908 #endif
909 		when = TCP_RTO_MAX;
910 	}
911 
912 	switch (what) {
913 	case TCP_TIME_RETRANS:
914 	case TCP_TIME_PROBE0:
915 		tp->pending = what;
916 		tp->timeout = jiffies+when;
917 		if (!mod_timer(&tp->retransmit_timer, tp->timeout))
918 			sock_hold(sk);
919 		break;
920 
921 	case TCP_TIME_DACK:
922 		tp->ack.pending |= TCP_ACK_TIMER;
923 		tp->ack.timeout = jiffies+when;
924 		if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
925 			sock_hold(sk);
926 		break;
927 
928 	default:
929 		printk(KERN_DEBUG "bug: unknown timer value\n");
930 	};
931 }
932 
933 /* Compute the current effective MSS, taking SACKs and IP options,
934  * and even PMTU discovery events into account.
935  */
936 
937 static __inline__ unsigned int tcp_current_mss(struct sock *sk)
938 {
939 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
940 	struct dst_entry *dst = __sk_dst_get(sk);
941 	int mss_now = tp->mss_cache;
942 
943 	if (dst && dst->pmtu != tp->pmtu_cookie)
944 		mss_now = tcp_sync_mss(sk, dst->pmtu);
945 
946 	if (tp->eff_sacks)
947 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
948 			    (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
949 	return mss_now;
950 }
951 
952 /* Initialize RCV_MSS value.
953  * RCV_MSS is our guess about the MSS used by the peer.
954  * We have no direct information about the MSS.
955  * It's better to underestimate RCV_MSS than to overestimate it.
956  * Overestimations make us ACK less frequently than needed.
957  * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
958  */
959 
960 static inline void tcp_initialize_rcv_mss(struct sock *sk)
961 {
962 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
963 	unsigned int hint = min(tp->advmss, tp->mss_cache);
964 
965 	hint = min(hint, tp->rcv_wnd/2);
966 	hint = min(hint, TCP_MIN_RCVMSS);
967 	hint = max(hint, TCP_MIN_MSS);
968 
969 	tp->ack.rcv_mss = hint;
970 }
971 
972 static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
973 {
974 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
975 			       ntohl(TCP_FLAG_ACK) |
976 			       snd_wnd);
977 }
978 
979 static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
980 {
981 	__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
982 }
983 
984 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
985 {
986 	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
987 	    tp->rcv_wnd &&
988 	    atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
989 	    !tp->urg_data)
990 		tcp_fast_path_on(tp);
991 }
992 
993 /* Compute the actual receive window we are currently advertising.
994  * Rcv_nxt can be after the window if our peer pushes more data
995  * than the offered window.
996  */
997 static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
998 {
999 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
1000 
1001 	if (win < 0)
1002 		win = 0;
1003 	return (u32) win;
1004 }
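
/* Illustrative example (not part of the original header): with
 * rcv_wup = 1000 and rcv_wnd = 4000 the advertised right edge is 5000;
 * if rcv_nxt has advanced to 4500, tcp_receive_window() returns 500, and
 * if the peer pushed past the edge the result clamps to 0.
 */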
1005 
1006 /* Choose a new window, without checks for shrinking, and without
1007  * scaling applied to the result.  The caller does these things
1008  * if necessary.  This is a "raw" window selection.
1009  */
1010 extern u32	__tcp_select_window(struct sock *sk);
1011 
1012 /* TCP timestamps are only 32-bits, this causes a slight
1013  * complication on 64-bit systems since we store a snapshot
1014  * of jiffies in the buffer control blocks below.  We deliberately
1015  * use only the low 32 bits of jiffies and hide the ugly
1016  * casts with the following macro.
1017  */
1018 #define tcp_time_stamp		((__u32)(jiffies))
1019 
1020 /* This is what the send packet queueing engine uses to pass
1021  * TCP per-packet control information to the transmission
1022  * code.  We also store the host-order sequence numbers in
1023  * here too.  This is 36 bytes on 32-bit architectures,
1024  * 40 bytes on 64-bit machines; if this grows, please adjust
1025  * skbuff.h:skbuff->cb[xxx] size appropriately.
1026  */
1027 struct tcp_skb_cb {
1028 	union {
1029 		struct inet_skb_parm	h4;
1030 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
1031 		struct inet6_skb_parm	h6;
1032 #endif
1033 	} header;	/* For incoming frames		*/
1034 	__u32		seq;		/* Starting sequence number	*/
1035 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
1036 	__u32		when;		/* used to compute rtt's	*/
1037 	__u8		flags;		/* TCP header flags.		*/
1038 
1039 	/* NOTE: These must match up to the flags byte in a
1040 	 *       real TCP header.
1041 	 */
1042 #define TCPCB_FLAG_FIN		0x01
1043 #define TCPCB_FLAG_SYN		0x02
1044 #define TCPCB_FLAG_RST		0x04
1045 #define TCPCB_FLAG_PSH		0x08
1046 #define TCPCB_FLAG_ACK		0x10
1047 #define TCPCB_FLAG_URG		0x20
1048 #define TCPCB_FLAG_ECE		0x40
1049 #define TCPCB_FLAG_CWR		0x80
1050 
1051 	__u8		sacked;		/* State flags for SACK/FACK.	*/
1052 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
1053 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
1054 #define TCPCB_LOST		0x04	/* SKB is lost			*/
1055 #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
1056 
1057 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
1058 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
1059 
1060 #define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/
1061 
1062 #define TCPCB_AT_TAIL		(TCPCB_URG)
1063 
1064 	__u16		urg_ptr;	/* Valid when the URG flag is set.	*/
1065 	__u32		ack_seq;	/* Sequence number ACK'd	*/
1066 };
1067 
1068 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
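
/* Illustrative sketch (not part of the original header): how the send path
 * typically stamps an skb's control block.  The function name is made up
 * for illustration.
 */
static inline void tcp_skb_cb_stamp_example(struct sk_buff *skb, __u32 seq)
{
	TCP_SKB_CB(skb)->seq = seq;
	/* end_seq also counts SYN and FIN; a pure data segment adds only len. */
	TCP_SKB_CB(skb)->end_seq = seq + skb->len;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
}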
1069 
1070 #define for_retrans_queue(skb, sk, tp) \
1071 		for (skb = (sk)->write_queue.next;			\
1072 		     (skb != (tp)->send_head) &&			\
1073 		     (skb != (struct sk_buff *)&(sk)->write_queue);	\
1074 		     skb=skb->next)
1075 
1076 
1077 #include <net/tcp_ecn.h>
1078 
1079 
1080 /*
1081  *	Compute minimal free write space needed to queue new packets.
1082  */
1083 static inline int tcp_min_write_space(struct sock *sk)
1084 {
1085 	return sk->wmem_queued/2;
1086 }
1087 
1088 static inline int tcp_wspace(struct sock *sk)
1089 {
1090 	return sk->sndbuf - sk->wmem_queued;
1091 }
1092 
1093 
1094 /* This determines how many packets are "in the network" to the best
1095  * of our knowledge.  In many cases it is conservative, but where
1096  * detailed information is available from the receiver (via SACK
1097  * blocks etc.) we can make more aggressive calculations.
1098  *
1099  * Use this for decisions involving congestion control; use just
1100  * tp->packets_out to determine if the send queue is empty or not.
1101  *
1102  * Read this equation as:
1103  *
1104  *	"Packets sent once on transmission queue" MINUS
1105  *	"Packets left network, but not honestly ACKed yet" PLUS
1106  *	"Packets fast retransmitted"
1107  */
1108 static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
1109 {
1110 	return tp->packets_out - tp->left_out + tp->retrans_out;
1111 }
1112 
1113 /*
1114  * Which congestion algorithm is in use on the connection.
1115  */
1116 #define tcp_is_vegas(__tp)	((__tp)->adv_cong == TCP_VEGAS)
1117 #define tcp_is_westwood(__tp)	((__tp)->adv_cong == TCP_WESTWOOD)
1118 #define tcp_is_bic(__tp)	((__tp)->adv_cong == TCP_BIC)
1119 
1120 /* Recalculate snd_ssthresh, we want to set it to:
1121  *
1122  * Reno:
1123  * 	one half the current congestion window, but no
1124  *	less than two segments
1125  *
1126  * BIC:
1127  *	behave like Reno until low_window is reached,
1128  *	then increase congestion window slowly
1129  */
1130 static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
1131 {
1132 	if (tcp_is_bic(tp)) {
1133 		if (sysctl_tcp_bic_fast_convergence &&
1134 		    tp->snd_cwnd < tp->bictcp.last_max_cwnd)
1135 			tp->bictcp.last_max_cwnd = (tp->snd_cwnd *
1136 						    (BICTCP_BETA_SCALE
1137 						     + sysctl_tcp_bic_beta))
1138 				/ (2 * BICTCP_BETA_SCALE);
1139 		else
1140 			tp->bictcp.last_max_cwnd = tp->snd_cwnd;
1141 
1142 		if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
1143 			return max((tp->snd_cwnd * sysctl_tcp_bic_beta)
1144 				   / BICTCP_BETA_SCALE, 2U);
1145 	}
1146 
1147 	return max(tp->snd_cwnd >> 1U, 2U);
1148 }
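
/* Illustrative example (not part of the original header): for plain Reno
 * with snd_cwnd = 20, tcp_recalc_ssthresh() returns max(20 >> 1, 2) = 10.
 * For BIC with, say, sysctl_tcp_bic_beta = 819 (0.8 scaled by
 * BICTCP_BETA_SCALE) and snd_cwnd = 100 above the low-window threshold,
 * it returns (100 * 819) / 1024 = 79.
 */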
1149 
1150 /* Stop taking Vegas samples for now. */
1151 #define tcp_vegas_disable(__tp)	((__tp)->vegas.doing_vegas_now = 0)
1152 
1153 static inline void tcp_vegas_enable(struct tcp_opt *tp)
1154 {
1155 	/* There are several situations when we must "re-start" Vegas:
1156 	 *
1157 	 *  o when a connection is established
1158 	 *  o after an RTO
1159 	 *  o after fast recovery
1160 	 *  o when we send a packet and there is no outstanding
1161 	 *    unacknowledged data (restarting an idle connection)
1162 	 *
1163 	 * In these circumstances we cannot do a Vegas calculation at the
1164 	 * end of the first RTT, because any calculation we do is using
1165 	 * stale info -- both the saved cwnd and congestion feedback are
1166 	 * stale.
1167 	 *
1168 	 * Instead we must wait until the completion of an RTT during
1169 	 * which we actually receive ACKs.
1170 	 */
1171 
1172 	/* Begin taking Vegas samples next time we send something. */
1173 	tp->vegas.doing_vegas_now = 1;
1174 
1175 	/* Set the beginning of the next send window. */
1176 	tp->vegas.beg_snd_nxt = tp->snd_nxt;
1177 
1178 	tp->vegas.cntRTT = 0;
1179 	tp->vegas.minRTT = 0x7fffffff;
1180 }
1181 
1182 /* Should we be taking Vegas samples right now? */
1183 #define tcp_vegas_enabled(__tp)	((__tp)->vegas.doing_vegas_now)
1184 
1185 extern void tcp_ca_init(struct tcp_opt *tp);
1186 
1187 static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
1188 {
1189 	if (tcp_is_vegas(tp)) {
1190 		if (ca_state == TCP_CA_Open)
1191 			tcp_vegas_enable(tp);
1192 		else
1193 			tcp_vegas_disable(tp);
1194 	}
1195 	tp->ca_state = ca_state;
1196 }
1197 
1198 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1199  * The exception is rate halving phase, when cwnd is decreasing towards
1200  * ssthresh.
1201  */
1202 static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
1203 {
1204 	if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
1205 		return tp->snd_ssthresh;
1206 	else
1207 		return max(tp->snd_ssthresh,
1208 			   ((tp->snd_cwnd >> 1) +
1209 			    (tp->snd_cwnd >> 2)));
1210 }
1211 
1212 static inline void tcp_sync_left_out(struct tcp_opt *tp)
1213 {
1214 	if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
1215 		tp->sacked_out = tp->packets_out - tp->lost_out;
1216 	tp->left_out = tp->sacked_out + tp->lost_out;
1217 }
1218 
1219 extern void tcp_cwnd_application_limited(struct sock *sk);
1220 
1221 /* Congestion window validation. (RFC2861) */
1222 
1223 static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
1224 {
1225 	if (tp->packets_out >= tp->snd_cwnd) {
1226 		/* Network is fed fully. */
1227 		tp->snd_cwnd_used = 0;
1228 		tp->snd_cwnd_stamp = tcp_time_stamp;
1229 	} else {
1230 		/* Network starves. */
1231 		if (tp->packets_out > tp->snd_cwnd_used)
1232 			tp->snd_cwnd_used = tp->packets_out;
1233 
1234 		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
1235 			tcp_cwnd_application_limited(sk);
1236 	}
1237 }
1238 
1239 /* Set slow start threshold and cwnd without falling back to slow start */
1240 static inline void __tcp_enter_cwr(struct tcp_opt *tp)
1241 {
1242 	tp->undo_marker = 0;
1243 	tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
1244 	tp->snd_cwnd = min(tp->snd_cwnd,
1245 			   tcp_packets_in_flight(tp) + 1U);
1246 	tp->snd_cwnd_cnt = 0;
1247 	tp->high_seq = tp->snd_nxt;
1248 	tp->snd_cwnd_stamp = tcp_time_stamp;
1249 	TCP_ECN_queue_cwr(tp);
1250 }
1251 
1252 static inline void tcp_enter_cwr(struct tcp_opt *tp)
1253 {
1254 	tp->prior_ssthresh = 0;
1255 	if (tp->ca_state < TCP_CA_CWR) {
1256 		__tcp_enter_cwr(tp);
1257 		tcp_set_ca_state(tp, TCP_CA_CWR);
1258 	}
1259 }
1260 
1261 extern __u32 tcp_init_cwnd(struct tcp_opt *tp);
1262 
1263 /* Slow start with delack produces 3 packets of burst, so that
1264  * it is safe "de facto".
1265  */
1266 static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
1267 {
1268 	return 3;
1269 }
1270 
1271 static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
1272 {
1273 	return after(tp->snd_sml,tp->snd_una) &&
1274 		!after(tp->snd_sml, tp->snd_nxt);
1275 }
1276 
1277 static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
1278 {
1279 	if (skb->len < mss)
1280 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1281 }
1282 
1283 /* Return 0 if the packet can be sent now without violating Nagle's rules:
1284    1. It is full sized.
1285    2. Or it contains FIN.
1286    3. Or TCP_NODELAY was set.
1287    4. Or TCP_CORK is not set, and all sent packets are ACKed.
1288       With Minshall's modification: all sent small packets are ACKed.
1289  */
1290 
1291 static __inline__ int
1292 tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
1293 {
1294 	return (skb->len < mss_now &&
1295 		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1296 		(nonagle == 2 ||
1297 		 (!nonagle &&
1298 		  tp->packets_out &&
1299 		  tcp_minshall_check(tp))));
1300 }
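
/* Illustrative example (not part of the original header): a 100-byte
 * segment with mss_now = 1460, no FIN, nonagle == 0, packets_out > 0 and a
 * small unacknowledged segment still outstanding makes tcp_nagle_check()
 * return nonzero, so tcp_snd_test() keeps it queued; with TCP_NODELAY
 * (nonagle == 1) it is sent immediately, window and cwnd permitting.
 */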
1301 
1302 /* This checks if the data bearing packet SKB (usually tp->send_head)
1303  * should be put on the wire right now.
1304  */
1305 static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
1306 				   unsigned cur_mss, int nonagle)
1307 {
1308 	/*	RFC 1122 - section 4.2.3.4
1309 	 *
1310 	 *	We must queue if
1311 	 *
1312 	 *	a) The right edge of this frame exceeds the window
1313 	 *	b) There are packets in flight and we have a small segment
1314 	 *	   [SWS avoidance and Nagle algorithm]
1315 	 *	   (part of SWS is done on packetization)
1316 	 *	   The Minshall version reads: there are no _small_
1317 	 *	   segments in flight. (tcp_nagle_check)
1318 	 *	c) We have too many packets 'in flight'
1319 	 *
1320 	 * 	Don't use the nagle rule for urgent data (or
1321 	 *	for the final FIN -DaveM).
1322 	 *
1323 	 *	Also, the Nagle rule does not apply to frames that
1324 	 *	sit in the middle of the queue (they have no chance
1325 	 *	to get new data) or when the room at the tail of the skb
1326 	 *	is not enough to save anything significant (<32 bytes for now).
1327 	 */
1328 
1329 	/* Don't be strict about the congestion window for the
1330 	 * final FIN frame.  -DaveM
1331 	 */
1332 	return ((nonagle==1 || tp->urg_mode
1333 		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
1334 		((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
1335 		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
1336 		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
1337 }
1338 
1339 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
1340 {
1341 	if (!tp->packets_out && !tp->pending)
1342 		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
1343 }
1344 
1345 static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
1346 {
1347 	return (skb->next == (struct sk_buff*)&sk->write_queue);
1348 }
1349 
1350 /* Push out any pending frames which were held back due to
1351  * TCP_CORK or attempt at coalescing tiny packets.
1352  * The socket must be locked by the caller.
1353  */
1354 static __inline__ void __tcp_push_pending_frames(struct sock *sk,
1355 						 struct tcp_opt *tp,
1356 						 unsigned cur_mss,
1357 						 int nonagle)
1358 {
1359 	struct sk_buff *skb = tp->send_head;
1360 
1361 	if (skb) {
1362 		if (!tcp_skb_is_last(sk, skb))
1363 			nonagle = 1;
1364 		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
1365 		    tcp_write_xmit(sk, nonagle))
1366 			tcp_check_probe_timer(sk, tp);
1367 	}
1368 	tcp_cwnd_validate(sk, tp);
1369 }
1370 
1371 static __inline__ void tcp_push_pending_frames(struct sock *sk,
1372 					       struct tcp_opt *tp)
1373 {
1374 	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk), tp->nonagle);
1375 }
1376 
1377 static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
1378 {
1379 	struct sk_buff *skb = tp->send_head;
1380 
1381 	return (skb &&
1382 		tcp_snd_test(tp, skb, tcp_current_mss(sk),
1383 			     tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
1384 }
1385 
1386 static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1387 {
1388 	tp->snd_wl1 = seq;
1389 }
1390 
1391 static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1392 {
1393 	tp->snd_wl1 = seq;
1394 }
1395 
1396 extern void			tcp_destroy_sock(struct sock *sk);
1397 
1398 
1399 /*
1400  * Calculate(/check) TCP checksum
1401  */
1402 static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
1403 				   unsigned long saddr, unsigned long daddr,
1404 				   unsigned long base)
1405 {
1406 	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1407 }
1408 
1409 static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
1410 {
1411 	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
1412 }
1413 
1414 static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
1415 {
1416 	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
1417 		__tcp_checksum_complete(skb);
1418 }
1419 
1420 /* Prequeue for VJ style copy to user, combined with checksumming. */
1421 
1422 static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
1423 {
1424 	tp->ucopy.task = NULL;
1425 	tp->ucopy.len = 0;
1426 	tp->ucopy.memory = 0;
1427 	skb_queue_head_init(&tp->ucopy.prequeue);
1428 }
1429 
1430 /* Packet is added to VJ-style prequeue for processing in process
1431  * context, if a reader task is waiting. Apparently, this exciting
1432  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1433  * failed somewhere. Latency? Burstiness? Well, at least now we will
1434  * see why it failed. 8)8)				  --ANK
1435  *
1436  * NOTE: is this not too big to inline?
1437  */
1438 static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1439 {
1440 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
1441 
1442 	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
1443 		__skb_queue_tail(&tp->ucopy.prequeue, skb);
1444 		tp->ucopy.memory += skb->truesize;
1445 		if (tp->ucopy.memory > sk->rcvbuf) {
1446 			struct sk_buff *skb1;
1447 
1448 			if (sk->lock.users)
1449 				out_of_line_bug();
1450 
1451 			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1452 				sk->backlog_rcv(sk, skb1);
1453 				NET_INC_STATS_BH(TCPPrequeueDropped);
1454 			}
1455 
1456 			tp->ucopy.memory = 0;
1457 		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1458 			wake_up_interruptible(sk->sleep);
1459 			if (!tcp_ack_scheduled(tp))
1460 				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
1461 		}
1462 		return 1;
1463 	}
1464 	return 0;
1465 }
1466 
1467 
1468 #undef STATE_TRACE
1469 
1470 #ifdef STATE_TRACE
1471 static char *statename[]={
1472 	"Unused","Established","Syn Sent","Syn Recv",
1473 	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1474 	"Close Wait","Last ACK","Listen","Closing"
1475 };
1476 #endif
1477 
1478 static __inline__ void tcp_set_state(struct sock *sk, int state)
1479 {
1480 	int oldstate = sk->state;
1481 
1482 	switch (state) {
1483 	case TCP_ESTABLISHED:
1484 		if (oldstate != TCP_ESTABLISHED)
1485 			TCP_INC_STATS(TcpCurrEstab);
1486 		break;
1487 
1488 	case TCP_CLOSE:
1489 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1490 			TCP_INC_STATS(TcpEstabResets);
1491 
1492 		sk->prot->unhash(sk);
1493 		if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
1494 			tcp_put_port(sk);
1495 		/* fall through */
1496 	default:
1497 		if (oldstate==TCP_ESTABLISHED)
1498 			tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
1499 	}
1500 
1501 	/* Change state AFTER socket is unhashed to avoid closed
1502 	 * socket sitting in hash tables.
1503 	 */
1504 	sk->state = state;
1505 
1506 #ifdef STATE_TRACE
1507 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
1508 #endif
1509 }
1510 
1511 static __inline__ void tcp_done(struct sock *sk)
1512 {
1513 	tcp_set_state(sk, TCP_CLOSE);
1514 	tcp_clear_xmit_timers(sk);
1515 
1516 	sk->shutdown = SHUTDOWN_MASK;
1517 
1518 	if (!sk->dead)
1519 		sk->state_change(sk);
1520 	else
1521 		tcp_destroy_sock(sk);
1522 }
1523 
1524 static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
1525 {
1526 	tp->dsack = 0;
1527 	tp->eff_sacks = 0;
1528 	tp->num_sacks = 0;
1529 }
1530 
1531 static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
1532 {
1533 	if (tp->tstamp_ok) {
1534 		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1535 					  (TCPOPT_NOP << 16) |
1536 					  (TCPOPT_TIMESTAMP << 8) |
1537 					  TCPOLEN_TIMESTAMP);
1538 		*ptr++ = htonl(tstamp);
1539 		*ptr++ = htonl(tp->ts_recent);
1540 	}
1541 	if (tp->eff_sacks) {
1542 		struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
1543 		int this_sack;
1544 
1545 		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1546 					  (TCPOPT_NOP << 16) |
1547 					  (TCPOPT_SACK << 8) |
1548 					  (TCPOLEN_SACK_BASE +
1549 					   (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
1550 		for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
1551 			*ptr++ = htonl(sp[this_sack].start_seq);
1552 			*ptr++ = htonl(sp[this_sack].end_seq);
1553 		}
1554 		if (tp->dsack) {
1555 			tp->dsack = 0;
1556 			tp->eff_sacks--;
1557 		}
1558 	}
1559 }
1560 
1561 /* Construct a tcp options header for a SYN or SYN_ACK packet.
1562  * If this is ever changed, make sure to change the definition of
1563  * MAX_SYN_SIZE to match the new maximum number of options that you
1564  * can generate.
1565  */
1566 static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
1567 					     int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
1568 {
1569 	/* We always get an MSS option.
1570 	 * If timestamps are used, the option bytes which will be seen
1571 	 * in normal data packets must be included in the MSS
1572 	 * advertised.  But we subtract them from tp->mss_cache so
1573 	 * that calculations in tcp_sendmsg are simpler etc.
1574 	 * So account for this fact here if necessary.  If we
1575 	 * don't do this correctly, as a receiver we won't
1576 	 * recognize data packets as being full sized when we
1577 	 * should, and thus we won't abide by the delayed ACK
1578 	 * rules correctly.
1579 	 * SACKs don't matter, we never delay an ACK when we
1580 	 * have any of those going out.
1581 	 */
1582 	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
1583 	if (ts) {
1584 		if(sack)
1585 			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
1586 						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1587 		else
1588 			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1589 						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1590 		*ptr++ = htonl(tstamp);		/* TSVAL */
1591 		*ptr++ = htonl(ts_recent);	/* TSECR */
1592 	} else if(sack)
1593 		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1594 					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
1595 	if (offer_wscale)
1596 		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
1597 }
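
/* Example: a SYN offering mss 1460, timestamps, SACK and a window scale of
 * 2 makes the function above emit (kinds per RFC 793/1323/2018):
 *
 *	02 04 05 b4				MSS = 1460
 *	04 02 08 0a  <TSVAL>  <TSECR>		SACK-permitted folded in front
 *						of the timestamp option
 *	01 03 03 02				NOP + window scale = 2
 *
 * 20 bytes of options in total, which is the worst case this function can
 * generate and what MAX_SYN_SIZE (see the comment above) must allow for.
 */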

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
static inline void tcp_select_initial_window(int __space, __u32 mss,
					     __u32 *rcv_wnd,
					     __u32 *window_clamp,
					     int wscale_ok,
					     __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* See RFC1323 for an explanation of the limit to 14 */
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
		if (*rcv_wscale && sysctl_tcp_app_win && space >= mss &&
		    space - max((space >> sysctl_tcp_app_win), mss >> *rcv_wscale) < 65536/2)
			(*rcv_wscale)--;

		*rcv_wscale = max((__u8)sysctl_tcp_default_win_scale,
				  *rcv_wscale);
	}

	/* Set the initial window to a value large enough for senders
	 * following RFC 2414.  Senders not following this RFC will be
	 * satisfied with 2.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}
	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
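
/* Worked example (illustrative, assuming default sysctls that do not alter
 * the result): __space == 256K, mss == 1460, no preset clamp.  space is
 * rounded down to 179*1460 == 261340; the loop above halves it twice before
 * it drops below 64K, so rcv_wscale becomes 2.  Since mss > (1 << 2) and
 * 1460 is not above 1460, init_cwnd stays 4 and rcv_wnd is cut down to
 * 4*1460 == 5840, while window_clamp ends up as 65535 << 2 == 262140.
 */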

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(struct sock *sk)
{
	return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
}

static inline int tcp_full_space(struct sock *sk)
{
	return tcp_win_from_space(sk->rcvbuf);
}
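
/* tcp_win_from_space() reserves part of the receive buffer for application
 * latency and per-skb overhead.  With sysctl_tcp_adv_win_scale == 2 (the
 * usual default) it returns space - space/4, i.e. 3/4 of the buffer; a
 * negative setting such as -2 would instead advertise only space/4.
 */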

static inline void tcp_acceptq_removed(struct sock *sk)
{
	sk->ack_backlog--;
}

static inline void tcp_acceptq_added(struct sock *sk)
{
	sk->ack_backlog++;
}

static inline int tcp_acceptq_is_full(struct sock *sk)
{
	return sk->ack_backlog > sk->max_ack_backlog;
}

static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
				     struct sock *child)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	req->sk = child;
	tcp_acceptq_added(sk);

	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
	} else {
		tp->accept_queue_tail->dl_next = req;
	}
	tp->accept_queue_tail = req;
	req->dl_next = NULL;
}
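
/* The accept queue is a singly linked FIFO of open_requests whose req->sk
 * already points at the established child socket.  The sketch below (not
 * the real accept path; the helper name is made up) shows how a consumer
 * is expected to pop the head while keeping the tail pointer consistent.
 */
#if 0	/* illustrative sketch only */
static struct sock *acceptq_pop_sketch(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct open_request *req = tp->accept_queue;
	struct sock *child = NULL;

	if (req) {
		if ((tp->accept_queue = req->dl_next) == NULL)
			tp->accept_queue_tail = NULL;
		child = req->sk;
		tcp_acceptq_removed(sk);
		tcp_openreq_free(req);
	}
	return child;
}
#endif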

struct tcp_listen_opt
{
	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	u32			hash_rnd;
	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
};

static inline void
tcp_synq_removed(struct sock *sk, struct open_request *req)
{
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (--lopt->qlen == 0)
		tcp_delete_keepalive_timer(sk);
	if (req->retrans == 0)
		lopt->qlen_young--;
}

static inline void tcp_synq_added(struct sock *sk)
{
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (lopt->qlen++ == 0)
		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
	lopt->qlen_young++;
}

static inline int tcp_synq_len(struct sock *sk)
{
	return sk->tp_pinfo.af_tcp.listen_opt->qlen;
}

static inline int tcp_synq_young(struct sock *sk)
{
	return sk->tp_pinfo.af_tcp.listen_opt->qlen_young;
}

static inline int tcp_synq_is_full(struct sock *sk)
{
	return tcp_synq_len(sk) >> sk->tp_pinfo.af_tcp.listen_opt->max_qlen_log;
}
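
/* Note that tcp_synq_is_full() only reports "full" once qlen reaches
 * 2^max_qlen_log, because the right shift is zero below that point.  With
 * max_qlen_log == 8, for instance, up to 256 embryonic connections can sit
 * in the SYN table before new SYNs start being refused.
 */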

static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
				   struct open_request **prev)
{
	write_lock(&tp->syn_wait_lock);
	*prev = req->dl_next;
	write_unlock(&tp->syn_wait_lock);
}

static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
				 struct open_request **prev)
{
	tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev);
	tcp_synq_removed(sk, req);
	tcp_openreq_free(req);
}

static __inline__ void tcp_openreq_init(struct open_request *req,
					struct tcp_opt *tp,
					struct sk_buff *skb)
{
	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = tp->mss_clamp;
	req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
	req->tstamp_ok = tp->tstamp_ok;
	req->sack_ok = tp->sack_ok;
	req->snd_wscale = tp->snd_wscale;
	req->wscale_ok = tp->wscale_ok;
	req->acked = 0;
	req->ecn_ok = 0;
	req->rmt_port = skb->h.th->source;
}

#define TCP_MEM_QUANTUM	((int)PAGE_SIZE)

static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->tp_pinfo.af_tcp.queue_shrunk = 1;
	sk->wmem_queued -= skb->truesize;
	sk->forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->wmem_queued += skb->truesize;
	sk->forward_alloc -= skb->truesize;
}
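
/* tcp_charge_skb() and tcp_free_skb() keep two per-socket counters in step:
 * wmem_queued is the total truesize of queued write data, forward_alloc is
 * memory already granted to the socket but not yet attached to an skb.
 * Charging an skb with a truesize of 2048 moves 2048 bytes from
 * forward_alloc to wmem_queued; freeing it moves them back, after which
 * tcp_mem_reclaim() below can return whole TCP_MEM_QUANTUM chunks to the
 * global pool.
 */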

extern void __tcp_mem_reclaim(struct sock *sk);
extern int tcp_mem_schedule(struct sock *sk, int size, int kind);

static inline void tcp_mem_reclaim(struct sock *sk)
{
	if (sk->forward_alloc >= TCP_MEM_QUANTUM)
		__tcp_mem_reclaim(sk);
}

static inline void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(TCPMemoryPressures);
		tcp_memory_pressure = 1;
	}
}

static inline void tcp_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
		sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
{
	struct sk_buff *skb = alloc_skb(size + MAX_TCP_HEADER, gfp);

	if (skb) {
		skb->truesize += mem;
		if (sk->forward_alloc >= (int)skb->truesize ||
		    tcp_mem_schedule(sk, skb->truesize, 0)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		tcp_enter_memory_pressure();
		tcp_moderate_sndbuf(sk);
	}
	return NULL;
}
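
/* Typical use on the transmit side (sketch only; the helper name, error
 * code and GFP flag below are illustrative, not taken from this header):
 * allocate an skb charged against the socket and back off cleanly when the
 * allocation or the memory accounting fails.
 */
#if 0	/* illustrative sketch only */
static int xmit_alloc_sketch(struct sock *sk, int payload)
{
	struct sk_buff *skb = tcp_alloc_skb(sk, payload, GFP_ATOMIC);

	if (skb == NULL)
		return -ENOBUFS;	/* allocation or accounting failed */
	tcp_charge_skb(sk, skb);	/* account it against wmem_queued */
	/* ... build the segment and queue it on sk->write_queue ... */
	return 0;
}
#endif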

static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
{
	return tcp_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *tcp_alloc_page(struct sock *sk)
{
	if (sk->forward_alloc >= (int)PAGE_SIZE ||
	    tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
		struct page *page = alloc_pages(sk->allocation, 0);
		if (page)
			return page;
	}
	tcp_enter_memory_pressure();
	tcp_moderate_sndbuf(sk);
	return NULL;
}

static inline void tcp_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
		tcp_free_skb(sk, skb);
	tcp_mem_reclaim(sk);
}

extern void tcp_rfree(struct sk_buff *skb);

static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = tcp_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
	sk->forward_alloc -= skb->truesize;
}

extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */

static inline void tcp_listen_lock(void)
{
	/* read_lock synchronizes us with a writer holding the lock */
	read_lock(&tcp_lhash_lock);
	atomic_inc(&tcp_lhash_users);
	read_unlock(&tcp_lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
	if (atomic_dec_and_test(&tcp_lhash_users))
		wake_up(&tcp_lhash_wait);
}

static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(struct tcp_opt *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(struct tcp_opt *tp)
{
	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

	if (fin_timeout < (tp->rto << 2) - (tp->rto >> 1))
		fin_timeout = (tp->rto << 2) - (tp->rto >> 1);

	return fin_timeout;
}
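
/* Example: with the default sysctl_tcp_fin_timeout of 60 seconds and an
 * RTO of 3 seconds, (rto << 2) - (rto >> 1) == 3.5*RTO == 10.5 s, so the
 * sysctl wins; only once the RTO grows beyond roughly 17 s does the
 * 3.5*RTO floor take over, keeping FIN_WAIT2 around long enough for a
 * retransmitted FIN to be answered.
 */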

static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
{
	if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake.  It is necessary to understand the
	   reasons for this constraint before relaxing it: if the peer
	   reboots, its clock may go out of sync and half-open connections
	   will not be reset.
	   Actually, the problem would not exist if all implementations
	   followed the draft about preserving clock state across reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
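
/* Example: ts_recent == 1000000 and an arriving segment carries
 * rcv_tsval == 999000.  The signed difference is negative, so the first
 * test does not accept it; unless ts_recent is older than TCP_PAWS_24DAYS
 * (or, for a RST, older than TCP_PAWS_MSL) the function returns 1 and the
 * caller treats the segment as a PAWS reject.
 */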

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && tp->send_head &&
		!after(TCP_SKB_CB(tp->send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}

static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
	TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TcpMaxConn, -1);
}


/* TCP Westwood functions and constants */

#define TCP_WESTWOOD_INIT_RTT               (20*HZ)          /* maybe too conservative?! */
#define TCP_WESTWOOD_RTT_MIN                (HZ/20)          /* 50ms */

static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
{
	if (tcp_is_westwood(tp))
		tp->westwood.rtt = rtt_seq;
}

void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
/*
 * This function initializes the fields used in TCP Westwood+.  We can't
 * get any information about RTTmin at this time, so we simply set it to
 * TCP_WESTWOOD_INIT_RTT.  This value was deliberately chosen to be overly
 * conservative, since that way we are sure it will be updated in a
 * consistent way as soon as possible.  That will reasonably happen within
 * the first RTT period of the connection lifetime.
 */

static inline void __tcp_init_westwood(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	tp->westwood.bw_ns_est = 0;
	tp->westwood.bw_est = 0;
	tp->westwood.accounted = 0;
	tp->westwood.cumul_ack = 0;
	tp->westwood.rtt_win_sx = tcp_time_stamp;
	tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT;
	tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT;
	tp->westwood.snd_una = tp->snd_una;
}

static inline void tcp_init_westwood(struct sock *sk)
{
	__tcp_init_westwood(sk);
}

static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_is_westwood(&(sk->tp_pinfo.af_tcp)))
		__tcp_westwood_fast_bw(sk, skb);
}

static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_is_westwood(&(sk->tp_pinfo.af_tcp)))
		__tcp_westwood_slow_bw(sk, skb);
}

static inline __u32 __tcp_westwood_bw_rttmin(struct tcp_opt *tp)
{
	return (__u32) ((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
			(__u32) (tp->mss_cache));
}

static inline __u32 tcp_westwood_bw_rttmin(struct tcp_opt *tp)
{
	__u32 ret = 0;

	if (tcp_is_westwood(tp))
		ret = (__u32) (max(__tcp_westwood_bw_rttmin(tp), 2U));

	return ret;
}
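
/* __tcp_westwood_bw_rttmin() is the estimated bandwidth-delay product
 * expressed in segments: bw_est * rtt_min is the amount of data in flight
 * at the minimum observed RTT, and dividing by mss_cache converts it to a
 * window in packets.  The wrapper above clamps the result to at least 2
 * segments so a poor estimate never collapses the window completely.
 */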

static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
{
	int ret = 0;
	__u32 ssthresh;

	if (tcp_is_westwood(tp)) {
		if (!(ssthresh = tcp_westwood_bw_rttmin(tp)))
			return ret;

		tp->snd_ssthresh = ssthresh;
		ret = 1;
	}

	return ret;
}

static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
{
	int ret = 0;
	__u32 cwnd;

	if (tcp_is_westwood(tp)) {
		if (!(cwnd = tcp_westwood_bw_rttmin(tp)))
			return ret;

		tp->snd_cwnd = cwnd;
		ret = 1;
	}

	return ret;
}

static inline int tcp_westwood_complete_cwr(struct tcp_opt *tp)
{
	int ret = 0;

	if (tcp_is_westwood(tp)) {
		if (tcp_westwood_cwnd(tp)) {
			tp->snd_ssthresh = tp->snd_cwnd;
			ret = 1;
		}
	}

	return ret;
}

#endif	/* _TCP_H */