1 /*
2  *  net/dccp/timer.c
3  *
4  *  An implementation of the DCCP protocol
5  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6  *
7  *	This program is free software; you can redistribute it and/or
8  *	modify it under the terms of the GNU General Public License
9  *	as published by the Free Software Foundation; either version
10  *	2 of the License, or (at your option) any later version.
11  */
12 
13 #include <linux/dccp.h>
14 #include <linux/skbuff.h>
15 
16 #include "dccp.h"
17 
/* sysctl variables governing numbers of retransmission attempts */
int  sysctl_dccp_request_retries	__read_mostly = TCP_SYN_RETRIES; /* handshake: DCCP_REQUESTING/DCCP_PARTOPEN */
int  sysctl_dccp_retries1		__read_mostly = TCP_RETR1; /* threshold for route re-selection */
int  sysctl_dccp_retries2		__read_mostly = TCP_RETR2; /* hard limit outside the handshake states */
22 
dccp_write_err(struct sock * sk)23 static void dccp_write_err(struct sock *sk)
24 {
25 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
26 	sk->sk_error_report(sk);
27 
28 	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
29 	dccp_done(sk);
30 	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
31 }
32 
33 /* A write timeout has occurred. Process the after effects. */
dccp_write_timeout(struct sock * sk)34 static int dccp_write_timeout(struct sock *sk)
35 {
36 	const struct inet_connection_sock *icsk = inet_csk(sk);
37 	int retry_until;
38 
39 	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
40 		if (icsk->icsk_retransmits != 0)
41 			dst_negative_advice(sk);
42 		retry_until = icsk->icsk_syn_retries ?
43 			    : sysctl_dccp_request_retries;
44 	} else {
45 		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
46 			/* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu
47 			   black hole detection. :-(
48 
49 			   It is place to make it. It is not made. I do not want
50 			   to make it. It is disguisting. It does not work in any
51 			   case. Let me to cite the same draft, which requires for
52 			   us to implement this:
53 
54    "The one security concern raised by this memo is that ICMP black holes
55    are often caused by over-zealous security administrators who block
56    all ICMP messages.  It is vitally important that those who design and
57    deploy security systems understand the impact of strict filtering on
58    upper-layer protocols.  The safest web site in the world is worthless
59    if most TCP implementations cannot transfer data from it.  It would
60    be far nicer to have all of the black holes fixed rather than fixing
61    all of the TCP implementations."
62 
63 			   Golden words :-).
64 		   */
65 
66 			dst_negative_advice(sk);
67 		}
68 
69 		retry_until = sysctl_dccp_retries2;
70 		/*
71 		 * FIXME: see tcp_write_timout and tcp_out_of_resources
72 		 */
73 	}
74 
75 	if (icsk->icsk_retransmits >= retry_until) {
76 		/* Has it gone just too far? */
77 		dccp_write_err(sk);
78 		return 1;
79 	}
80 	return 0;
81 }
82 
/*
 *	The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/*
	 * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
	 * sent, no need to retransmit, this sock is dead.
	 */
	if (dccp_write_timeout(sk))
		return;

	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */
	if (icsk->icsk_retransmits == 0)
		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 *
		 * NOTE(review): the decrement presumably undoes a counter
		 * increment performed inside dccp_retransmit_skb(), while the
		 * clamp keeps at least one timeout on record -- confirm
		 * against dccp_retransmit_skb() in net/dccp/output.c.
		 *
		 * Re-arm quickly (RTO capped at the resource-probe interval)
		 * instead of applying exponential backoff.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	/* Retransmission went out: back off exponentially, up to DCCP_RTO_MAX. */
	icsk->icsk_backoff++;

	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	/* Past retries1 failures, drop the cached route so that a fresh one
	 * is looked up on the next transmission attempt. */
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}
126 
dccp_write_timer(unsigned long data)127 static void dccp_write_timer(unsigned long data)
128 {
129 	struct sock *sk = (struct sock *)data;
130 	struct inet_connection_sock *icsk = inet_csk(sk);
131 	int event = 0;
132 
133 	bh_lock_sock(sk);
134 	if (sock_owned_by_user(sk)) {
135 		/* Try again later */
136 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
137 			       jiffies + (HZ / 20));
138 		goto out;
139 	}
140 
141 	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
142 		goto out;
143 
144 	if (time_after(icsk->icsk_timeout, jiffies)) {
145 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
146 			       icsk->icsk_timeout);
147 		goto out;
148 	}
149 
150 	event = icsk->icsk_pending;
151 	icsk->icsk_pending = 0;
152 
153 	switch (event) {
154 	case ICSK_TIME_RETRANS:
155 		dccp_retransmit_timer(sk);
156 		break;
157 	}
158 out:
159 	bh_unlock_sock(sk);
160 	sock_put(sk);
161 }
162 
/*
 *	Timer for listening sockets
 */
static void dccp_response_timer(struct sock *sk)
{
	/* Prune the listening socket's request queue: retransmit or expire
	 * pending connection requests, with DCCP-specific timeout bounds. */
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
				   DCCP_RTO_MAX);
}
171 
dccp_keepalive_timer(unsigned long data)172 static void dccp_keepalive_timer(unsigned long data)
173 {
174 	struct sock *sk = (struct sock *)data;
175 
176 	/* Only process if socket is not in use. */
177 	bh_lock_sock(sk);
178 	if (sock_owned_by_user(sk)) {
179 		/* Try again later. */
180 		inet_csk_reset_keepalive_timer(sk, HZ / 20);
181 		goto out;
182 	}
183 
184 	if (sk->sk_state == DCCP_LISTEN) {
185 		dccp_response_timer(sk);
186 		goto out;
187 	}
188 out:
189 	bh_unlock_sock(sk);
190 	sock_put(sk);
191 }
192 
/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	/* Nothing to do if the connection is closed or no delayed-ACK
	 * timer is actually pending. */
	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	/* Fired before the deadline: re-arm for the intended timeout. */
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	/* The timer fired, so a scheduled ACK was not sent in time: emit it
	 * now and adjust the ACK-timeout (ATO) estimate accordingly. */
	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
239 
240 /**
241  * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
242  * See the comments above %ccid_dequeueing_decision for supported modes.
243  */
dccp_write_xmitlet(unsigned long data)244 static void dccp_write_xmitlet(unsigned long data)
245 {
246 	struct sock *sk = (struct sock *)data;
247 
248 	bh_lock_sock(sk);
249 	if (sock_owned_by_user(sk))
250 		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
251 	else
252 		dccp_write_xmit(sk);
253 	bh_unlock_sock(sk);
254 }
255 
/* Timer entry point for the xmitlet: run it once, then drop the
 * reference the timer held on the socket. */
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	dccp_write_xmitlet(data);
	sock_put(sk);
}
261 
dccp_init_xmit_timers(struct sock * sk)262 void dccp_init_xmit_timers(struct sock *sk)
263 {
264 	struct dccp_sock *dp = dccp_sk(sk);
265 
266 	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
267 	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
268 							     (unsigned long)sk);
269 	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
270 				  &dccp_keepalive_timer);
271 }
272 
/* Wall-clock reference point set at module init; see dccp_timestamping_init(). */
static ktime_t dccp_timestamp_seed;
/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is native
 * DCCP time difference format (RFC 4340, sec. 13).
 * Please note: This will wrap around about circa every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
	s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	/* do_div() divides @delta in place: microseconds -> 10-usec units. */
	do_div(delta, 10);
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);
288 
/* Record the load-time wall clock used as the epoch by dccp_timestamp(). */
void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}
293