// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

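/**
 * tcp_clamp_rto_to_user_timeout() - bound the RTO by the remaining user timeout
 * @sk: The socket whose retransmission timer is being armed.
 *
 * When TCP_USER_TIMEOUT is set, make sure the retransmission timer does not
 * fire later than the time left in the user timeout budget. Returns icsk_rto
 * unchanged when no user timeout is configured, or 1 jiffy when the user
 * timeout has already expired.
 */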
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 elapsed, start_ts;
        s32 remaining;

        start_ts = tcp_sk(sk)->retrans_stamp;
        if (!icsk->icsk_user_timeout)
                return icsk->icsk_rto;
        elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
        remaining = icsk->icsk_user_timeout - elapsed;
        if (remaining <= 0)
                return 1; /* user timeout has passed; fire ASAP */

        return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

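/**
 * tcp_clamp_probe0_to_user_timeout() - bound the probe0 timeout by the user timeout
 * @sk:   The socket whose zero-window probe timer is being armed.
 * @when: The timeout that would normally be used, in jiffies.
 *
 * Like tcp_clamp_rto_to_user_timeout(), but for the zero-window probe timer:
 * never schedule the next probe later than the time remaining in the
 * TCP_USER_TIMEOUT budget (floored at TCP_TIMEOUT_MIN).
 */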
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 remaining;
        s32 elapsed;

        if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
                return when;

        elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
        if (unlikely(elapsed < 0))
                elapsed = 0;
        remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
        remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

        return min_t(u32, remaining, when);
}

/**
 * tcp_write_err() - close socket and save error info
 * @sk: The socket the error has appeared on.
 *
 * Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk_error_report(sk);

        tcp_write_queue_purge(sk);
        tcp_done(sk);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 * tcp_out_of_resources() - Close socket if out of resources
 * @sk:       pointer to current socket
 * @do_reset: send a last packet with reset flag
 *
 * Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting. This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 * 3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int shift = 0;

        /* If the peer does not open its window for a long time, or did not
         * transmit anything for a long time, penalize it.
         */
        if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
                shift++;

        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
                shift++;

        if (tcp_check_oom(sk, shift)) {
                /* Catch exceptional cases, when connection requires reset.
                 *      1. Last segment was sent recently. */
                if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
                    /* 2. Window is closed. */
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = true;
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }

        if (!check_net(sock_net(sk))) {
                /* Not possible to send a reset; just close. */
                tcp_done(sk);
                return 1;
        }

        return 0;
}

/**
 * tcp_orphan_retries() - Returns the maximal number of retries on an orphaned socket
 * @sk:    Pointer to the current socket.
 * @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
        int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
                retries = 0;

        /* However, if the socket sent something recently, select some safe
         * number of retries. 8 corresponds to >100 seconds with a minimal
         * RTO of 200 msec.
         */
        if (retries == 0 && alive)
                retries = 8;
        return retries;
}

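/**
 * tcp_mtu_probing() - React to a retransmission timeout for PMTU blackhole detection
 * @icsk: The connection socket whose MTU probing state is updated.
 * @sk:   The socket that hit the retransmission timeout.
 *
 * If the tcp_mtu_probing sysctl is enabled, either start MTU probing on the
 * first timeout, or halve the current search-low MSS (clamped by the
 * tcp_base_mss, tcp_mtu_probe_floor and tcp_min_snd_mss sysctls) so a smaller
 * packet size is tried next, and resynchronize the MSS.
 */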
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
        const struct net *net = sock_net(sk);
        int mss;

        /* Black hole detection */
        if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
                return;

        if (!icsk->icsk_mtup.enabled) {
                icsk->icsk_mtup.enabled = 1;
                icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
        } else {
                mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
                mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
                mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
                mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
                icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
        }
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

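/**
 * tcp_model_timeout() - Model how long @boundary backed-off retransmissions take
 * @sk:       The current socket (not used in the calculation).
 * @boundary: Number of retransmissions to model.
 * @rto_base: Initial RTO, in jiffies.
 *
 * Models exponential backoff starting at @rto_base, switching to linear
 * TCP_RTO_MAX steps once the doubled RTO would exceed TCP_RTO_MAX.
 * Returns the total timeout in milliseconds.
 */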
static unsigned int tcp_model_timeout(struct sock *sk,
                                      unsigned int boundary,
                                      unsigned int rto_base)
{
        unsigned int linear_backoff_thresh, timeout;

        linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
        if (boundary <= linear_backoff_thresh)
                timeout = ((2 << boundary) - 1) * rto_base;
        else
                timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                        (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
        return jiffies_to_msecs(timeout);
}

/**
 * retransmits_timed_out() - returns true if this connection has timed out
 * @sk:       The current socket
 * @boundary: max number of retransmissions
 * @timeout:  A custom timeout value.
 *            If set to 0, the default timeout is calculated and used,
 *            based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
                                  unsigned int boundary,
                                  unsigned int timeout)
{
        unsigned int start_ts;

        if (!inet_csk(sk)->icsk_retransmits)
                return false;

        start_ts = tcp_sk(sk)->retrans_stamp;
        if (likely(timeout == 0)) {
                unsigned int rto_base = TCP_RTO_MIN;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        rto_base = tcp_timeout_init(sk);
                timeout = tcp_model_timeout(sk, boundary, rto_base);
        }

        return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        bool expired = false, do_reset;
        int retry_until;

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        __dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? :
                        READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
                if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);

                        __dst_negative_advice(sk);
                }

                retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
                                !retransmits_timed_out(sk, retry_until, 0);

                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
        }
        if (!expired)
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
        tcp_fastopen_active_detect_blackhole(sk, expired);

        if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
                tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
                                  icsk->icsk_retransmits,
                                  icsk->icsk_rto, (int)expired);

        if (expired) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
        }

        if (sk_rethink_txhash(sk)) {
                tp->timeout_rehash++;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
        }

        return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                return;

        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
                return;
        }
        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!inet_csk_in_pingpong_mode(sk)) {
                        /* Delayed ACK missed: inflate ATO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        inet_csk_exit_pingpong_mode(sk);
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                tcp_mstamp_refresh(tcp_sk(sk));
                tcp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
}


/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t: Pointer to the timer. (Gets cast to struct sock *)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP
 * packet of this socket expires. Calls tcp_delack_timer_handler() to do the
 * actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_delack_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_delack_timer_handler(sk);
        } else {
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

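/**
 * tcp_probe_timer() - Handler for the zero-window probe (persist) timer
 * @sk: The socket whose probe0 timer has expired.
 *
 * Sends another zero-window probe as long as the peer keeps responding.
 * For orphaned sockets, or when TCP_USER_TIMEOUT is set, the connection is
 * aborted once the probe count or the elapsed time exceeds the limit.
 */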
static void tcp_probe_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb = tcp_send_head(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;

        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
                icsk->icsk_probes_tstamp = 0;
                return;
        }

        /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
         * long as the receiver continues to respond to probes. We support this
         * by default and reset icsk_probes_out with incoming ACKs. But if the
         * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
         * kill the socket when the retry count and the time exceed the
         * corresponding system limit. We also implement a similar policy when
         * we use RTO to probe the window in tcp_retransmit_timer().
         */
        if (!icsk->icsk_probes_tstamp)
                icsk->icsk_probes_tstamp = tcp_jiffies32;
        else if (icsk->icsk_user_timeout &&
                 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
                 msecs_to_jiffies(icsk->icsk_user_timeout))
                goto abort;

        max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

                max_probes = tcp_orphan_retries(sk, alive);
                if (!alive && icsk->icsk_backoff >= max_probes)
                        goto abort;
                if (tcp_out_of_resources(sk, true))
                        return;
        }

        if (icsk->icsk_probes_out >= max_probes) {
abort:          tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
                tcp_send_probe0(sk);
        }
}

/*
 * Timer for Fast Open socket to retransmit SYNACK. Note that the
 * sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_retries;

        req->rsk_ops->syn_ack_timeout(req);

        /* add one more retry for fastopen */
        max_retries = icsk->icsk_syn_retries ? :
            READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
                return;
        }
        /* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
        if (icsk->icsk_retransmits == 1)
                tcp_enter_loss(sk);
        /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
         * returned from rtx_syn_ack() to make it more persistent like
         * regular retransmit because if the child socket has been accepted
         * it's not good to give up too easily.
         */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
        icsk->icsk_retransmits++;
        if (!tp->retrans_stamp)
                tp->retrans_stamp = tcp_time_stamp(tp);
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  req->timeout << req->num_timeout, TCP_RTO_MAX);
}


/**
 * tcp_retransmit_timer() - The TCP retransmit timeout handler
 * @sk: Pointer to the current socket.
 *
 * This function gets called when the kernel timer for a TCP packet
 * of this socket expires.
 *
 * It handles retransmission, timer adjustment and other necessary measures.
 *
 * Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock *req;
        struct sk_buff *skb;

        req = rcu_dereference_protected(tp->fastopen_rsk,
                                        lockdep_sock_is_held(sk));
        if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                             sk->sk_state != TCP_FIN_WAIT1);
                tcp_fastopen_synack_timer(sk, req);
                /* Before we receive ACK to our SYN-ACK don't retransmit
                 * anything else (e.g., data or FIN segments).
                 */
                return;
        }

        if (!tp->packets_out)
                return;

        skb = tcp_rtx_queue_head(sk);
        if (WARN_ON_ONCE(!skb))
                return;

        tp->tlp_high_seq = 0;

        if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
            !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
                /* Receiver dastardly shrinks window. Our retransmits
                 * become zero probes, but we should not time out this
                 * connection. If the socket is an orphan, time it out;
                 * we cannot allow such beasts to hang infinitely.
                 */
                struct inet_sock *inet = inet_sk(sk);

                if (sk->sk_family == AF_INET) {
                        net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &inet->inet_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &sk->sk_v6_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#endif
                if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
                        tcp_write_err(sk);
                        goto out;
                }
                tcp_enter_loss(sk);
                tcp_retransmit_skb(sk, skb, 1);
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }

        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
        if (tcp_write_timeout(sk))
                goto out;

        if (icsk->icsk_retransmits == 0) {
                int mib_idx = 0;

                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
                        else
                                mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        mib_idx = LINUX_MIB_TCPLOSSFAILURES;
                } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
                           tp->sacked_out) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKFAILURES;
                        else
                                mib_idx = LINUX_MIB_TCPRENOFAILURES;
                }
                if (mib_idx)
                        __NET_INC_STATS(sock_net(sk), mib_idx);
        }

        tcp_enter_loss(sk);

        icsk->icsk_retransmits++;
        if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
                /* Retransmission failed because of local congestion;
                 * let senders fight for local resources conservatively.
                 */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          TCP_RESOURCE_PROBE_INTERVAL,
                                          TCP_RTO_MAX);
                goto out;
        }

        /* Increase the timeout each time we retransmit. Note that
         * we do not increase the rtt estimate. rto is initialized
         * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
         * that doubling rto each time is the least we can get away with.
         * In KA9Q, Karn uses this for the first few times, and then
         * goes to quadratic. netBSD doubles, but only goes up to *64,
         * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
         * defined in the protocol as the maximum possible RTT. I guess
         * we'll have to use something other than TCP to talk to the
         * University of Mars.
         *
         * PAWS allows us longer timeouts and large windows, so once
         * implemented ftp to mars will work nicely. We will have to fix
         * the 120 second clamps though!
         */
        icsk->icsk_backoff++;

out_reset_timer:
        /* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
         * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
         * might be increased if the stream oscillates between thin and thick,
         * thus the old value might already be too high compared to the value
         * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
         * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
         * exponential backoff behaviour to avoid continuing to hammer
         * linear-timeout retransmissions into a black hole.
         */
        if (sk->sk_state == TCP_ESTABLISHED &&
            (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
                icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
        } else {
                /* Use normal (exponential) backoff */
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
        if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
                __sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer().
 */
void tcp_write_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !icsk->icsk_pending)
                return;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                return;
        }

        tcp_mstamp_refresh(tcp_sk(sk));
        event = icsk->icsk_pending;

        switch (event) {
        case ICSK_TIME_REO_TIMEOUT:
                tcp_rack_reo_timeout(sk);
                break;
        case ICSK_TIME_LOSS_PROBE:
                tcp_send_loss_probe(sk);
                break;
        case ICSK_TIME_RETRANS:
                icsk->icsk_pending = 0;
                tcp_retransmit_timer(sk);
                break;
        case ICSK_TIME_PROBE0:
                icsk->icsk_pending = 0;
                tcp_probe_timer(sk);
                break;
        }
}

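/**
 * tcp_write_timer() - Timer callback for the retransmit/probe0/TLP/RACK timer
 * @t: Pointer to the timer.
 *
 * Runs tcp_write_timer_handler() if the socket is not currently owned by
 * user context; otherwise defers the work to tcp_release_cb() by setting
 * TCP_WRITE_TIMER_DEFERRED.
 */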
static void tcp_write_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_write_timer_handler(sk);
        } else {
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

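/**
 * tcp_syn_ack_timeout() - Account for a SYN-ACK retransmission timeout
 * @req: The request socket whose SYN-ACK timed out.
 *
 * Bumps the LINUX_MIB_TCPTIMEOUTS counter in the request socket's
 * network namespace.
 */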
void tcp_syn_ack_timeout(const struct request_sock *req)
{
        struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

        __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

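/**
 * tcp_set_keepalive() - Arm or disarm the keepalive timer for a socket
 * @sk:  The socket whose SO_KEEPALIVE setting is changing.
 * @val: New value of the SO_KEEPALIVE flag.
 *
 * Starts the keepalive timer when keepalive is being switched on and stops
 * it when keepalive is being switched off; does nothing for sockets in the
 * CLOSE or LISTEN state.
 */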
void tcp_set_keepalive(struct sock *sk, int val)
{
        if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
                return;

        if (val && !sock_flag(sk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
        else if (!val)
                inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);


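/**
 * tcp_keepalive_timer() - Timer callback for keepalive probes and FIN_WAIT2
 * @t: Pointer to the socket's sk_timer.
 *
 * Handles the FIN_WAIT2 timeout (moving the socket to timewait or resetting
 * it), sends keepalive probes on idle connections with SOCK_KEEPOPEN set, and
 * aborts the connection once the probe limit or TCP_USER_TIMEOUT is exceeded.
 */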
static void tcp_keepalive_timer(struct timer_list *t)
{
        struct sock *sk = from_timer(sk, t, sk_timer);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 elapsed;

        /* Only process if socket is not in use. */
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                inet_csk_reset_keepalive_timer(sk, HZ/20);
                goto out;
        }

        if (sk->sk_state == TCP_LISTEN) {
                pr_err("Hmm... keepalive on a LISTEN ???\n");
                goto out;
        }

        tcp_mstamp_refresh(tp);
        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

                        if (tmo > 0) {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
                tcp_send_active_reset(sk, GFP_ATOMIC);
                goto death;
        }

        if (!sock_flag(sk, SOCK_KEEPOPEN) ||
            ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
                goto out;

        elapsed = keepalive_time_when(tp);

        /* It is alive without keepalive 8) */
        if (tp->packets_out || !tcp_write_queue_empty(sk))
                goto resched;

        elapsed = keepalive_time_elapsed(tp);

        if (elapsed >= keepalive_time_when(tp)) {
                /* If the TCP_USER_TIMEOUT option is enabled, use that
                 * to determine when to time out instead.
                 */
                if ((icsk->icsk_user_timeout != 0 &&
                    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
                    icsk->icsk_probes_out > 0) ||
                    (icsk->icsk_user_timeout == 0 &&
                    icsk->icsk_probes_out >= keepalive_probes(tp))) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
                }
                if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                        icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
                        /* If keepalive was lost due to local congestion,
                         * try harder.
                         */
                        elapsed = TCP_RESOURCE_PROBE_INTERVAL;
                }
        } else {
                /* It is tp->rcv_tstamp + keepalive_time_when(tp) */
                elapsed = keepalive_time_when(tp) - elapsed;
        }

resched:
        inet_csk_reset_keepalive_timer(sk, elapsed);
        goto out;

death:
        tcp_done(sk);

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

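/**
 * tcp_compressed_ack_kick() - hrtimer callback that flushes a compressed ACK
 * @timer: Pointer to the compressed_ack_timer embedded in the tcp_sock.
 *
 * If the socket is not owned by user context, sends the ACK that was being
 * held back for compression; otherwise defers the work to tcp_release_cb()
 * via TCP_DELACK_TIMER_DEFERRED.
 */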
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
        struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
        struct sock *sk = (struct sock *)tp;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                if (tp->compressed_ack) {
                        /* Since we have to send one ACK finally,
                         * subtract one from tp->compressed_ack to keep
                         * LINUX_MIB_TCPACKCOMPRESSED accurate.
                         */
                        tp->compressed_ack--;
                        tcp_send_ack(sk);
                }
        } else {
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);

        sock_put(sk);

        return HRTIMER_NORESTART;
}

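/**
 * tcp_init_xmit_timers() - Set up all TCP transmit-side timers for a socket
 * @sk: The socket being initialized.
 *
 * Registers the retransmit, delayed-ACK and keepalive timers and initializes
 * the pacing and compressed-ACK hrtimers with their callbacks.
 */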
void tcp_init_xmit_timers(struct sock *sk)
{
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
        hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED_SOFT);
        tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

        hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED_SOFT);
        tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}