// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>
#include <trace/events/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	trace_tcp_cong_state_set(sk, ca_state);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
						       const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		/* request_module() may sleep, so drop the RCU read lock
		 * around the module load, then look the name up again.
		 */
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
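
/* For illustration only: a minimal sketch of what registration looks like
 * from the module side. The "dummy" name and functions are hypothetical;
 * the sketch borrows the Reno helpers exported later in this file, which
 * satisfy the required-ops check above.
 *
 *	static struct tcp_congestion_ops tcp_dummy __read_mostly = {
 *		.name		= "dummy",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *	};
 *
 *	static int __init tcp_dummy_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_dummy);
 *	}
 *	module_init(tcp_dummy_register);
 *
 *	static void __exit tcp_dummy_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_dummy);
 *	}
 *	module_exit(tcp_dummy_unregister);
 */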

/*
 * Remove congestion control algorithm, called from
 * the module's remove function. Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now, as our module is
	 * in "going" state: no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
	icsk->icsk_ca_initialized = 1;
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
		   !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only init netns can set default to a restricted algorithm */
		ret = -EPERM;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}
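
/* For illustration only: this per-netns default is normally changed from
 * userspace through the tcp_congestion_control sysctl, e.g.
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=cubic
 *
 * which assumes the cubic module is available for autoloading.
 */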

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}
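
/* For illustration only: this backs the read-only
 * net.ipv4.tcp_available_congestion_control sysctl, so a space-separated
 * list such as "reno cubic" is what e.g.
 *
 *	cat /proc/sys/net/ipv4/tcp_available_congestion_control
 *
 * would print (the exact set depends on the kernel configuration).
 */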

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
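
/* For illustration only: this parser is driven by the
 * net.ipv4.tcp_allowed_congestion_control sysctl. Writing a
 * space-separated list, e.g.
 *
 *	sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 *
 * limits what unprivileged sockets may select via TCP_CONGESTION.
 */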

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(sock_net(sk), name);

	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
out:
	rcu_read_unlock();
	return err;
}
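
/* For illustration only: userspace reaches this function through the
 * TCP_CONGESTION socket option. A minimal sketch, assuming a connected
 * TCP socket fd and that the cubic module is available:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *		   "cubic", strlen("cubic"));
 */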

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base this on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better ;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC. Slow start processes a
 * stretch ACK of degree N as if N acks of degree 1 are received back to back,
 * except ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);

	acked -= cwnd - tcp_snd_cwnd(tp);
	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
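
/* Worked example (hypothetical numbers): with tcp_snd_cwnd(tp) == 10,
 * snd_ssthresh == 12 and a stretch ACK covering acked == 5 packets,
 * cwnd becomes min(10 + 5, 12) == 12 and tcp_slow_start() returns
 * 5 - (12 - 10) == 3 leftover acks, which the caller then feeds into
 * congestion avoidance (see tcp_reno_cong_avoid() below).
 */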

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
	}
	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
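
/* Worked example (hypothetical numbers): with w == 10 and acked == 3 on
 * each call, snd_cwnd_cnt runs 3, 6, 9, 12; on the fourth call
 * delta == 12 / 10 == 1, so cwnd grows by 1 and snd_cwnd_cnt keeps the
 * remainder 2. Net effect: cwnd increases by one segment per w packets
 * acked, approximating cwnd += 1/w per ACK without fractional arithmetic.
 */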

/*
 * TCP Reno congestion control
 * This is a special case, used as a fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
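
/* Worked example (hypothetical numbers): cwnd == 10 halves to
 * ssthresh == 5, while cwnd == 3 gives max(1, 2) == 2, the enforced
 * minimum.
 */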

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};