// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */

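/* Signal a pending ADD_ADDR (or its echo): stash the address in msk->pm and
 * flag it in pm.addr_signal so the option writer can emit it. Returns
 * -EINVAL if the same signal is already pending.
 */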
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

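/* Signal a pending RM_ADDR for the given id list and kick an ack out to
 * carry it. Fails with -EINVAL if another address signal is still pending.
 */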
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

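/* Close the subflows matching the ids in @rm_list, under the PM lock. */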
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

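/* Record which side of the connection this msk is on and notify userspace
 * about the newly created connection.
 */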
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

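/* Decide whether an incoming MP_JOIN may create a new subflow: account it
 * against the configured limit and clear pm->accept_subflow once that limit
 * is reached.
 */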
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_active(msk);

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

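/* The connection is fully established: schedule the PM worker if needed and
 * emit the MPTCP_EVENT_ESTABLISHED notification exactly once, even when
 * racing callers invoke this.
 */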
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established():
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

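/* A subflow reached the established status: if PM work is pending, let the
 * worker evaluate the next actions.
 */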
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

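/* A subflow is going away: update the in-kernel PM subflow accounting and,
 * if work is still pending, ask the worker to try establishing the next
 * candidate subflows.
 */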
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = (subflow->request_join || subflow->mp_join) &&
			  mptcp_pm_is_kernel(msk);
	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

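/* An ADD_ADDR option was received: echo it immediately when the in-kernel PM
 * is not accepting new addresses (or a userspace PM is active), hand it to
 * the worker otherwise, and account dropped announces via the MIB counter.
 */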
void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	} else if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

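/* The peer echoed one of our announced addresses: if it matches a pending
 * entry in the announce list, let the worker move on to the next action.
 */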
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

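/* Ask the worker to send an ack carrying the pending ADD_ADDR signal, if any. */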
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

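/* An RM_ADDR option was received: notify userspace about each removed id and
 * hand the list to the worker, accounting a drop if the event is already
 * being served.
 */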
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

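/* An MP_PRIO option was received: update the subflow backup flag and make
 * the scheduler pick a subflow again, deferring the reset when the msk
 * socket is owned by the user.
 */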
void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup) {
		subflow->backup = bkup;
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			msk->last_snd = NULL;
		else
			__set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
		mptcp_data_unlock(sk);
	}

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

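/* An MP_FAIL option was received: if we have not sent an MP_FAIL on this
 * subflow yet, reply with MP_FAIL plus an infinite mapping; otherwise treat
 * it as the response to our own MP_FAIL and clear the pending timeout. Only
 * relevant while infinite fallback is still allowed.
 */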
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	pr_debug("fail_seq=%llu", fail_seq);

	if (!READ_ONCE(msk->allow_infinite_fallback))
		return;

	if (!subflow->fail_tout) {
		pr_debug("send MP_FAIL response and infinite map");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
		tcp_send_ack(sk);
	} else {
		pr_debug("MP_FAIL response received");
		WRITE_ONCE(subflow->fail_tout, 0);
	}
}

/* path manager helpers */

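/* Fetch the pending ADD_ADDR signal into @addr/@echo and clear the relevant
 * pm.addr_signal bit. Returns true when the option writer should emit the
 * ADD_ADDR; for a pure ack, all other suboptions are dropped to make room.
 */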
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop every other option for pure ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

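/* Fetch the pending RM_ADDR signal into @rm_list, if it fits within the
 * remaining option space, and clear the RM_ADDR bit in pm.addr_signal.
 */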
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

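/* Track consecutive rtx periods with no progress on the receive side and let
 * the PM flag the subflow as stale; mark it active again once the receive
 * timestamp moves.
 */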
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

/* if sk is ipv4 or ipv6_only, allow only same-family local and remote
 * addresses; otherwise allow any matching local/remote pair
 */
bool mptcp_pm_addr_families_match(const struct sock *sk,
				  const struct mptcp_addr_info *loc,
				  const struct mptcp_addr_info *rem)
{
	bool mptcp_is_v4 = sk->sk_family == AF_INET;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);

	if (mptcp_is_v4)
		return loc_is_v4 && rem_is_v4;

	if (ipv6_only_sock(sk))
		return !loc_is_v4 && !rem_is_v4;

	return loc_is_v4 == rem_is_v4;
#else
	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
#endif
}

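/* Reset the whole PM state to its initial values, deriving work_pending and
 * the accept flags from the currently configured PM type and limits.
 */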
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	pm->add_addr_signaled = 0;
	pm->add_addr_accepted = 0;
	pm->local_addr_used = 0;
	pm->subflows = 0;
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_local_addr_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_add_addr_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
	} else {
		WRITE_ONCE(pm->work_pending, 0);
		WRITE_ONCE(pm->accept_addr, 0);
		WRITE_ONCE(pm->accept_subflow, 0);
	}

	WRITE_ONCE(pm->addr_signal, 0);
	WRITE_ONCE(pm->remote_deny_join_id0, false);
	pm->status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}

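/* One-time per-msk PM initialization: set up the lock and address lists,
 * then reset the PM state.
 */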
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}