Lines matching refs: subflow (identifier cross-reference over net/mptcp/protocol.c)
67 if (!msk->subflow || READ_ONCE(msk->can_ack)) in __mptcp_nmpc_socket()
70 return msk->subflow; in __mptcp_nmpc_socket()
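The hits at 67/70 cover almost the whole helper, so a reconstruction is straightforward. All sketches below assume the net/mptcp/protocol.c context (its local protocol.h helpers and the usual kernel headers) and are rebuilt from the matched lines plus memory of this kernel generation, not authoritative copies. Here only the NULL branch is filled in by assumption:

/* Sketch of __mptcp_nmpc_socket(): hand back the initial subflow
 * socket only while the MPC handshake is still pending (can_ack not
 * yet set); otherwise there is no "not yet MPTCP-capable" socket.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}
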
105 struct mptcp_subflow_context *subflow; in __mptcp_socket_create() local
115 msk->subflow = ssock; in __mptcp_socket_create()
116 subflow = mptcp_subflow_ctx(ssock->sk); in __mptcp_socket_create()
117 list_add(&subflow->node, &msk->conn_list); in __mptcp_socket_create()
119 subflow->request_mptcp = 1; in __mptcp_socket_create()
122 subflow->local_id_valid = 1; in __mptcp_socket_create()
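The hits at 105-122 outline initial-subflow creation. A condensed sketch follows; the error handling is trimmed and the exact mptcp_subflow_create_socket() signature is assumed for this kernel generation:

/* Condensed sketch of __mptcp_socket_create(): allocate the first
 * (MPC) subflow, remember it in msk->subflow/msk->first and link its
 * context into msk->conn_list.
 */
static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);	/* assumed two-arg form */
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;

	/* the initial subflow is always the one with local id 0 */
	subflow->local_id_valid = 1;
	mptcp_sock_graft(msk->first, sk->sk_socket);

	return 0;
}
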
348 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb() local
368 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); in __mptcp_move_skb()
486 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) in mptcp_timeout_from_subflow() argument
488 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow()
490 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
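Lines 486-490 give most of this helper; the "icsk_timeout - jiffies : 0" tail is supplied from memory and should be treated as an assumption:

/* Sketch: the retransmit timeout one subflow contributes to the
 * msk-level timer, zero if nothing is pending or the subflow is
 * already counted as stale.
 */
static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
	       inet_csk(ssk)->icsk_timeout - jiffies : 0;
}
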
496 struct mptcp_subflow_context *subflow; in mptcp_set_timeout() local
499 mptcp_for_each_subflow(mptcp_sk(sk), subflow) in mptcp_set_timeout()
500 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_set_timeout()
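The loop at 499-500 is the whole computation; only the final __mptcp_set_timeout() sink is assumed:

/* Sketch: the msk-level timeout is the maximum over all subflows. */
static void mptcp_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
	__mptcp_set_timeout(sk, tout);
}
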
527 struct mptcp_subflow_context *subflow; in mptcp_send_ack() local
529 mptcp_for_each_subflow(msk, subflow) in mptcp_send_ack()
530 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); in mptcp_send_ack()
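Lines 527-530 are effectively the full function; a sketch for context:

/* Sketch: fan a DATA_ACK-carrying ACK out on every subflow. */
static void mptcp_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}
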
559 struct mptcp_subflow_context *subflow; in mptcp_cleanup_rbuf() local
567 mptcp_for_each_subflow(msk, subflow) { in mptcp_cleanup_rbuf()
568 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf()
631 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow() local
659 map_remaining = subflow->map_data_len - in __mptcp_move_skbs_from_subflow()
660 mptcp_subflow_get_map_offset(subflow); in __mptcp_move_skbs_from_subflow()
679 subflow->map_data_len = skb->len; in __mptcp_move_skbs_from_subflow()
793 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready() local
801 if (unlikely(subflow->disposable)) in mptcp_data_ready()
843 struct mptcp_subflow_context *tmp, *subflow; in __mptcp_flush_join_list() local
846 list_for_each_entry_safe(subflow, tmp, &msk->join_list, node) { in __mptcp_flush_join_list()
847 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_flush_join_list()
850 list_move_tail(&subflow->node, &msk->conn_list); in __mptcp_flush_join_list()
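Lines 843-850 show the list handling; the lock_sock_fast()/__mptcp_finish_join() body around it is reconstructed from memory and is an assumption:

/* Sketch: move freshly joined subflows from join_list to conn_list,
 * completing the join (or resetting the subflow) under the subflow
 * socket lock.
 */
static void __mptcp_flush_join_list(struct sock *sk)
{
	struct mptcp_subflow_context *tmp, *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	list_for_each_entry_safe(subflow, tmp, &msk->join_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		list_move_tail(&subflow->node, &msk->conn_list);
		if (!__mptcp_finish_join(msk, ssk))
			mptcp_subflow_reset(ssk);
		unlock_sock_fast(ssk, slow);
	}
}
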
896 struct mptcp_subflow_context *subflow; in mptcp_check_for_eof() local
900 mptcp_for_each_subflow(msk, subflow) in mptcp_check_for_eof()
901 receivers += !subflow->rx_eof; in mptcp_check_for_eof()
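Lines 896-901 show the counting part; the state transition taken once no receiver is left is omitted here:

/* Sketch: the connection as a whole reached EOF only when every
 * subflow has seen rx_eof; otherwise bail out early.
 */
static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;
	if (receivers)
		return;

	/* ... shutdown/state handling elided ... */
}
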
933 struct mptcp_subflow_context *subflow; in mptcp_subflow_recv_lookup() local
938 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_recv_lookup()
939 if (READ_ONCE(subflow->data_avail)) in mptcp_subflow_recv_lookup()
940 return mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_recv_lookup()
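Lines 933-940 give the loop verbatim; only the lockdep-style assertion and the trailing NULL return are filled in by assumption:

/* Sketch: pick the first subflow that announced pending data. */
static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->data_avail))
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}
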
1095 struct mptcp_subflow_context *subflow; in mptcp_enter_memory_pressure() local
1100 mptcp_for_each_subflow(msk, subflow) { in mptcp_enter_memory_pressure()
1101 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure()
1409 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_set_active() argument
1411 if (!subflow->stale) in mptcp_subflow_set_active()
1414 subflow->stale = 0; in mptcp_subflow_set_active()
1415 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER); in mptcp_subflow_set_active()
1418 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_active() argument
1420 if (unlikely(subflow->stale)) { in mptcp_subflow_active()
1421 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); in mptcp_subflow_active()
1423 if (subflow->stale_rcv_tstamp == rcv_tstamp) in mptcp_subflow_active()
1426 mptcp_subflow_set_active(subflow); in mptcp_subflow_active()
1428 return __mptcp_subflow_active(subflow); in mptcp_subflow_active()
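Lines 1418-1428 outline the staleness check; a sketch, with only the "return false" branch filled in by assumption:

/* Sketch: a subflow previously marked stale is revived as soon as its
 * TCP rcv_tstamp moves again; otherwise it stays excluded from
 * scheduling until then.
 */
bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	if (unlikely(subflow->stale)) {
		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);

		if (subflow->stale_rcv_tstamp == rcv_tstamp)
			return false;

		mptcp_subflow_set_active(subflow);
	}
	return __mptcp_subflow_active(subflow);
}
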
1442 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_send() local
1473 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_send()
1474 trace_mptcp_subflow_get_send(subflow); in mptcp_subflow_get_send()
1475 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1476 if (!mptcp_subflow_active(subflow)) in mptcp_subflow_get_send()
1479 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_subflow_get_send()
1480 nr_active += !subflow->backup; in mptcp_subflow_get_send()
1481 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1484 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1485 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1491 if (linger_time < send_info[subflow->backup].linger_time) { in mptcp_subflow_get_send()
1492 send_info[subflow->backup].ssk = ssk; in mptcp_subflow_get_send()
1493 send_info[subflow->backup].linger_time = linger_time; in mptcp_subflow_get_send()
1524 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_get_send()
1525 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + in mptcp_subflow_get_send()
1877 struct mptcp_subflow_context *subflow; in mptcp_rcv_space_adjust() local
1897 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
1902 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); in mptcp_rcv_space_adjust()
1949 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
1953 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
2206 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_retrans() local
2214 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_retrans()
2215 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans()
2217 if (!__mptcp_subflow_active(subflow)) in mptcp_subflow_get_retrans()
2223 min_stale_count = min_t(int, min_stale_count, subflow->stale_count); in mptcp_subflow_get_retrans()
2227 if (subflow->backup) { in mptcp_subflow_get_retrans()
2246 if (msk->subflow) { in mptcp_dispose_initial_subflow()
2247 iput(SOCK_INODE(msk->subflow)); in mptcp_dispose_initial_subflow()
2248 msk->subflow = NULL; in mptcp_dispose_initial_subflow()
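Lines 2246-2248 are the body of this helper; the surrounding locking context is omitted:

/* Sketch: drop the extra inode reference held on the initial subflow
 * socket and forget the pointer.
 */
static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
{
	if (msk->subflow) {
		iput(SOCK_INODE(msk->subflow));
		msk->subflow = NULL;
	}
}
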
2305 struct mptcp_subflow_context *subflow, in __mptcp_close_ssk() argument
2311 dispose_it = !msk->subflow || ssk != msk->subflow->sk; in __mptcp_close_ssk()
2313 list_del(&subflow->node); in __mptcp_close_ssk()
2318 subflow->send_fastclose = 1; in __mptcp_close_ssk()
2323 msk->subflow->state = SS_UNCONNECTED; in __mptcp_close_ssk()
2324 mptcp_subflow_ctx_reset(subflow); in __mptcp_close_ssk()
2336 subflow->disposable = 1; in __mptcp_close_ssk()
2343 kfree_rcu(subflow, rcu); in __mptcp_close_ssk()
2372 struct mptcp_subflow_context *subflow) in mptcp_close_ssk() argument
2380 mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow); in mptcp_close_ssk()
2382 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); in mptcp_close_ssk()
2392 struct mptcp_subflow_context *subflow, *tmp; in __mptcp_close_subflow() local
2396 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { in __mptcp_close_subflow()
2397 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow()
2406 mptcp_close_ssk((struct sock *)msk, ssk, subflow); in __mptcp_close_subflow()
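Lines 2392-2406 show the reap loop; the two "continue" guards are reconstructed from memory and are assumptions:

/* Sketch: reap subflows whose TCP socket already reached TCP_CLOSE,
 * deferring those that still have pending data in the receive queue.
 */
static void __mptcp_close_subflow(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *tmp;

	might_sleep();

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (inet_sk_state_load(ssk) != TCP_CLOSE)
			continue;

		/* subflow_data_ready() will re-schedule once the rx queue drains */
		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
			continue;

		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
	}
}
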
2413 struct mptcp_subflow_context *subflow; in mptcp_check_close_timeout() local
2421 mptcp_for_each_subflow(mptcp_sk(sk), subflow) { in mptcp_check_close_timeout()
2422 if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) != in mptcp_check_close_timeout()
2431 struct mptcp_subflow_context *subflow, *tmp; in mptcp_check_fastclose() local
2439 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { in mptcp_check_fastclose()
2440 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in mptcp_check_fastclose()
2761 struct mptcp_subflow_context *subflow; in __mptcp_check_send_data_fin() local
2790 mptcp_for_each_subflow(msk, subflow) { in __mptcp_check_send_data_fin()
2791 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in __mptcp_check_send_data_fin()
2837 struct mptcp_subflow_context *subflow; in mptcp_close() local
2857 mptcp_for_each_subflow(msk, subflow) { in mptcp_close()
2858 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_close()
2865 subflow->fail_tout = 0; in mptcp_close()
2980 msk->subflow = NULL; in mptcp_sk_clone()
3050 struct mptcp_subflow_context *subflow; in mptcp_accept() local
3053 subflow = mptcp_subflow_ctx(newsk); in mptcp_accept()
3054 new_mptcp_sock = subflow->conn; in mptcp_accept()
3080 struct mptcp_subflow_context *subflow, *tmp; in mptcp_destroy_common() local
3086 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) in mptcp_destroy_common()
3087 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); in mptcp_destroy_common()
3228 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated() local
3229 struct sock *sk = subflow->conn; in mptcp_subflow_process_delegated()
3231 if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) { in mptcp_subflow_process_delegated()
3238 mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND); in mptcp_subflow_process_delegated()
3240 if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) { in mptcp_subflow_process_delegated()
3242 mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK); in mptcp_subflow_process_delegated()
3275 struct mptcp_subflow_context *subflow; in mptcp_finish_connect() local
3280 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3281 sk = subflow->conn; in mptcp_finish_connect()
3284 pr_debug("msk=%p, token=%u", sk, subflow->token); in mptcp_finish_connect()
3286 mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); in mptcp_finish_connect()
3288 subflow->map_seq = ack_seq; in mptcp_finish_connect()
3289 subflow->map_subflow_seq = 1; in mptcp_finish_connect()
3294 WRITE_ONCE(msk->remote_key, subflow->remote_key); in mptcp_finish_connect()
3295 WRITE_ONCE(msk->local_key, subflow->local_key); in mptcp_finish_connect()
3296 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); in mptcp_finish_connect()
3319 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join() local
3320 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_finish_join()
3324 pr_debug("msk=%p, subflow=%p", msk, subflow); in mptcp_finish_join()
3328 subflow->reset_reason = MPTCP_RST_EMPTCP; in mptcp_finish_join()
3332 if (!list_empty(&subflow->node)) in mptcp_finish_join()
3347 list_add_tail(&subflow->node, &msk->conn_list); in mptcp_finish_join()
3351 list_add_tail(&subflow->node, &msk->join_list); in mptcp_finish_join()
3358 subflow->reset_reason = MPTCP_RST_EPROHIBIT; in mptcp_finish_join()
3362 subflow->map_seq = READ_ONCE(msk->ack_seq); in mptcp_finish_join()
3498 struct mptcp_subflow_context *subflow) in mptcp_subflow_early_fallback() argument
3500 subflow->request_mptcp = 0; in mptcp_subflow_early_fallback()
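Lines 3498-3500 plus the fallback helper make up the whole function; only the __mptcp_do_fallback() call is supplied from memory:

/* Sketch: give up on MPTCP for this connection before the handshake
 * completes, so the initial subflow proceeds as plain TCP.
 */
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{
	subflow->request_mptcp = 0;
	__mptcp_do_fallback(msk);
}
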
3508 struct mptcp_subflow_context *subflow; in mptcp_stream_connect() local
3524 if (sock->state != SS_UNCONNECTED && msk->subflow) { in mptcp_stream_connect()
3528 ssock = msk->subflow; in mptcp_stream_connect()
3538 subflow = mptcp_subflow_ctx(ssock->sk); in mptcp_stream_connect()
3544 mptcp_subflow_early_fallback(msk, subflow); in mptcp_stream_connect()
3546 if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) { in mptcp_stream_connect()
3548 mptcp_subflow_early_fallback(msk, subflow); in mptcp_stream_connect()
3615 struct mptcp_subflow_context *subflow; in mptcp_stream_accept() local
3627 subflow = mptcp_subflow_ctx(msk->first); in mptcp_stream_accept()
3628 list_add(&subflow->node, &msk->conn_list); in mptcp_stream_accept()
3640 mptcp_for_each_subflow(msk, subflow) { in mptcp_stream_accept()
3641 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept()
3696 if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk)) in mptcp_poll()
3699 return inet_csk_listen_poll(msk->subflow->sk); in mptcp_poll()
3752 struct mptcp_subflow_context *subflow; in mptcp_napi_poll() local
3756 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { in mptcp_napi_poll()
3757 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll()
3761 mptcp_subflow_has_delegated_action(subflow)) in mptcp_napi_poll()