Lines matching refs: sk

54 void tcp_time_wait(struct sock *sk, int state, int timeo);
262 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure() argument
264 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in tcp_under_memory_pressure()
265 mem_cgroup_under_socket_pressure(sk->sk_memcg)) in tcp_under_memory_pressure()
287 static inline bool tcp_out_of_memory(struct sock *sk) in tcp_out_of_memory() argument
289 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && in tcp_out_of_memory()
290 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) in tcp_out_of_memory()
295 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb) in tcp_wmem_free_skb() argument
297 sk_wmem_queued_add(sk, -skb->truesize); in tcp_wmem_free_skb()
299 sk_mem_uncharge(sk, skb->truesize); in tcp_wmem_free_skb()
301 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb))); in tcp_wmem_free_skb()
305 void sk_forced_mem_schedule(struct sock *sk, int size);
307 bool tcp_check_oom(struct sock *sk, int shift);
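
The matcher truncates multi-line bodies, so the memory helpers above appear only in fragments. A sketch reconstructing them, assuming they follow the usual mainline pattern (per-memcg pressure is consulted first, then the global flag; a socket is "out of memory" only when its own queue is non-trivial and the global hard limit tcp_mem[2] has been crossed):

static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	/* per-cgroup socket pressure takes precedence when memcg
	 * accounting is enabled for this socket */
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}

static inline bool tcp_out_of_memory(struct sock *sk)
{
	/* only sockets with a non-trivial send queue count, and only
	 * once allocations exceed the hard limit (tcp_mem index 2) */
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}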
321 void tcp_shutdown(struct sock *sk, int how);
326 void tcp_remove_empty_skb(struct sock *sk);
328 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
329 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
330 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
332 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
334 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
336 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
338 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
339 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
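
Note the locked/unlocked split in the send path above: tcp_sendmsg() takes the socket lock itself, while the *_locked variants assume the caller already owns it. A hypothetical in-kernel caller (a sketch, not taken from this file; ULPs such as kTLS drive the TCP send path this way):

static int example_send(struct sock *sk, struct msghdr *msg, size_t size)
{
	int ret;

	lock_sock(sk);
	ret = tcp_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	return ret;
}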
341 void tcp_release_cb(struct sock *sk);
343 void tcp_write_timer_handler(struct sock *sk);
344 void tcp_delack_timer_handler(struct sock *sk);
345 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
346 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
347 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
348 void tcp_rcv_space_adjust(struct sock *sk);
349 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
350 void tcp_twsk_destructor(struct sock *sk);
352 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
355 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
358 void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
359 static inline void tcp_dec_quickack_mode(struct sock *sk, in tcp_dec_quickack_mode() argument
362 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
390 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
395 void tcp_enter_loss(struct sock *sk);
396 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
398 void tcp_update_metrics(struct sock *sk);
399 void tcp_init_metrics(struct sock *sk);
402 void __tcp_close(struct sock *sk, long timeout);
403 void tcp_close(struct sock *sk, long timeout);
404 void tcp_init_sock(struct sock *sk);
405 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
408 int do_tcp_getsockopt(struct sock *sk, int level,
410 int tcp_getsockopt(struct sock *sk, int level, int optname,
413 int do_tcp_setsockopt(struct sock *sk, int level, int optname,
415 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
417 void tcp_set_keepalive(struct sock *sk, int val);
419 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
421 int tcp_set_rcvlowat(struct sock *sk, int val);
422 int tcp_set_window_clamp(struct sock *sk, int val);
425 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
427 void tcp_data_ready(struct sock *sk);
440 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
442 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
447 struct sock *sk, struct tcphdr *th);
452 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
453 void tcp_v4_mtu_reduced(struct sock *sk);
454 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
455 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
456 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
457 struct sock *tcp_create_openreq_child(const struct sock *sk,
460 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
461 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
466 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
467 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
468 int tcp_connect(struct sock *sk);
474 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
479 int tcp_disconnect(struct sock *sk, int flags);
481 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
482 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
483 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
486 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
491 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
494 struct sock *sk, struct sk_buff *skb);
512 static inline void tcp_synq_overflow(const struct sock *sk) in tcp_synq_overflow() argument
517 if (sk->sk_reuseport) { in tcp_synq_overflow()
520 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_overflow()
530 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_overflow()
532 WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now); in tcp_synq_overflow()
536 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) in tcp_synq_no_recent_overflow() argument
541 if (sk->sk_reuseport) { in tcp_synq_no_recent_overflow()
544 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_no_recent_overflow()
553 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_no_recent_overflow()
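
The two helpers above record and test the timestamp of the last SYN-queue overflow (stashed in rx_opt.ts_recent_stamp, or in the reuseport group when sk_reuseport is set). A sketch of the recency test the fragments build up to, assuming the time_between32() helper and TCP_SYNCOOKIE_VALID window used elsewhere in mainline:

static inline bool example_no_recent_overflow(u32 now, u32 last_overflow)
{
	/* true => no overflow within the validity window, so an
	 * arriving syncookie should not be honored */
	return !time_between32(now, last_overflow,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}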
586 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
594 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
596 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
598 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
599 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
600 void tcp_retransmit_timer(struct sock *sk);
603 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
609 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
616 void tcp_send_fin(struct sock *sk);
617 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
620 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
621 void tcp_send_ack(struct sock *sk);
622 void tcp_send_delayed_ack(struct sock *sk);
623 void tcp_send_loss_probe(struct sock *sk);
624 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
629 void tcp_rearm_rto(struct sock *sk);
630 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
631 void tcp_reset(struct sock *sk, struct sk_buff *skb);
633 void tcp_fin(struct sock *sk);
634 void tcp_check_space(struct sock *sk);
638 static inline void tcp_clear_xmit_timers(struct sock *sk) in tcp_clear_xmit_timers() argument
640 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1) in tcp_clear_xmit_timers()
641 __sock_put(sk); in tcp_clear_xmit_timers()
643 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1) in tcp_clear_xmit_timers()
644 __sock_put(sk); in tcp_clear_xmit_timers()
646 inet_csk_clear_xmit_timers(sk); in tcp_clear_xmit_timers()
649 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
650 unsigned int tcp_current_mss(struct sock *sk);
651 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
680 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
682 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
683 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
684 void tcp_read_done(struct sock *sk, size_t len);
686 void tcp_initialize_rcv_mss(struct sock *sk);
688 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
689 int tcp_mss_to_mtu(struct sock *sk, int mss);
690 void tcp_mtup_init(struct sock *sk);
692 static inline void tcp_bound_rto(const struct sock *sk) in tcp_bound_rto() argument
694 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) in tcp_bound_rto()
695 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; in tcp_bound_rto()
719 static inline void tcp_fast_path_check(struct sock *sk) in tcp_fast_path_check() argument
721 struct tcp_sock *tp = tcp_sk(sk); in tcp_fast_path_check()
725 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && in tcp_fast_path_check()
731 static inline u32 tcp_rto_min(struct sock *sk) in tcp_rto_min() argument
733 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_rto_min()
734 u32 rto_min = inet_csk(sk)->icsk_rto_min; in tcp_rto_min()
741 static inline u32 tcp_rto_min_us(struct sock *sk) in tcp_rto_min_us() argument
743 return jiffies_to_usecs(tcp_rto_min(sk)); in tcp_rto_min_us()
774 u32 __tcp_select_window(struct sock *sk);
776 void tcp_send_window_probe(struct sock *sk);
946 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
1070 u32 (*ssthresh)(struct sock *sk);
1073 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1076 void (*set_state)(struct sock *sk, u8 new_state);
1079 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1082 void (*in_ack_event)(struct sock *sk, u32 flags);
1085 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1088 u32 (*min_tso_segs)(struct sock *sk);
1093 void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1097 u32 (*undo_cwnd)(struct sock *sk);
1099 u32 (*sndbuf_expand)(struct sock *sk);
1103 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1113 void (*init)(struct sock *sk);
1115 void (*release)(struct sock *sk);
1121 void tcp_assign_congestion_control(struct sock *sk);
1122 void tcp_init_congestion_control(struct sock *sk);
1123 void tcp_cleanup_congestion_control(struct sock *sk);
1129 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1134 u32 tcp_reno_ssthresh(struct sock *sk);
1135 u32 tcp_reno_undo_cwnd(struct sock *sk);
1136 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
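
The ops table at lines 1070–1115 is the congestion-control hook surface, and the tcp_reno_* helpers above are exported precisely so modules can reuse them. A minimal module sketch (the "example" name is hypothetical; tcp_register_congestion_control()/tcp_unregister_congestion_control() are assumed available as in mainline):

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
	/* reuse the stock Reno callbacks for the mandatory hooks */
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");

Once loaded, the algorithm can be selected per-socket via setsockopt(TCP_CONGESTION, "example") or system-wide via net.ipv4.tcp_congestion_control.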
1151 static inline bool tcp_ca_needs_ecn(const struct sock *sk) in tcp_ca_needs_ecn() argument
1153 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1158 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) in tcp_ca_event() argument
1160 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1163 icsk->icsk_ca_ops->cwnd_event(sk, event); in tcp_ca_event()
1167 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1170 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1171 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1173 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1175 void tcp_rate_check_app_limited(struct sock *sk);
1246 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) in tcp_in_cwnd_reduction() argument
1249 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1256 static inline __u32 tcp_current_ssthresh(const struct sock *sk) in tcp_current_ssthresh() argument
1258 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_ssthresh()
1260 if (tcp_in_cwnd_reduction(sk)) in tcp_current_ssthresh()
1271 void tcp_enter_cwr(struct sock *sk);
1301 static inline bool tcp_is_cwnd_limited(const struct sock *sk) in tcp_is_cwnd_limited() argument
1303 const struct tcp_sock *tp = tcp_sk(sk); in tcp_is_cwnd_limited()
1321 static inline bool tcp_needs_internal_pacing(const struct sock *sk) in tcp_needs_internal_pacing() argument
1323 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; in tcp_needs_internal_pacing()
1329 static inline unsigned long tcp_pacing_delay(const struct sock *sk) in tcp_pacing_delay() argument
1331 s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache; in tcp_pacing_delay()
1336 static inline void tcp_reset_xmit_timer(struct sock *sk, in tcp_reset_xmit_timer() argument
1341 inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk), in tcp_reset_xmit_timer()
1351 static inline unsigned long tcp_probe0_base(const struct sock *sk) in tcp_probe0_base() argument
1353 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1357 static inline unsigned long tcp_probe0_when(const struct sock *sk, in tcp_probe0_when() argument
1361 inet_csk(sk)->icsk_backoff); in tcp_probe0_when()
1362 u64 when = (u64)tcp_probe0_base(sk) << backoff; in tcp_probe0_when()
1367 static inline void tcp_check_probe_timer(struct sock *sk) in tcp_check_probe_timer() argument
1369 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) in tcp_check_probe_timer()
1370 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_check_probe_timer()
1371 tcp_probe0_base(sk), TCP_RTO_MAX); in tcp_check_probe_timer()
1399 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1403 int tcp_filter(struct sock *sk, struct sk_buff *skb);
1404 void tcp_set_state(struct sock *sk, int state);
1405 void tcp_done(struct sock *sk);
1406 int tcp_abort(struct sock *sk, int err);
1414 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1416 static inline void tcp_slow_start_after_idle_check(struct sock *sk) in tcp_slow_start_after_idle_check() argument
1418 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_slow_start_after_idle_check()
1419 struct tcp_sock *tp = tcp_sk(sk); in tcp_slow_start_after_idle_check()
1422 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || in tcp_slow_start_after_idle_check()
1426 if (delta > inet_csk(sk)->icsk_rto) in tcp_slow_start_after_idle_check()
1427 tcp_cwnd_restart(sk, delta); in tcp_slow_start_after_idle_check()
1431 void tcp_select_initial_window(const struct sock *sk, int __space,
1436 static inline int tcp_win_from_space(const struct sock *sk, int space) in tcp_win_from_space() argument
1438 int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale); in tcp_win_from_space()
1446 static inline int tcp_space(const struct sock *sk) in tcp_space() argument
1448 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - in tcp_space()
1449 READ_ONCE(sk->sk_backlog.len) - in tcp_space()
1450 atomic_read(&sk->sk_rmem_alloc)); in tcp_space()
1453 static inline int tcp_full_space(const struct sock *sk) in tcp_full_space() argument
1455 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); in tcp_full_space()
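
A sketch reconstructing the truncated tcp_win_from_space() above, assuming the usual semantics of tcp_adv_win_scale: a positive scale reserves space>>scale bytes for skb overhead, a non-positive scale advertises only space>>(-scale):

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);

	return tcp_adv_win_scale <= 0 ?
		(space >> (-tcp_adv_win_scale)) :
		space - (space >> tcp_adv_win_scale);
}

tcp_space() then applies this to what is left of sk_rcvbuf after backlog and already-allocated receive memory, while tcp_full_space() applies it to the whole buffer.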
1458 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk) in tcp_adjust_rcv_ssthresh() argument
1460 int unused_mem = sk_unused_reserved_mem(sk); in tcp_adjust_rcv_ssthresh()
1461 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_rcv_ssthresh()
1466 tcp_win_from_space(sk, unused_mem)); in tcp_adjust_rcv_ssthresh()
1469 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1476 static inline bool tcp_rmem_pressure(const struct sock *sk) in tcp_rmem_pressure() argument
1480 if (tcp_under_memory_pressure(sk)) in tcp_rmem_pressure()
1483 rcvbuf = READ_ONCE(sk->sk_rcvbuf); in tcp_rmem_pressure()
1486 return atomic_read(&sk->sk_rmem_alloc) > threshold; in tcp_rmem_pressure()
1489 static inline bool tcp_epollin_ready(const struct sock *sk, int target) in tcp_epollin_ready() argument
1491 const struct tcp_sock *tp = tcp_sk(sk); in tcp_epollin_ready()
1497 return (avail >= target) || tcp_rmem_pressure(sk) || in tcp_epollin_ready()
1498 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss); in tcp_epollin_ready()
1505 void tcp_enter_memory_pressure(struct sock *sk);
1506 void tcp_leave_memory_pressure(struct sock *sk);
1540 static inline int tcp_fin_time(const struct sock *sk) in tcp_fin_time() argument
1542 int fin_timeout = tcp_sk(sk)->linger2 ? : in tcp_fin_time()
1543 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); in tcp_fin_time()
1544 const int rto = inet_csk(sk)->icsk_rto; in tcp_fin_time()
1675 const struct sock *sk, const struct sk_buff *skb);
1676 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1679 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1681 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1687 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1691 tcp_md5_do_lookup(const struct sock *sk, int l3index, in tcp_md5_do_lookup() argument
1696 return __tcp_md5_do_lookup(sk, l3index, addr, family); in tcp_md5_do_lookup()
1700 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1708 tcp_md5_do_lookup(const struct sock *sk, int l3index, in tcp_md5_do_lookup() argument
1715 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, in tcp_inbound_md5_hash() argument
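
The userspace view of the MD5 hooks above (a sketch): keys are installed per-peer with the TCP_MD5SIG socket option, before connect() on a client or on the listener for inbound connections.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int set_md5_key(int fd, const struct sockaddr_in *peer,
		       const void *key, int keylen)
{
	struct tcp_md5sig md5 = { .tcpm_keylen = keylen };

	/* key is bound to this exact peer address */
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	memcpy(md5.tcpm_key, key, keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}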
1738 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1740 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1752 void tcp_fastopen_destroy_cipher(struct sock *sk);
1754 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1758 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1759 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1764 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1766 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1779 void tcp_fastopen_active_disable(struct sock *sk);
1780 bool tcp_fastopen_active_should_disable(struct sock *sk);
1781 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1782 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1786 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk) in tcp_fastopen_get_ctx() argument
1790 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); in tcp_fastopen_get_ctx()
1792 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx); in tcp_fastopen_get_ctx()
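
The userspace counterpart of the Fast Open machinery above (a sketch): the server arms TFO with a pending-request queue length, the client sends its first payload on the SYN via MSG_FASTOPEN.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void fastopen_demo(int listen_fd, int client_fd, const void *buf,
			  size_t len, const struct sockaddr_in *srv)
{
	int qlen = 16;	/* server: allow up to 16 pending TFO requests */

	setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));

	/* client: connect + first payload in one call; the data rides
	 * the SYN if a valid cookie is cached, else falls back to a
	 * normal handshake */
	sendto(client_fd, buf, len, MSG_FASTOPEN,
	       (const struct sockaddr *)srv, sizeof(*srv));
}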
1824 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1825 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1844 void tcp_write_queue_purge(struct sock *sk);
1846 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) in tcp_rtx_queue_head() argument
1848 return skb_rb_first(&sk->tcp_rtx_queue); in tcp_rtx_queue_head()
1851 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) in tcp_rtx_queue_tail() argument
1853 return skb_rb_last(&sk->tcp_rtx_queue); in tcp_rtx_queue_tail()
1856 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) in tcp_write_queue_tail() argument
1858 return skb_peek_tail(&sk->sk_write_queue); in tcp_write_queue_tail()
1861 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ argument
1862 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1864 static inline struct sk_buff *tcp_send_head(const struct sock *sk) in tcp_send_head() argument
1866 return skb_peek(&sk->sk_write_queue); in tcp_send_head()
1869 static inline bool tcp_skb_is_last(const struct sock *sk, in tcp_skb_is_last() argument
1872 return skb_queue_is_last(&sk->sk_write_queue, skb); in tcp_skb_is_last()
1882 static inline bool tcp_write_queue_empty(const struct sock *sk) in tcp_write_queue_empty() argument
1884 const struct tcp_sock *tp = tcp_sk(sk); in tcp_write_queue_empty()
1889 static inline bool tcp_rtx_queue_empty(const struct sock *sk) in tcp_rtx_queue_empty() argument
1891 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue); in tcp_rtx_queue_empty()
1894 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) in tcp_rtx_and_write_queues_empty() argument
1896 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk); in tcp_rtx_and_write_queues_empty()
1899 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in tcp_add_write_queue_tail() argument
1901 __skb_queue_tail(&sk->sk_write_queue, skb); in tcp_add_write_queue_tail()
1904 if (sk->sk_write_queue.next == skb) in tcp_add_write_queue_tail()
1905 tcp_chrono_start(sk, TCP_CHRONO_BUSY); in tcp_add_write_queue_tail()
1911 struct sock *sk) in tcp_insert_write_queue_before() argument
1913 __skb_queue_before(&sk->sk_write_queue, skb, new); in tcp_insert_write_queue_before()
1916 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) in tcp_unlink_write_queue() argument
1919 __skb_unlink(skb, &sk->sk_write_queue); in tcp_unlink_write_queue()
1924 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink() argument
1927 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); in tcp_rtx_queue_unlink()
1930 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink_and_free() argument
1933 tcp_rtx_queue_unlink(skb, sk); in tcp_rtx_queue_unlink_and_free()
1934 tcp_wmem_free_skb(sk, skb); in tcp_rtx_queue_unlink_and_free()
1937 static inline void tcp_push_pending_frames(struct sock *sk) in tcp_push_pending_frames() argument
1939 if (tcp_send_head(sk)) { in tcp_push_pending_frames()
1940 struct tcp_sock *tp = tcp_sk(sk); in tcp_push_pending_frames()
1942 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); in tcp_push_pending_frames()
1961 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) in tcp_advance_highest_sack() argument
1963 tcp_sk(sk)->highest_sack = skb_rb_next(skb); in tcp_advance_highest_sack()
1966 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) in tcp_highest_sack() argument
1968 return tcp_sk(sk)->highest_sack; in tcp_highest_sack()
1971 static inline void tcp_highest_sack_reset(struct sock *sk) in tcp_highest_sack_reset() argument
1973 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk); in tcp_highest_sack_reset()
1977 static inline void tcp_highest_sack_replace(struct sock *sk, in tcp_highest_sack_replace() argument
1981 if (old == tcp_highest_sack(sk)) in tcp_highest_sack_replace()
1982 tcp_sk(sk)->highest_sack = new; in tcp_highest_sack_replace()
1986 static inline bool inet_sk_transparent(const struct sock *sk) in inet_sk_transparent() argument
1988 switch (sk->sk_state) { in inet_sk_transparent()
1990 return inet_twsk(sk)->tw_transparent; in inet_sk_transparent()
1992 return inet_rsk(inet_reqsk(sk))->no_srccheck; in inet_sk_transparent()
1994 return inet_sk(sk)->transparent; in inet_sk_transparent()
2030 void tcp_v4_destroy_sock(struct sock *sk);
2049 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2056 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2059 struct sock *sk, struct sk_buff *skb);
2064 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
2068 const struct sock *sk,
2070 int (*md5_parse)(struct sock *sk,
2080 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2084 const struct sock *sk,
2091 struct dst_entry *(*route_req)(const struct sock *sk,
2097 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2111 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
2114 tcp_synq_overflow(sk); in cookie_init_sequence()
2115 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); in cookie_init_sequence()
2120 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
2133 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2134 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2137 extern bool tcp_rack_mark_lost(struct sock *sk);
2140 extern void tcp_rack_reo_timeout(struct sock *sk);
2141 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2144 static inline s64 tcp_rto_delta_us(const struct sock *sk) in tcp_rto_delta_us() argument
2146 const struct sk_buff *skb = tcp_rtx_queue_head(sk); in tcp_rto_delta_us()
2147 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rto_delta_us()
2150 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; in tcp_rto_delta_us()
2189 static inline int tcp_inq(struct sock *sk) in tcp_inq() argument
2191 struct tcp_sock *tp = tcp_sk(sk); in tcp_inq()
2194 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_inq()
2196 } else if (sock_flag(sk, SOCK_URGINLINE) || in tcp_inq()
2204 if (answ && sock_flag(sk, SOCK_DONE)) in tcp_inq()
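
tcp_inq() backs the userspace "bytes available to read" queries; a sketch of the SIOCINQ ioctl that reports the same value:

#include <sys/ioctl.h>
#include <linux/sockios.h>

static int pending_bytes(int fd)
{
	int pending = 0;

	ioctl(fd, SIOCINQ, &pending);	/* SIOCINQ == FIONREAD on TCP */
	return pending;
}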
2236 static inline void tcp_listendrop(const struct sock *sk) in tcp_listendrop() argument
2238 atomic_inc(&((struct sock *)sk)->sk_drops); in tcp_listendrop()
2239 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_listendrop()
2256 int (*init)(struct sock *sk);
2258 void (*update)(struct sock *sk, struct proto *p,
2259 void (*write_space)(struct sock *sk));
2261 void (*release)(struct sock *sk);
2263 int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2264 size_t (*get_info_size)(const struct sock *sk);
2274 int tcp_set_ulp(struct sock *sk, const char *name);
2276 void tcp_cleanup_ulp(struct sock *sk);
2277 void tcp_update_ulp(struct sock *sk, struct proto *p,
2278 void (*write_space)(struct sock *sk));
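
A sketch of registering against the ULP ops table above; example_ulp_init is hypothetical, and the registration call mirrors how mainline ULPs such as kTLS hook in:

static int example_ulp_init(struct sock *sk)
{
	/* a real ULP would swap in its own sk->sk_prot callbacks here */
	return 0;
}

static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_ulp_init,
};

static int __init example_ulp_register(void)
{
	return tcp_register_ulp(&example_ulp_ops);
}

Userspace then selects the ULP by name: setsockopt(fd, SOL_TCP, TCP_ULP, "example", sizeof("example")).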
2289 struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2290 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2291 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2294 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2299 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) in tcp_bpf_clone() argument
2326 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2332 if (sk_fullsock(sk)) { in tcp_call_bpf()
2334 sock_owned_by_me(sk); in tcp_call_bpf()
2337 sock_ops.sk = sk; in tcp_call_bpf()
2350 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2354 return tcp_call_bpf(sk, op, 2, args); in tcp_call_bpf_2arg()
2357 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2362 return tcp_call_bpf(sk, op, 3, args); in tcp_call_bpf_3arg()
2366 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2371 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2376 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2384 static inline u32 tcp_timeout_init(struct sock *sk) in tcp_timeout_init() argument
2388 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); in tcp_timeout_init()
2395 static inline u32 tcp_rwnd_init_bpf(struct sock *sk) in tcp_rwnd_init_bpf() argument
2399 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); in tcp_rwnd_init_bpf()
2406 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) in tcp_bpf_ca_needs_ecn() argument
2408 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); in tcp_bpf_ca_needs_ecn()
2411 static inline void tcp_bpf_rtt(struct sock *sk) in tcp_bpf_rtt() argument
2413 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG)) in tcp_bpf_rtt()
2414 tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL); in tcp_bpf_rtt()
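
A sketch of the BPF side these helpers call into: a sockops program answering the RWND_INIT and TIMEOUT_INIT queries issued by tcp_rwnd_init_bpf() and tcp_timeout_init() above. Section name and return conventions follow the usual libbpf sockops pattern; the values chosen are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int tcp_tuner(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_RWND_INIT:
		skops->reply = 40;	/* initial receive window, in MSS */
		break;
	case BPF_SOCK_OPS_TIMEOUT_INIT:
		skops->reply = -1;	/* non-positive: keep kernel default */
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";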
2423 void (*cad)(struct sock *sk, u32 ack_seq));
2439 static inline u64 tcp_transmit_time(const struct sock *sk) in tcp_transmit_time() argument
2442 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ? in tcp_transmit_time()
2443 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay; in tcp_transmit_time()
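
A sketch completing the truncated tcp_transmit_time(): with tx delay enabled, the transmit time is "now + delay" in nanoseconds, otherwise 0 (no pacing offset). This assumes the upstream tcp_tx_delay_enabled static branch guards the fast path:

static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		/* timewait sockets carry their own copy of the delay */
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}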