Lines Matching defs:tp (definitions of the identifier tp)
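
The functions listed below appear to come from net/ipv4/tcp_input.c, judging by the names and the monotonically increasing line numbers. Each entry gives the source line number, the text of the defining line, and the enclosing function. Entries without a trailing "local" define tp as a function parameter; entries marked "local" declare it as a local variable, almost always derived from the socket with tcp_sk(sk). The sketch below illustrates that idiom under stated assumptions: the struct layouts and tcp_example_helper() are placeholders invented for illustration, not the real kernel definitions, and the accessor is shown in its classic cast form (recent kernels express the same conversion with container_of()).

	/*
	 * Minimal sketch of the "tp ... local" idiom from the listing below.
	 * Only the shape of the tcp_sk() conversion and the local tp variable
	 * mirror the kernel code; everything else is a stand-in.
	 */
	struct sock {
		int sk_state;			/* placeholder field */
	};

	struct tcp_sock {
		struct sock inet_conn;		/* the kernel embeds struct sock via
						 * inet_connection_sock/inet_sock;
						 * collapsed to one member here */
		unsigned int snd_cwnd;		/* placeholder field */
	};

	/* Classic form of the accessor: because the embedded struct sock sits at
	 * offset 0 of struct tcp_sock, the conversion is a plain pointer cast. */
	static inline struct tcp_sock *tcp_sk(const struct sock *sk)
	{
		return (struct tcp_sock *)sk;
	}

	/* The pattern behind most "local" entries: derive tp from sk once at the
	 * top of the function, then touch TCP-specific state through tp. */
	static void tcp_example_helper(struct sock *sk)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		tp->snd_cwnd += 1;		/* stand-in for real cwnd bookkeeping */
	}
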
335 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) in tcp_ecn_queue_cwr()
355 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr()
362 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce() local
398 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_synack()
404 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_syn()
410 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_ecn_echo()
424 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand() local
483 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window() local
520 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window() local
560 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space() local
596 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window() local
624 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss() local
646 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) in tcp_rcv_rtt_update()
678 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) in tcp_rcv_rtt_measure()
699 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts() local
725 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust() local
793 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv() local
847 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator() local
915 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate() local
952 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto() local
977 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) in tcp_init_cwnd()
1006 static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, in tcp_dsack_seen()
1052 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reordering() local
1085 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint()
1097 static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_notify_skb_loss_event()
1105 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_skb_lost() local
1128 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, in tcp_count_delivered()
1229 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, in tcp_is_sackblock_valid()
1270 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack() local
1368 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one() local
1453 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb() local
1566 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data() local
1705 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk() local
1820 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) in tcp_sack_cache_ok()
1829 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue() local
2024 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) in tcp_limit_reno_sacked()
2044 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering() local
2060 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack() local
2077 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks() local
2092 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) in tcp_reset_reno_sack()
2097 void tcp_clear_retrans(struct tcp_sock *tp) in tcp_clear_retrans()
2106 static inline void tcp_init_undo(struct tcp_sock *tp) in tcp_init_undo()
2125 struct tcp_sock *tp = tcp_sk(sk); in tcp_timeout_mark_lost() local
2157 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss() local
2214 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging() local
2237 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) in tcp_dupack_heuristics()
2341 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover() local
2361 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost() local
2407 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard() local
2418 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) in tcp_tsopt_ecr_before()
2427 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, in tcp_skb_spurious_retrans()
2437 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) in tcp_packet_delayed()
2461 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done() local
2477 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO() local
2503 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction() local
2530 static inline bool tcp_may_undo(const struct tcp_sock *tp) in tcp_may_undo()
2537 struct tcp_sock *tp = tcp_sk(sk); in tcp_is_non_sack_preventing_reopen() local
2553 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery() local
2582 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack() local
2598 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss() local
2631 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction() local
2645 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction() local
2671 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction() local
2688 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr() local
2701 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open() local
2715 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open() local
2741 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success() local
2769 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit() local
2821 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery() local
2848 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss() local
2900 struct tcp_sock *tp = tcp_sk(sk); in tcp_force_fast_retransmit() local
2910 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial() local
2942 struct tcp_sock *tp = tcp_sk(sk); in tcp_identify_packet_loss() local
2975 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert() local
3092 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_rtt_min() local
3109 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt() local
3180 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto() local
3216 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked() local
3262 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue() local
3466 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe() local
3536 static inline bool tcp_may_update_window(const struct tcp_sock *tp, in tcp_may_update_window()
3546 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update()
3556 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_nxt_update()
3573 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window() local
3653 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack() local
3685 static void tcp_store_ts_recent(struct tcp_sock *tp) in tcp_store_ts_recent()
3691 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) in tcp_replace_ts_recent()
3711 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack() local
3753 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_recovery() local
3772 struct tcp_sock *tp = tcp_sk(sk); in tcp_newly_delivered() local
3787 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack() local
4187 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) in tcp_parse_aligned_timestamp()
4211 const struct tcphdr *th, struct tcp_sock *tp) in tcp_fast_parse_options()
4292 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack() local
4313 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard() local
4332 static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp, in tcp_sequence()
4394 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin() local
4475 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set() local
4495 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend() local
4517 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack() local
4540 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) in tcp_sack_maybe_coalesce()
4568 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_compress_send_ack() local
4595 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb() local
4642 static void tcp_sack_remove(struct tcp_sock *tp) in tcp_sack_remove()
4756 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue() local
4823 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo() local
5044 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue() local
5323 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue() local
5383 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue() local
5437 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue() local
5480 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf() local
5515 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space() local
5558 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check() local
5636 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg() local
5702 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg() local
5735 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reset_check() local
5748 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming() local
5890 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established() local
6078 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_transfer() local
6105 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect() local
6136 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack() local
6196 static void smc_check_reset_syn(struct tcp_sock *tp) in smc_check_reset_syn()
6208 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_spurious_syn() local
6225 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process() local
6457 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synrecv_state_fastopen() local
6497 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process() local
6941 struct tcp_sock *tp = tcp_sk(sk); in tcp_get_syncookie_mss() local
6971 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request() local