/linux-3.4.99/net/dccp/

D | timer.c
      37  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_write_timeout()  local
      41  if (icsk->icsk_retransmits != 0)  in dccp_write_timeout()
      43  retry_until = icsk->icsk_syn_retries ?  in dccp_write_timeout()
      46  if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {  in dccp_write_timeout()
      76  if (icsk->icsk_retransmits >= retry_until) {  in dccp_write_timeout()
      89  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_retransmit_timer()  local
     102  if (icsk->icsk_retransmits == 0)  in dccp_retransmit_timer()
     110  if (--icsk->icsk_retransmits == 0)  in dccp_retransmit_timer()
     111  icsk->icsk_retransmits = 1;  in dccp_retransmit_timer()
     113  min(icsk->icsk_rto,  in dccp_retransmit_timer()
     [all …]

D | output.c
      49  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_transmit_skb()  local
      76  if (icsk->icsk_retransmits == 0)  in dccp_transmit_skb()
     134  icsk->icsk_af_ops->send_check(sk, skb);  in dccp_transmit_skb()
     141  err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);  in dccp_transmit_skb()
     164  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_sync_mss()  local
     170  cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +  in dccp_sync_mss()
     190  icsk->icsk_pmtu_cookie = pmtu;  in dccp_sync_mss()
     534  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_connect()  local
     561  icsk->icsk_retransmits = 0;  in dccp_connect()
     563  icsk->icsk_rto, DCCP_RTO_MAX);  in dccp_connect()
     [all …]

D | diag.c
      22  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_get_info()  local
      27  info->tcpi_retransmits = icsk->icsk_retransmits;  in dccp_get_info()
      28  info->tcpi_probes = icsk->icsk_probes_out;  in dccp_get_info()
      29  info->tcpi_backoff = icsk->icsk_backoff;  in dccp_get_info()
      30  info->tcpi_pmtu = icsk->icsk_pmtu_cookie;  in dccp_get_info()

D | input.c
     405  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_rcv_request_sent_state_process()  local
     447  dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in dccp_rcv_request_sent_state_process()
     476  icsk->icsk_af_ops->rebuild_header(sk);  in dccp_rcv_request_sent_state_process()
     483  if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||  in dccp_rcv_request_sent_state_process()
     484  icsk->icsk_accept_queue.rskq_defer_accept) {  in dccp_rcv_request_sent_state_process()

D | minisocks.c
      54  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_time_wait()  local
      55  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  in dccp_time_wait()

D | proto.c
     176  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_init_sock()  local
     178  icsk->icsk_rto = DCCP_TIMEOUT_INIT;  in dccp_init_sock()
     179  icsk->icsk_syn_retries = sysctl_dccp_request_retries;  in dccp_init_sock()
     182  icsk->icsk_sync_mss = dccp_sync_mss;  in dccp_init_sock()
     253  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_disconnect()  local
     290  icsk->icsk_backoff = 0;  in dccp_disconnect()
     294  WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);  in dccp_disconnect()

D | ipv6.c
     855  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_v6_connect()  local
     923  u32 exthdrlen = icsk->icsk_ext_hdr_len;  in dccp_v6_connect()
     935  icsk->icsk_af_ops = &dccp_ipv6_mapped;  in dccp_v6_connect()
     940  icsk->icsk_ext_hdr_len = exthdrlen;  in dccp_v6_connect()
     941  icsk->icsk_af_ops = &dccp_ipv6_af_ops;  in dccp_v6_connect()
     981  icsk->icsk_ext_hdr_len = 0;  in dccp_v6_connect()
     983  icsk->icsk_ext_hdr_len = (np->opt->opt_flen +  in dccp_v6_connect()
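Nearly every match in this section opens the same way: the handler converts the generic struct sock into its connection-oriented view with inet_csk(sk) and then reads or writes icsk_* fields (RTO, retransmit count, af_ops, the PMTU cookie). Below is a minimal user-space model of that accessor pattern; the struct layout, field set and numeric values are invented for illustration, so treat it as a sketch of the cast-based downcast rather than the kernel definitions.

    /* Minimal model of the inet_csk() pattern used throughout the listing. */
    #include <stdio.h>

    struct sock {                 /* stand-in for the generic socket */
        int sk_state;
    };

    struct inet_connection_sock { /* begins with the embedded generic socket */
        struct sock  icsk_inet;
        unsigned int icsk_rto;
        unsigned int icsk_retransmits;
        unsigned int icsk_syn_retries;
    };

    /* Same idea as the kernel helper: because the connection-oriented socket
     * embeds the generic one as its first member, the accessor is a cast. */
    static struct inet_connection_sock *inet_csk(const struct sock *sk)
    {
        return (struct inet_connection_sock *)sk;
    }

    int main(void)
    {
        struct inet_connection_sock conn = {
            .icsk_rto = 3000,          /* arbitrary stand-in for DCCP_TIMEOUT_INIT */
            .icsk_retransmits = 0,
        };
        struct sock *sk = &conn.icsk_inet;

        /* the shape of dccp_write_timeout()/dccp_retransmit_timer() above */
        struct inet_connection_sock *icsk = inet_csk(sk);
        icsk->icsk_retransmits++;
        printf("rto=%u retransmits=%u\n", icsk->icsk_rto, icsk->icsk_retransmits);
        return 0;
    }

The cast is only valid because the connection-oriented socket starts with the generic one, which is exactly the layout the kernel relies on for this helper.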
/linux-3.4.99/net/ipv4/

D | tcp_timer.c
     113  static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)  in tcp_mtu_probing()  argument
     117  if (!icsk->icsk_mtup.enabled) {  in tcp_mtu_probing()
     118  icsk->icsk_mtup.enabled = 1;  in tcp_mtu_probing()
     119  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in tcp_mtu_probing()
     124  mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;  in tcp_mtu_probing()
     127  icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);  in tcp_mtu_probing()
     128  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in tcp_mtu_probing()
     169  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_write_timeout()  local
     174  if (icsk->icsk_retransmits)  in tcp_write_timeout()
     176  retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;  in tcp_write_timeout()
     [all …]

D | inet_connection_sock.c
     224  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_wait_for_connect()  local
     246  if (reqsk_queue_empty(&icsk->icsk_accept_queue))  in inet_csk_wait_for_connect()
     250  if (!reqsk_queue_empty(&icsk->icsk_accept_queue))  in inet_csk_wait_for_connect()
     271  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_accept()  local
     285  if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {  in inet_csk_accept()
     298  newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);  in inet_csk_accept()
     320  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_init_xmit_timers()  local
     322  setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,  in inet_csk_init_xmit_timers()
     324  setup_timer(&icsk->icsk_delack_timer, delack_handler,  in inet_csk_init_xmit_timers()
     327  icsk->icsk_pending = icsk->icsk_ack.pending = 0;  in inet_csk_init_xmit_timers()
     [all …]
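The tcp_write_timeout() lines above show how the retry budget is picked: while the handshake is still in progress, a per-socket icsk_syn_retries takes precedence if the application set it, otherwise the sysctl default applies, and icsk_retransmits is compared against that budget. A rough stand-alone sketch of the decision follows; the sysctl values are stand-ins rather than this kernel's defaults, and the orphan and RTO-backoff refinements of the real function are omitted.

    /* Sketch of the retry-budget check behind tcp_write_timeout(). */
    #include <stdbool.h>
    #include <stdio.h>

    static int sysctl_tcp_syn_retries = 5;    /* stand-in value */
    static int sysctl_tcp_retries2    = 15;   /* stand-in value */

    struct conn {
        unsigned int icsk_retransmits;   /* unanswered retransmissions so far */
        unsigned int icsk_syn_retries;   /* 0 means "use the sysctl" */
        bool         syn_sent;           /* still in the handshake? */
    };

    static bool write_timed_out(const struct conn *c)
    {
        unsigned int retry_until;

        if (c->syn_sent)
            /* expanded form of the "a ? : b" selection at tcp_timer.c:176 */
            retry_until = c->icsk_syn_retries ? c->icsk_syn_retries
                                              : (unsigned int)sysctl_tcp_syn_retries;
        else
            retry_until = (unsigned int)sysctl_tcp_retries2;

        return c->icsk_retransmits >= retry_until;
    }

    int main(void)
    {
        struct conn c = { .icsk_retransmits = 7, .syn_sent = true };
        printf("give up? %s\n", write_timed_out(&c) ? "yes" : "no");
        return 0;
    }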
D | tcp_cong.c
      81  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_init_congestion_control()  local
      85  if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {  in tcp_init_congestion_control()
      89  icsk->icsk_ca_ops = ca;  in tcp_init_congestion_control()
      98  if (icsk->icsk_ca_ops->init)  in tcp_init_congestion_control()
      99  icsk->icsk_ca_ops->init(sk);  in tcp_init_congestion_control()
     105  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_cleanup_congestion_control()  local
     107  if (icsk->icsk_ca_ops->release)  in tcp_cleanup_congestion_control()
     108  icsk->icsk_ca_ops->release(sk);  in tcp_cleanup_congestion_control()
     109  module_put(icsk->icsk_ca_ops->owner);  in tcp_cleanup_congestion_control()
     239  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_set_congestion_control()  local
     [all …]
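tcp_cong.c reaches the pluggable congestion-control algorithm only through icsk->icsk_ca_ops, guarding the optional init and release hooks with NULL checks and dropping the module reference on cleanup (tcp_cong.c:98-109 above). The sketch below models that ops-table indirection in user space; the reduced tcp_congestion_ops and the demo algorithm are invented for illustration.

    /* User-space model of the icsk_ca_ops indirection in tcp_cong.c. */
    #include <stdio.h>

    struct sock;

    struct tcp_congestion_ops {
        const char *name;
        void (*init)(struct sock *sk);     /* optional hook */
        void (*release)(struct sock *sk);  /* optional hook */
    };

    struct sock {
        const struct tcp_congestion_ops *icsk_ca_ops;
    };

    static void demo_init(struct sock *sk) { (void)sk; puts("demo init"); }

    static const struct tcp_congestion_ops demo_ops = {
        .name = "demo",
        .init = demo_init,
        /* .release left NULL: the guard below must tolerate that */
    };

    static void init_congestion_control(struct sock *sk)
    {
        if (sk->icsk_ca_ops->init)        /* mirrors tcp_init_congestion_control() */
            sk->icsk_ca_ops->init(sk);
    }

    static void cleanup_congestion_control(struct sock *sk)
    {
        if (sk->icsk_ca_ops->release)     /* mirrors tcp_cleanup_congestion_control() */
            sk->icsk_ca_ops->release(sk);
        /* the kernel additionally drops the module reference with module_put() here */
    }

    int main(void)
    {
        struct sock sk = { .icsk_ca_ops = &demo_ops };
        init_congestion_control(&sk);
        cleanup_congestion_control(&sk);
        return 0;
    }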
D | tcp_output.c
     159  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_event_data_sent()  local
     163  (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))  in tcp_event_data_sent()
     171  if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)  in tcp_event_data_sent()
     172  icsk->icsk_ack.pingpong = 1;  in tcp_event_data_sent()
     799  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_transmit_skb()  local
     814  if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)  in tcp_transmit_skb()
     894  icsk->icsk_af_ops->send_check(sk, skb);  in tcp_transmit_skb()
     906  err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);  in tcp_transmit_skb()
    1158  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_mtu_to_mss()  local
    1164  mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);  in tcp_mtu_to_mss()
     [all …]

D | tcp_input.c
     135  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_measure_rcv_mss()  local
     136  const unsigned int lss = icsk->icsk_ack.last_seg_size;  in tcp_measure_rcv_mss()
     139  icsk->icsk_ack.last_seg_size = 0;  in tcp_measure_rcv_mss()
     145  if (len >= icsk->icsk_ack.rcv_mss) {  in tcp_measure_rcv_mss()
     146  icsk->icsk_ack.rcv_mss = len;  in tcp_measure_rcv_mss()
     167  icsk->icsk_ack.last_seg_size = len;  in tcp_measure_rcv_mss()
     169  icsk->icsk_ack.rcv_mss = len;  in tcp_measure_rcv_mss()
     173  if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)  in tcp_measure_rcv_mss()
     174  icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;  in tcp_measure_rcv_mss()
     175  icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;  in tcp_measure_rcv_mss()
     [all …]

D | tcp_ipv4.c
     345  struct inet_connection_sock *icsk;  in tcp_v4_err()  local
     388  icsk = inet_csk(sk);  in tcp_v4_err()
     419  if (seq != tp->snd_una || !icsk->icsk_retransmits ||  in tcp_v4_err()
     420  !icsk->icsk_backoff)  in tcp_v4_err()
     426  icsk->icsk_backoff--;  in tcp_v4_err()
     428  TCP_TIMEOUT_INIT) << icsk->icsk_backoff;  in tcp_v4_err()
     434  remaining = icsk->icsk_rto - min(icsk->icsk_rto,  in tcp_v4_err()
    1876  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v4_init_sock()  local
    1883  icsk->icsk_rto = TCP_TIMEOUT_INIT;  in tcp_v4_init_sock()
    1901  icsk->icsk_ca_ops = &tcp_init_congestion_ops;  in tcp_v4_init_sock()
     [all …]

D | inet_diag.c
      77  int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,  in inet_sk_diag_fill()  argument
     151  if (icsk == NULL) {  in inet_sk_diag_fill()
     158  if (icsk->icsk_pending == ICSK_TIME_RETRANS) {  in inet_sk_diag_fill()
     160  r->idiag_retrans = icsk->icsk_retransmits;  in inet_sk_diag_fill()
     161  r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);  in inet_sk_diag_fill()
     162  } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {  in inet_sk_diag_fill()
     164  r->idiag_retrans = icsk->icsk_probes_out;  in inet_sk_diag_fill()
     165  r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);  in inet_sk_diag_fill()
     168  r->idiag_retrans = icsk->icsk_probes_out;  in inet_sk_diag_fill()
     179  if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {  in inet_sk_diag_fill()
     [all …]

D | tcp.c
    1207  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_cleanup_rbuf()  local
    1210  if (icsk->icsk_ack.blocked ||  in tcp_cleanup_rbuf()
    1212  tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||  in tcp_cleanup_rbuf()
    1220  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||  in tcp_cleanup_rbuf()
    1221  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&  in tcp_cleanup_rbuf()
    1222  !icsk->icsk_ack.pingpong)) &&  in tcp_cleanup_rbuf()
    2071  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_disconnect()  local
    2111  icsk->icsk_backoff = 0;  in tcp_disconnect()
    2113  icsk->icsk_probes_out = 0;  in tcp_disconnect()
    2126  WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);  in tcp_disconnect()
     [all …]
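dccp_sync_mss() (output.c:170) and tcp_mtu_to_mss() (tcp_output.c:1164) derive the payload limit from the path MTU by subtracting the network-header length reported through icsk_af_ops, any extension-header bytes tracked in icsk_ext_hdr_len and the transport header, and they cache the MTU in icsk_pmtu_cookie. A simplified version of that arithmetic, ignoring options and the kernel's clamping, looks like this:

    /* Sketch of the MSS-from-PMTU arithmetic; header sizes are the plain
     * IPv4/TCP values and options are ignored. */
    #include <stdio.h>

    struct pmtu_ctx {
        unsigned int net_header_len;   /* icsk_af_ops->net_header_len, 20 for IPv4 */
        unsigned int ext_hdr_len;      /* icsk_ext_hdr_len, e.g. IPv6 option bytes */
        unsigned int pmtu_cookie;      /* icsk_pmtu_cookie, the cached path MTU */
    };

    static unsigned int mtu_to_mss(struct pmtu_ctx *c, unsigned int pmtu,
                                   unsigned int transport_hdr_len)
    {
        c->pmtu_cookie = pmtu;         /* remember which MTU this MSS belongs to */
        return pmtu - c->net_header_len - c->ext_hdr_len - transport_hdr_len;
    }

    int main(void)
    {
        struct pmtu_ctx c = { .net_header_len = 20, .ext_hdr_len = 0 };
        /* 1500-byte Ethernet MTU and a 20-byte TCP header give a 1460-byte MSS */
        printf("mss = %u\n", mtu_to_mss(&c, 1500, 20));
        return 0;
    }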
D | tcp_htcp.c
      84  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_rtt()  local
      92  if (icsk->icsk_ca_state == TCP_CA_Open) {  in measure_rtt()
     103  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_achieved_throughput()  local
     108  if (icsk->icsk_ca_state == TCP_CA_Open)  in measure_achieved_throughput()
     118  if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {  in measure_achieved_throughput()

D | tcp_minisocks.c
      60  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_remember_stamp()  local
      65  peer = icsk->icsk_af_ops->get_peer(sk, &release_it);  in tcp_remember_stamp()
     317  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_time_wait()  local
     329  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  in tcp_time_wait()

D | tcp_bic.c
     205  const struct inet_connection_sock *icsk = inet_csk(sk);  in bictcp_acked()  local
     207  if (icsk->icsk_ca_state == TCP_CA_Open) {  in bictcp_acked()

D | tcp_yeah.c
      63  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_yeah_pkts_acked()  local
      66  if (icsk->icsk_ca_state == TCP_CA_Open)  in tcp_yeah_pkts_acked()

D | inet_timewait_sock.c
     130  const struct inet_connection_sock *icsk = inet_csk(sk);  in __inet_twsk_hashdance()  local
     141  tw->tw_tb = icsk->icsk_bind_hash;  in __inet_twsk_hashdance()
     142  WARN_ON(!icsk->icsk_bind_hash);  in __inet_twsk_hashdance()
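Two recurring details above are worth spelling out. The congestion modules (htcp, bic, yeah) sample RTT and throughput only while icsk_ca_state is TCP_CA_Open, i.e. while no loss recovery is in progress. And tcp_time_wait() together with dccp_time_wait() sizes the TIME-WAIT timeout as 3.5 times the current RTO using shifts, (rto << 2) - (rto >> 1), which is 4*RTO minus RTO/2. A tiny check of that identity:

    /* Verifies the 3.5*RTO shift trick used at tcp_minisocks.c:329 and
     * dccp minisocks.c:55. */
    #include <stdio.h>

    static unsigned int timewait_rto(unsigned int icsk_rto)
    {
        return (icsk_rto << 2) - (icsk_rto >> 1);   /* 4*rto - rto/2 = 3.5*rto */
    }

    int main(void)
    {
        unsigned int rto = 200;                     /* e.g. 200 jiffies */
        printf("%u -> %u (expected 3.5x = %u)\n",
               rto, timewait_rto(rto), rto * 7 / 2);
        return 0;
    }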
/linux-3.4.99/include/net/

D | inet_connection_sock.h
     187  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_clear_xmit_timer()  local
     190  icsk->icsk_pending = 0;  in inet_csk_clear_xmit_timer()
     192  sk_stop_timer(sk, &icsk->icsk_retransmit_timer);  in inet_csk_clear_xmit_timer()
     195  icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;  in inet_csk_clear_xmit_timer()
     197  sk_stop_timer(sk, &icsk->icsk_delack_timer);  in inet_csk_clear_xmit_timer()
     214  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_reset_xmit_timer()  local
     225  icsk->icsk_pending = what;  in inet_csk_reset_xmit_timer()
     226  icsk->icsk_timeout = jiffies + when;  in inet_csk_reset_xmit_timer()
     227  sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);  in inet_csk_reset_xmit_timer()
     229  icsk->icsk_ack.pending |= ICSK_ACK_TIMER;  in inet_csk_reset_xmit_timer()
     [all …]

D | tcp.h
     353  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_dec_quickack_mode()  local
     355  if (icsk->icsk_ack.quick) {  in tcp_dec_quickack_mode()
     356  if (pkts >= icsk->icsk_ack.quick) {  in tcp_dec_quickack_mode()
     357  icsk->icsk_ack.quick = 0;  in tcp_dec_quickack_mode()
     359  icsk->icsk_ack.ato = TCP_ATO_MIN;  in tcp_dec_quickack_mode()
     361  icsk->icsk_ack.quick -= pkts;  in tcp_dec_quickack_mode()
     751  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_set_ca_state()  local
     753  if (icsk->icsk_ca_ops->set_state)  in tcp_set_ca_state()
     754  icsk->icsk_ca_ops->set_state(sk, ca_state);  in tcp_set_ca_state()
     755  icsk->icsk_ca_state = ca_state;  in tcp_set_ca_state()
     [all …]
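The inet_connection_sock.h lines show the timer bookkeeping done inline: inet_csk_reset_xmit_timer() records which event is pending in icsk_pending, stores the absolute deadline in icsk_timeout and rearms icsk_retransmit_timer, while inet_csk_clear_xmit_timer() undoes both; a caller such as dccp_connect() passes icsk_rto with DCCP_RTO_MAX as the cap (output.c:563 above). The following user-space sketch models only that pending/timeout bookkeeping; the ICSK_TIME_* values and the jiffies argument are illustrative, and the real helpers additionally drive a struct timer_list through sk_reset_timer()/sk_stop_timer().

    /* Model of the icsk_pending/icsk_timeout bookkeeping in the inline helpers. */
    #include <stdio.h>

    enum { ICSK_TIME_RETRANS = 1, ICSK_TIME_PROBE0 = 3 };  /* illustrative subset */

    struct xmit_timer_state {
        int           icsk_pending;   /* which event the timer is armed for, 0 = none */
        unsigned long icsk_timeout;   /* absolute expiry, in jiffies */
    };

    static void reset_xmit_timer(struct xmit_timer_state *s, int what,
                                 unsigned long when, unsigned long max_when,
                                 unsigned long jiffies_now)
    {
        if (when > max_when)          /* callers pass e.g. icsk_rto, DCCP_RTO_MAX */
            when = max_when;
        s->icsk_pending = what;
        s->icsk_timeout = jiffies_now + when;
        /* kernel: sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); */
    }

    static void clear_xmit_timer(struct xmit_timer_state *s)
    {
        s->icsk_pending = 0;
        /* kernel: sk_stop_timer(sk, &icsk->icsk_retransmit_timer); */
    }

    int main(void)
    {
        struct xmit_timer_state s = { 0, 0 };
        reset_xmit_timer(&s, ICSK_TIME_RETRANS, 3000, 64000, 100000);
        printf("pending=%d timeout=%lu\n", s.icsk_pending, s.icsk_timeout);
        clear_xmit_timer(&s);
        return 0;
    }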
/linux-3.4.99/net/ipv6/

D | inet6_connection_sock.c
     111  const struct inet_connection_sock *icsk = inet_csk(sk);  in inet6_csk_search_req()  local
     112  struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;  in inet6_csk_search_req()
     142  struct inet_connection_sock *icsk = inet_csk(sk);  in inet6_csk_reqsk_queue_hash_add()  local
     143  struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;  in inet6_csk_reqsk_queue_hash_add()
     148  reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);  in inet6_csk_reqsk_queue_hash_add()

D | tcp_ipv6.c
     131  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v6_connect()  local
     207  u32 exthdrlen = icsk->icsk_ext_hdr_len;  in tcp_v6_connect()
     219  icsk->icsk_af_ops = &ipv6_mapped;  in tcp_v6_connect()
     228  icsk->icsk_ext_hdr_len = exthdrlen;  in tcp_v6_connect()
     229  icsk->icsk_af_ops = &ipv6_specific;  in tcp_v6_connect()
     297  icsk->icsk_ext_hdr_len = 0;  in tcp_v6_connect()
     299  icsk->icsk_ext_hdr_len = (np->opt->opt_flen +  in tcp_v6_connect()
    1837  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v6_init_sock()  local
    1844  icsk->icsk_rto = TCP_TIMEOUT_INIT;  in tcp_v6_init_sock()
    1865  icsk->icsk_af_ops = &ipv6_specific;  in tcp_v6_init_sock()
     [all …]

D | ipv6_sockglue.c
     109  struct inet_connection_sock *icsk = inet_csk(sk);  in ipv6_update_options()  local
     110  icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;  in ipv6_update_options()
     111  icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);  in ipv6_update_options()
     193  struct inet_connection_sock *icsk = inet_csk(sk);  in do_ipv6_setsockopt()  local
     199  icsk->icsk_af_ops = &ipv4_specific;  in do_ipv6_setsockopt()
     202  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in do_ipv6_setsockopt()
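tcp_v6_connect() (and dccp_v6_connect() in the DCCP section) handles an IPv4-mapped destination by saving icsk_ext_hdr_len, switching icsk_af_ops to the mapped ops table and attempting the IPv4 connect; if that fails, both fields are restored (tcp_ipv6.c:207, 219, 228-229). ipv6_update_options() shows the companion rule: whenever the extension headers change, icsk_ext_hdr_len is recomputed and icsk_sync_mss() is called with the cached icsk_pmtu_cookie so the MSS tracks the new header overhead. The sketch below models only the save/switch/restore control flow; the ops tables and the IPv4 connect step are invented stand-ins.

    /* Sketch of the IPv4-mapped fallback flow in tcp_v6_connect(). */
    #include <stdio.h>

    struct af_ops { const char *name; };

    static const struct af_ops ipv6_specific = { "ipv6_specific" };
    static const struct af_ops ipv6_mapped   = { "ipv6_mapped"   };

    struct conn {
        const struct af_ops *icsk_af_ops;
        unsigned int         icsk_ext_hdr_len;
    };

    /* stand-in for the embedded IPv4 connect attempt, which may rewrite
     * icsk_ext_hdr_len for IPv4 options (hence the save/restore) */
    static int v4_connect(void) { return -1; /* pretend it failed */ }

    static int v6_connect_mapped(struct conn *c)
    {
        unsigned int exthdrlen = c->icsk_ext_hdr_len;  /* save, as at tcp_ipv6.c:207 */
        int err;

        c->icsk_af_ops = &ipv6_mapped;                 /* as at tcp_ipv6.c:219 */
        err = v4_connect();
        if (err) {
            /* failure path: undo the switch, as at tcp_ipv6.c:228-229 */
            c->icsk_ext_hdr_len = exthdrlen;
            c->icsk_af_ops = &ipv6_specific;
        }
        return err;
    }

    int main(void)
    {
        struct conn c = { &ipv6_specific, 8 };
        v6_connect_mapped(&c);
        printf("ops=%s ext_hdr_len=%u\n", c.icsk_af_ops->name, c.icsk_ext_hdr_len);
        return 0;
    }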