Lines matching refs:hc in net/dccp/ccids/ccid2.c (each entry: source line number, matched code, enclosing function)
25 static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) in ccid2_hc_tx_alloc_seq() argument
31 if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) / in ccid2_hc_tx_alloc_seq()
49 if (hc->tx_seqbufc == 0) in ccid2_hc_tx_alloc_seq()
50 hc->tx_seqh = hc->tx_seqt = seqp; in ccid2_hc_tx_alloc_seq()
53 hc->tx_seqh->ccid2s_next = seqp; in ccid2_hc_tx_alloc_seq()
54 seqp->ccid2s_prev = hc->tx_seqh; in ccid2_hc_tx_alloc_seq()
56 hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; in ccid2_hc_tx_alloc_seq()
57 seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt; in ccid2_hc_tx_alloc_seq()
61 hc->tx_seqbuf[hc->tx_seqbufc] = seqp; in ccid2_hc_tx_alloc_seq()
62 hc->tx_seqbufc++; in ccid2_hc_tx_alloc_seq()
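
The matches above only show the hc-> accesses inside ccid2_hc_tx_alloc_seq(): the surrounding code allocates another CCID2_SEQBUF_LEN-sized array of sequence records, chains it into a ring of its own, and (after the first allocation) splices it into the existing ring between tx_seqh and tx_seqt. A minimal user-space model of that growth, with made-up names (seq_rec, ring, ring_grow, SEQBUF_LEN), could look like this:

#include <stdint.h>
#include <stdlib.h>

#define SEQBUF_LEN 1024                 /* stand-in for CCID2_SEQBUF_LEN */

struct seq_rec {                        /* models struct ccid2_seq */
        uint64_t seq;
        struct seq_rec *prev, *next;
};

struct ring {
        struct seq_rec *head;           /* next free record (tx_seqh) */
        struct seq_rec *tail;           /* oldest tracked record (tx_seqt) */
        int nchunks;                    /* tx_seqbufc */
};

static int ring_grow(struct ring *r)
{
        struct seq_rec *chunk = calloc(SEQBUF_LEN, sizeof(*chunk));
        int i;

        if (chunk == NULL)
                return -1;

        /* chain the chunk and close it into a ring of its own */
        for (i = 0; i < SEQBUF_LEN - 1; i++) {
                chunk[i].next = &chunk[i + 1];
                chunk[i + 1].prev = &chunk[i];
        }
        chunk[SEQBUF_LEN - 1].next = &chunk[0];
        chunk[0].prev = &chunk[SEQBUF_LEN - 1];

        if (r->nchunks == 0) {
                /* first allocation: head and tail start on the same record */
                r->head = r->tail = &chunk[0];
        } else {
                /* splice the new records in between head and tail, i.e.
                 * exactly where the old ring was about to run out of room */
                r->head->next = &chunk[0];
                chunk[0].prev = r->head;
                r->tail->prev = &chunk[SEQBUF_LEN - 1];
                chunk[SEQBUF_LEN - 1].next = r->tail;
        }
        r->nchunks++;
        return 0;
}

The chunked scheme lets the ring grow while older records are still in flight; the chunks are only freed at socket teardown (see the kfree loop in ccid2_hc_tx_exit() further down).
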
94 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_check_l_ack_ratio() local
106 if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd) in ccid2_check_l_ack_ratio()
107 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U); in ccid2_check_l_ack_ratio()
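
ccid2_check_l_ack_ratio() applies a single rule: if the negotiated local Ack Ratio has ended up larger than the congestion window, drop it to half the window, but never below 1. A stand-alone restatement (clamped_ack_ratio is a hypothetical helper, not a kernel function):

static uint32_t clamped_ack_ratio(uint32_t negotiated, uint32_t cwnd)
{
        if (negotiated <= cwnd)
                return negotiated;          /* still within bounds */
        return cwnd / 2 ? cwnd / 2 : 1;     /* halve, floor of 1 */
}
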
129 struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer); in ccid2_hc_tx_rto_expire() local
130 struct sock *sk = hc->sk; in ccid2_hc_tx_rto_expire()
131 const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); in ccid2_hc_tx_rto_expire()
135 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5); in ccid2_hc_tx_rto_expire()
145 hc->tx_rto <<= 1; in ccid2_hc_tx_rto_expire()
146 if (hc->tx_rto > DCCP_RTO_MAX) in ccid2_hc_tx_rto_expire()
147 hc->tx_rto = DCCP_RTO_MAX; in ccid2_hc_tx_rto_expire()
150 hc->tx_ssthresh = hc->tx_cwnd / 2; in ccid2_hc_tx_rto_expire()
151 if (hc->tx_ssthresh < 2) in ccid2_hc_tx_rto_expire()
152 hc->tx_ssthresh = 2; in ccid2_hc_tx_rto_expire()
153 hc->tx_cwnd = 1; in ccid2_hc_tx_rto_expire()
154 hc->tx_pipe = 0; in ccid2_hc_tx_rto_expire()
157 hc->tx_seqt = hc->tx_seqh; in ccid2_hc_tx_rto_expire()
158 hc->tx_packets_acked = 0; in ccid2_hc_tx_rto_expire()
161 hc->tx_rpseq = 0; in ccid2_hc_tx_rto_expire()
162 hc->tx_rpdupack = -1; in ccid2_hc_tx_rto_expire()
169 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); in ccid2_hc_tx_rto_expire()
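
Taken together, the ccid2_hc_tx_rto_expire() matches show the usual timeout reaction: back the RTO off exponentially (capped at DCCP_RTO_MAX), remember half the old window as ssthresh (floor 2), collapse cwnd to one packet and count nothing as in flight; the tx_seqt/tx_seqh and tx_rpseq/tx_rpdupack resets then clear the per-packet history. The HZ/5 line above is the fallback taken while user space holds the socket lock: the handler simply retries 200 ms later instead of backing off. A small model of the backoff itself, under assumed names (cc_state, on_rto_expire, RTO_MAX; time in abstract ticks, where the kernel uses jiffies):

#include <stdint.h>

#define RTO_MAX (64 * 100)      /* stand-in for DCCP_RTO_MAX = 64*HZ, at HZ=100 */

struct cc_state {
        uint32_t cwnd;          /* congestion window, in packets */
        uint32_t ssthresh;      /* slow-start threshold */
        uint32_t pipe;          /* packets currently in flight */
        uint32_t rto;           /* retransmission timeout, in ticks */
};

static void on_rto_expire(struct cc_state *cc)
{
        cc->rto <<= 1;                          /* exponential backoff */
        if (cc->rto > RTO_MAX)
                cc->rto = RTO_MAX;

        cc->ssthresh = cc->cwnd / 2;            /* half the old window... */
        if (cc->ssthresh < 2)
                cc->ssthresh = 2;               /* ...but at least 2 */

        cc->cwnd = 1;                           /* restart from one packet */
        cc->pipe = 0;                           /* nothing counts as in flight */
}
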
192 static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd) in ccid2_update_used_window() argument
194 hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4; in ccid2_update_used_window()
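
ccid2_update_used_window() is a one-line moving average: the expected window keeps 3/4 of its previous value and blends in 1/4 of the new sample. The same thing as a free-standing helper (ewma_3_4 is a made-up name):

static uint32_t ewma_3_4(uint32_t old_estimate, uint32_t sample)
{
        /* expected_wnd <- (3 * expected_wnd + new_wnd) / 4 */
        return (3 * old_estimate + sample) / 4;
}
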
200 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_cwnd_application_limited() local
203 win_used = max(hc->tx_cwnd_used, init_win); in ccid2_cwnd_application_limited()
205 if (win_used < hc->tx_cwnd) { in ccid2_cwnd_application_limited()
206 hc->tx_ssthresh = max(hc->tx_ssthresh, in ccid2_cwnd_application_limited()
207 (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2)); in ccid2_cwnd_application_limited()
208 hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1; in ccid2_cwnd_application_limited()
210 hc->tx_cwnd_used = 0; in ccid2_cwnd_application_limited()
211 hc->tx_cwnd_stamp = now; in ccid2_cwnd_application_limited()
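
ccid2_cwnd_application_limited() is the RFC 2861 congestion-window-validation step for flows limited by the application rather than the network: if the window actually used (floored at the initial window) stayed below cwnd, ssthresh is raised to at least 3/4 of cwnd and cwnd decays halfway towards what was really used. A sketch reusing the cc_state type from the timeout model above ('used' corresponds to win_used):

static void cwnd_application_limited(struct cc_state *cc, uint32_t used)
{
        uint32_t three_quarters = (cc->cwnd >> 1) + (cc->cwnd >> 2);

        if (used >= cc->cwnd)
                return;                         /* window was fully used */

        if (cc->ssthresh < three_quarters)      /* remember 3/4 of old cwnd */
                cc->ssthresh = three_quarters;
        cc->cwnd = (cc->cwnd + used) >> 1;      /* decay halfway to 'used' */
}
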
219 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_cwnd_restart() local
220 u32 cwnd = hc->tx_cwnd, restart_cwnd, in ccid2_cwnd_restart()
222 s32 delta = now - hc->tx_lsndtime; in ccid2_cwnd_restart()
224 hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2)); in ccid2_cwnd_restart()
229 while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd) in ccid2_cwnd_restart()
231 hc->tx_cwnd = max(cwnd, restart_cwnd); in ccid2_cwnd_restart()
232 hc->tx_cwnd_stamp = now; in ccid2_cwnd_restart()
233 hc->tx_cwnd_used = 0; in ccid2_cwnd_restart()
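
ccid2_cwnd_restart() covers the other RFC 2861 case, restarting after an idle period: ssthresh again remembers 3/4 of the old cwnd, then cwnd is halved once for every RTO spent idle, but never drops below the restart window (the smaller of the old cwnd and the RFC 3390 initial window). Sketch with the same assumed types ('idle' is the idle time in the same ticks as cc->rto, 'iwnd' the initial window in packets):

static void cwnd_restart(struct cc_state *cc, int32_t idle, uint32_t iwnd)
{
        uint32_t cwnd = cc->cwnd;
        uint32_t restart_cwnd = cwnd < iwnd ? cwnd : iwnd;
        uint32_t three_quarters = (cwnd >> 1) + (cwnd >> 2);

        if (cc->ssthresh < three_quarters)
                cc->ssthresh = three_quarters;

        while ((idle -= (int32_t)cc->rto) >= 0 && cwnd > restart_cwnd)
                cwnd >>= 1;                     /* one halving per idle RTO */

        cc->cwnd = cwnd > restart_cwnd ? cwnd : restart_cwnd;
}
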
241 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_hc_tx_packet_sent() local
246 if (ccid2_do_cwv && !hc->tx_pipe && in ccid2_hc_tx_packet_sent()
247 (s32)(now - hc->tx_lsndtime) >= hc->tx_rto) in ccid2_hc_tx_packet_sent()
250 hc->tx_lsndtime = now; in ccid2_hc_tx_packet_sent()
251 hc->tx_pipe += 1; in ccid2_hc_tx_packet_sent()
254 if (ccid2_cwnd_network_limited(hc)) { in ccid2_hc_tx_packet_sent()
255 ccid2_update_used_window(hc, hc->tx_cwnd); in ccid2_hc_tx_packet_sent()
256 hc->tx_cwnd_used = 0; in ccid2_hc_tx_packet_sent()
257 hc->tx_cwnd_stamp = now; in ccid2_hc_tx_packet_sent()
259 if (hc->tx_pipe > hc->tx_cwnd_used) in ccid2_hc_tx_packet_sent()
260 hc->tx_cwnd_used = hc->tx_pipe; in ccid2_hc_tx_packet_sent()
262 ccid2_update_used_window(hc, hc->tx_cwnd_used); in ccid2_hc_tx_packet_sent()
264 if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto) in ccid2_hc_tx_packet_sent()
268 hc->tx_seqh->ccid2s_seq = dp->dccps_gss; in ccid2_hc_tx_packet_sent()
269 hc->tx_seqh->ccid2s_acked = 0; in ccid2_hc_tx_packet_sent()
270 hc->tx_seqh->ccid2s_sent = now; in ccid2_hc_tx_packet_sent()
272 next = hc->tx_seqh->ccid2s_next; in ccid2_hc_tx_packet_sent()
274 if (next == hc->tx_seqt) { in ccid2_hc_tx_packet_sent()
275 if (ccid2_hc_tx_alloc_seq(hc)) { in ccid2_hc_tx_packet_sent()
280 next = hc->tx_seqh->ccid2s_next; in ccid2_hc_tx_packet_sent()
281 BUG_ON(next == hc->tx_seqt); in ccid2_hc_tx_packet_sent()
283 hc->tx_seqh = next; in ccid2_hc_tx_packet_sent()
285 ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe); in ccid2_hc_tx_packet_sent()
308 hc->tx_arsent++; in ccid2_hc_tx_packet_sent()
310 if (hc->tx_ackloss) { in ccid2_hc_tx_packet_sent()
311 if (hc->tx_arsent >= hc->tx_cwnd) { in ccid2_hc_tx_packet_sent()
312 hc->tx_arsent = 0; in ccid2_hc_tx_packet_sent()
313 hc->tx_ackloss = 0; in ccid2_hc_tx_packet_sent()
323 denom = hc->tx_cwnd * hc->tx_cwnd / denom; in ccid2_hc_tx_packet_sent()
325 if (hc->tx_arsent >= denom) { in ccid2_hc_tx_packet_sent()
327 hc->tx_arsent = 0; in ccid2_hc_tx_packet_sent()
331 hc->tx_arsent = 0; /* or maybe set it to cwnd*/ in ccid2_hc_tx_packet_sent()
336 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); in ccid2_hc_tx_packet_sent()
340 struct ccid2_seq *seqp = hc->tx_seqt; in ccid2_hc_tx_packet_sent()
342 while (seqp != hc->tx_seqh) { in ccid2_hc_tx_packet_sent()
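
The ccid2_hc_tx_packet_sent() matches interleave several jobs: counting the packet into tx_pipe, the CWV bookkeeping done by the two helpers above, writing a sequence record at tx_seqh and advancing it (growing the ring when the head would run into tx_seqt), and the tx_arsent/tx_ackloss heuristic that occasionally asks the peer to acknowledge its Acks. Only the per-packet record handling is restated here, on top of the ring sketch from ccid2_hc_tx_alloc_seq() above (on_packet_sent is a stand-in; 'gss' plays the role of dccps_gss, and the real record also stores a sent timestamp and an acked flag):

static int on_packet_sent(struct ring *r, uint64_t gss, uint32_t *pipe)
{
        *pipe += 1;                             /* one more packet in flight */
        r->head->seq = gss;                     /* greatest sequence sent */

        /* ring full: grow it before advancing; on failure the record at
         * head simply gets overwritten by the next transmission */
        if (r->head->next == r->tail && ring_grow(r) < 0)
                return -1;

        r->head = r->head->next;
        return 0;
}

The BUG_ON(next == hc->tx_seqt) in the original expresses the invariant that a successful allocation always leaves at least one free record between head and tail.
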
366 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_rtt_estimator() local
369 if (hc->tx_srtt == 0) { in ccid2_rtt_estimator()
371 hc->tx_srtt = m << 3; in ccid2_rtt_estimator()
372 hc->tx_mdev = m << 1; in ccid2_rtt_estimator()
374 hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk)); in ccid2_rtt_estimator()
375 hc->tx_rttvar = hc->tx_mdev_max; in ccid2_rtt_estimator()
377 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss; in ccid2_rtt_estimator()
380 m -= (hc->tx_srtt >> 3); in ccid2_rtt_estimator()
381 hc->tx_srtt += m; in ccid2_rtt_estimator()
386 m -= (hc->tx_mdev >> 2); in ccid2_rtt_estimator()
395 m -= (hc->tx_mdev >> 2); in ccid2_rtt_estimator()
397 hc->tx_mdev += m; in ccid2_rtt_estimator()
399 if (hc->tx_mdev > hc->tx_mdev_max) { in ccid2_rtt_estimator()
400 hc->tx_mdev_max = hc->tx_mdev; in ccid2_rtt_estimator()
401 if (hc->tx_mdev_max > hc->tx_rttvar) in ccid2_rtt_estimator()
402 hc->tx_rttvar = hc->tx_mdev_max; in ccid2_rtt_estimator()
412 if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) { in ccid2_rtt_estimator()
413 if (hc->tx_mdev_max < hc->tx_rttvar) in ccid2_rtt_estimator()
414 hc->tx_rttvar -= (hc->tx_rttvar - in ccid2_rtt_estimator()
415 hc->tx_mdev_max) >> 2; in ccid2_rtt_estimator()
416 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss; in ccid2_rtt_estimator()
417 hc->tx_mdev_max = tcp_rto_min(sk); in ccid2_rtt_estimator()
428 hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar; in ccid2_rtt_estimator()
430 if (hc->tx_rto > DCCP_RTO_MAX) in ccid2_rtt_estimator()
431 hc->tx_rto = DCCP_RTO_MAX; in ccid2_rtt_estimator()
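
ccid2_rtt_estimator() is the Jacobson/Karels estimator of RFC 6298 in the same scaled fixed-point form as the kernel's tcp_rtt_estimator(): tx_srtt holds 8*SRTT and tx_mdev holds 4*MDEV, so tx_rto = (tx_srtt >> 3) + tx_rttvar works out to SRTT + 4*RTTVAR. A user-space restatement under assumed names (rtt_state, rtt_sample; RTO_MAX and the fixed-width types come from the timeout sketch above, and 'rto_min' stands in for tcp_rto_min()); the once-per-window rttvar decay driven by dccps_gar/dccps_gss is left out:

struct rtt_state {
        int64_t srtt;           /* 8 * smoothed RTT */
        int64_t mdev;           /* 4 * mean deviation */
        int64_t mdev_max;       /* peak of mdev since the last decay */
        int64_t rttvar;         /* peak-held variation used for the RTO */
        int64_t rto;
};

static void rtt_sample(struct rtt_state *rs, int64_t m, int64_t rto_min)
{
        if (m <= 0)
                m = 1;

        if (rs->srtt == 0) {                    /* first measurement */
                rs->srtt = m << 3;
                rs->mdev = m << 1;
                rs->mdev_max = rs->mdev > rto_min ? rs->mdev : rto_min;
                rs->rttvar = rs->mdev_max;
        } else {
                m -= rs->srtt >> 3;             /* err = sample - SRTT */
                rs->srtt += m;                  /* SRTT += err / 8 */
                if (m < 0) {
                        m = -m;
                        m -= rs->mdev >> 2;
                        if (m > 0)              /* damp mdev growth when the */
                                m >>= 3;        /* RTT drops sharply */
                } else {
                        m -= rs->mdev >> 2;
                }
                rs->mdev += m;                  /* MDEV += (|err| - MDEV) / 4 */

                if (rs->mdev > rs->mdev_max) {
                        rs->mdev_max = rs->mdev;
                        if (rs->mdev_max > rs->rttvar)
                                rs->rttvar = rs->mdev_max;
                }
        }

        rs->rto = (rs->srtt >> 3) + rs->rttvar; /* SRTT + 4*RTTVAR */
        if (rs->rto > RTO_MAX)
                rs->rto = RTO_MAX;
}
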
437 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_new_ack() local
439 int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio; in ccid2_new_ack()
441 if (hc->tx_cwnd < dp->dccps_l_seq_win && in ccid2_new_ack()
443 if (hc->tx_cwnd < hc->tx_ssthresh) { in ccid2_new_ack()
444 if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) { in ccid2_new_ack()
445 hc->tx_cwnd += 1; in ccid2_new_ack()
447 hc->tx_packets_acked = 0; in ccid2_new_ack()
449 } else if (++hc->tx_packets_acked >= hc->tx_cwnd) { in ccid2_new_ack()
450 hc->tx_cwnd += 1; in ccid2_new_ack()
451 hc->tx_packets_acked = 0; in ccid2_new_ack()
464 if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win) in ccid2_new_ack()
466 else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2) in ccid2_new_ack()
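
ccid2_new_ack() grows the window only while the Sequence Window guard shown above leaves room; within that, slow start adds one packet per two newly acked packets (bounded per Ack Vector by *maxincr, which the caller derives from the Ack Ratio), and congestion avoidance adds one packet per full window of acks. The trailing lines then renegotiate the local Sequence Window when cwnd * CCID2_WIN_CHANGE_FACTOR drifts past it. The growth rule alone, reusing cc_state from the sketches above:

static void on_new_ack(struct cc_state *cc, uint32_t *packets_acked,
                       uint32_t *maxincr)
{
        if (cc->cwnd < cc->ssthresh) {                  /* slow start */
                if (*maxincr > 0 && ++*packets_acked >= 2) {
                        cc->cwnd += 1;
                        *maxincr -= 1;
                        *packets_acked = 0;
                }
        } else if (++*packets_acked >= cc->cwnd) {      /* congestion avoidance */
                cc->cwnd += 1;
                *packets_acked = 0;
        }
}
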
482 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_congestion_event() local
484 if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) { in ccid2_congestion_event()
489 hc->tx_last_cong = ccid2_jiffies32; in ccid2_congestion_event()
491 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; in ccid2_congestion_event()
492 hc->tx_ssthresh = max(hc->tx_cwnd, 2U); in ccid2_congestion_event()
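
ccid2_congestion_event() reacts at most once per round trip: a loss or ECN mark on a packet sent before tx_last_cong is ignored; otherwise cwnd is halved (floor 1), ssthresh is pulled down to the new cwnd (floor 2), and the Ack Ratio is rechecked against the smaller window. Sketch (sent_ts and last_cong are jiffies-style timestamps, like ccid2s_sent and tx_last_cong); it returns nonzero when a reaction actually happened:

static int on_congestion_event(struct cc_state *cc, uint32_t sent_ts,
                               uint32_t *last_cong, uint32_t now)
{
        if ((int32_t)(sent_ts - *last_cong) < 0)
                return 0;                       /* already reacted this RTT */

        *last_cong = now;
        cc->cwnd = cc->cwnd / 2 ? cc->cwnd / 2 : 1;
        cc->ssthresh = cc->cwnd > 2 ? cc->cwnd : 2;
        return 1;
}
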
500 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_hc_tx_parse_options() local
505 return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen, in ccid2_hc_tx_parse_options()
514 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_hc_tx_packet_recv() local
515 const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); in ccid2_hc_tx_packet_recv()
530 if (hc->tx_rpdupack == -1) { in ccid2_hc_tx_packet_recv()
531 hc->tx_rpdupack = 0; in ccid2_hc_tx_packet_recv()
532 hc->tx_rpseq = seqno; in ccid2_hc_tx_packet_recv()
535 if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1) in ccid2_hc_tx_packet_recv()
536 hc->tx_rpseq = seqno; in ccid2_hc_tx_packet_recv()
538 else if (after48(seqno, hc->tx_rpseq)) { in ccid2_hc_tx_packet_recv()
539 hc->tx_rpdupack++; in ccid2_hc_tx_packet_recv()
542 if (hc->tx_rpdupack >= NUMDUPACK) { in ccid2_hc_tx_packet_recv()
543 hc->tx_rpdupack = -1; /* XXX lame */ in ccid2_hc_tx_packet_recv()
544 hc->tx_rpseq = 0; in ccid2_hc_tx_packet_recv()
564 if (hc->tx_seqh == hc->tx_seqt) in ccid2_hc_tx_packet_recv()
568 if (after48(ackno, hc->tx_high_ack)) in ccid2_hc_tx_packet_recv()
569 hc->tx_high_ack = ackno; in ccid2_hc_tx_packet_recv()
571 seqp = hc->tx_seqt; in ccid2_hc_tx_packet_recv()
574 if (seqp == hc->tx_seqh) { in ccid2_hc_tx_packet_recv()
575 seqp = hc->tx_seqh->ccid2s_prev; in ccid2_hc_tx_packet_recv()
585 if (hc->tx_cwnd < hc->tx_ssthresh) in ccid2_hc_tx_packet_recv()
589 list_for_each_entry(avp, &hc->tx_av_chunks, node) { in ccid2_hc_tx_packet_recv()
604 if (seqp == hc->tx_seqt) { in ccid2_hc_tx_packet_recv()
632 hc->tx_pipe--; in ccid2_hc_tx_packet_recv()
634 if (seqp == hc->tx_seqt) { in ccid2_hc_tx_packet_recv()
652 seqp = hc->tx_seqt; in ccid2_hc_tx_packet_recv()
653 while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) { in ccid2_hc_tx_packet_recv()
655 if (seqp == hc->tx_seqh) { in ccid2_hc_tx_packet_recv()
656 seqp = hc->tx_seqh->ccid2s_prev; in ccid2_hc_tx_packet_recv()
667 if (seqp == hc->tx_seqt) in ccid2_hc_tx_packet_recv()
688 hc->tx_pipe--; in ccid2_hc_tx_packet_recv()
690 if (seqp == hc->tx_seqt) in ccid2_hc_tx_packet_recv()
695 hc->tx_seqt = last_acked; in ccid2_hc_tx_packet_recv()
699 while (hc->tx_seqt != hc->tx_seqh) { in ccid2_hc_tx_packet_recv()
700 if (!hc->tx_seqt->ccid2s_acked) in ccid2_hc_tx_packet_recv()
703 hc->tx_seqt = hc->tx_seqt->ccid2s_next; in ccid2_hc_tx_packet_recv()
707 if (hc->tx_pipe == 0) in ccid2_hc_tx_packet_recv()
708 sk_stop_timer(sk, &hc->tx_rtotimer); in ccid2_hc_tx_packet_recv()
710 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); in ccid2_hc_tx_packet_recv()
713 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) in ccid2_hc_tx_packet_recv()
715 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); in ccid2_hc_tx_packet_recv()
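
Most of the ccid2_hc_tx_packet_recv() matches belong to the Ack Vector walk over the tx_seqt..tx_seqh ring: records are marked acked, tx_pipe shrinks for each newly acknowledged packet, and tx_seqt is advanced past fully acked history; at the end the RTO timer is stopped once tx_pipe reaches zero (or re-armed otherwise) and the transmit path is woken if the ack lifted the cwnd limit. The tx_rpseq/tx_rpdupack lines near the top are a separate reverse-path heuristic: if arriving packets keep jumping over holes, the sender assumes its own Acks are being lost and doubles the local Ack Ratio. That heuristic, restated with hypothetical names (rp_state, rp_track) and without the 48-bit wraparound handling of after48()/dccp_delta_seqno():

#include <stdint.h>

#define NUMDUPACK 3

struct rp_state {
        int64_t rpseq;          /* highest in-order seqno seen (tx_rpseq) */
        int     rpdupack;       /* -1 = unset, else count of jumps (tx_rpdupack) */
};

/* Returns nonzero when the caller should double its local Ack Ratio. */
static int rp_track(struct rp_state *rp, int64_t seqno)
{
        if (rp->rpdupack == -1) {               /* first packet: just record it */
                rp->rpdupack = 0;
                rp->rpseq = seqno;
                return 0;
        }
        if (seqno - rp->rpseq == 1) {           /* consecutive: advance */
                rp->rpseq = seqno;
        } else if (seqno > rp->rpseq) {         /* jumped over a hole */
                if (++rp->rpdupack >= NUMDUPACK) {
                        rp->rpdupack = -1;      /* restart detection */
                        rp->rpseq = 0;
                        return 1;
                }
        }
        return 0;
}
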
720 struct ccid2_hc_tx_sock *hc = ccid_priv(ccid); in ccid2_hc_tx_init() local
725 hc->tx_ssthresh = ~0U; in ccid2_hc_tx_init()
728 hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache); in ccid2_hc_tx_init()
729 hc->tx_expected_wnd = hc->tx_cwnd; in ccid2_hc_tx_init()
732 max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2); in ccid2_hc_tx_init()
737 if (ccid2_hc_tx_alloc_seq(hc)) in ccid2_hc_tx_init()
740 hc->tx_rto = DCCP_TIMEOUT_INIT; in ccid2_hc_tx_init()
741 hc->tx_rpdupack = -1; in ccid2_hc_tx_init()
742 hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32; in ccid2_hc_tx_init()
743 hc->tx_cwnd_used = 0; in ccid2_hc_tx_init()
744 hc->sk = sk; in ccid2_hc_tx_init()
745 timer_setup(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 0); in ccid2_hc_tx_init()
746 INIT_LIST_HEAD(&hc->tx_av_chunks); in ccid2_hc_tx_init()
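
ccid2_hc_tx_init() starts with an effectively unbounded ssthresh (~0U), an initial cwnd from rfc3390_bytes_to_packets() on the cached MSS, a 3-second initial RTO (DCCP_TIMEOUT_INIT), and bounds the local Ack Ratio at half that initial window (DIV_ROUND_UP(cwnd, 2)). For reference, the RFC 3390 / RFC 5681 section 3.1 initial window as a packet count (initial_window_packets is a stand-in name):

static uint32_t initial_window_packets(uint32_t smss)
{
        if (smss <= 1095)
                return 4;       /* 4 * SMSS still fits in 4380 bytes */
        if (smss > 2190)
                return 2;       /* 2 * SMSS already exceeds 4380 bytes */
        return 3;
}

With a typical 1460-byte MSS this yields an initial window of 3 packets.
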
752 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_hc_tx_exit() local
755 sk_stop_timer(sk, &hc->tx_rtotimer); in ccid2_hc_tx_exit()
757 for (i = 0; i < hc->tx_seqbufc; i++) in ccid2_hc_tx_exit()
758 kfree(hc->tx_seqbuf[i]); in ccid2_hc_tx_exit()
759 hc->tx_seqbufc = 0; in ccid2_hc_tx_exit()
760 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); in ccid2_hc_tx_exit()
765 struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk); in ccid2_hc_rx_packet_recv() local
770 if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) { in ccid2_hc_rx_packet_recv()
772 hc->rx_num_data_pkts = 0; in ccid2_hc_rx_packet_recv()
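
The receive half of CCID-2 is deliberately small: count data packets and acknowledge once the count reaches the peer's Ack Ratio. As a free-standing helper (rx_data_packet is a made-up name; it returns nonzero when a DCCP-Ack should be sent):

static int rx_data_packet(uint32_t *num_data_pkts, uint32_t ack_ratio)
{
        if (++*num_data_pkts >= ack_ratio) {
                *num_data_pkts = 0;
                return 1;       /* time to send an Ack */
        }
        return 0;
}
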