Lines matching references to ca (the per-socket struct bictcp congestion state) in the CUBIC TCP congestion control module, net/ipv4/tcp_cubic.c, grouped below by containing function with the file line numbers kept:

In bictcp_reset(), where ca is the function argument:
107  static inline void bictcp_reset(struct bictcp *ca)
109  memset(ca, 0, offsetof(struct bictcp, unused));
110  ca->found = 0;
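
The memset() at line 109 zeroes only the struct bictcp fields declared before the unused marker, which is why found still has to be cleared explicitly at line 110. The snippet below is a self-contained illustration of that offsetof() reset idiom; the struct layout is made up for the example and is not the kernel's.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical state block: everything before `unused` is wiped by the
 * reset helper, everything after it must be handled explicitly. */
struct demo_state {
	unsigned int cnt;
	unsigned int last_max_cwnd;
	unsigned int unused;		/* reset boundary marker */
	unsigned int found;		/* not covered by the memset() */
};

static void demo_reset(struct demo_state *s)
{
	/* Zero only the bytes up to, but not including, `unused`. */
	memset(s, 0, offsetof(struct demo_state, unused));
	s->found = 0;			/* cleared separately, as at line 110 */
}

int main(void)
{
	struct demo_state s = { .cnt = 7, .last_max_cwnd = 100, .found = 1 };

	demo_reset(&s);
	printf("cnt=%u last_max_cwnd=%u found=%u\n", s.cnt, s.last_max_cwnd, s.found);
	return 0;
}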

In bictcp_hystart_reset(), where ca is a local:
121  struct bictcp *ca = inet_csk_ca(sk);
123  ca->round_start = ca->last_ack = bictcp_clock_us(sk);
124  ca->end_seq = tp->snd_nxt;
125  ca->curr_rtt = ~0U;
126  ca->sample_cnt = 0;

In cubictcp_init(), where ca is a local:
131  struct bictcp *ca = inet_csk_ca(sk);
133  bictcp_reset(ca);

In cubictcp_cwnd_event(), where ca is a local:
145  struct bictcp *ca = inet_csk_ca(sk);
154  if (ca->epoch_start && delta > 0) {
155  ca->epoch_start += delta;
156  if (after(ca->epoch_start, now))
157  ca->epoch_start = now;
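
Lines 154 to 157 run when transmission restarts after an idle period: the cubic epoch start is pushed forward by the idle time so the W(t) curve does not jump ahead as if the flow had been sending the whole time, and it is clamped so it never lands in the future. A small sketch of that adjustment with plain millisecond timestamps instead of jiffies; the function and parameter names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Shift the cubic epoch forward by the time the connection sat idle,
 * clamping so the epoch never ends up in the future. */
static void on_tx_start(uint32_t now, uint32_t last_send, uint32_t *epoch_start)
{
	int32_t delta = (int32_t)(now - last_send);	/* idle time, ms */

	if (*epoch_start && delta > 0) {
		*epoch_start += delta;
		if ((int32_t)(*epoch_start - now) > 0)	/* after(epoch_start, now) */
			*epoch_start = now;
	}
}

int main(void)
{
	uint32_t epoch = 1000;

	on_tx_start(5000, 1500, &epoch);	/* 3500 ms idle */
	printf("epoch_start=%u\n", epoch);	/* prints 4500 */
	return 0;
}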

In bictcp_update(), where ca is the function argument:
214  static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
219  ca->ack_cnt += acked;	/* count the number of ACKed packets */
221  if (ca->last_cwnd == cwnd &&
222      (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
229  if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
232  ca->last_cwnd = cwnd;
233  ca->last_time = tcp_jiffies32;
235  if (ca->epoch_start == 0) {
236  ca->epoch_start = tcp_jiffies32;	/* record beginning */
237  ca->ack_cnt = acked;	/* start counting */
238  ca->tcp_cwnd = cwnd;	/* syn with cubic */
240  if (ca->last_max_cwnd <= cwnd) {
241  ca->bic_K = 0;
242  ca->bic_origin_point = cwnd;
247  ca->bic_K = cubic_root(cube_factor
248      * (ca->last_max_cwnd - cwnd));
249  ca->bic_origin_point = ca->last_max_cwnd;
267  t = (s32)(tcp_jiffies32 - ca->epoch_start);
268  t += usecs_to_jiffies(ca->delay_min);
273  if (t < ca->bic_K)	/* t - K */
274  offs = ca->bic_K - t;
276  offs = t - ca->bic_K;
280  if (t < ca->bic_K)	/* below origin */
281  bic_target = ca->bic_origin_point - delta;
283  bic_target = ca->bic_origin_point + delta;
287  ca->cnt = cwnd / (bic_target - cwnd);
289  ca->cnt = 100 * cwnd;	/* very small increment */
296  if (ca->last_max_cwnd == 0 && ca->cnt > 20)
297  ca->cnt = 20;	/* increase cwnd 5% per RTT */
305  while (ca->ack_cnt > delta) {	/* update tcp cwnd */
306  ca->ack_cnt -= delta;
307  ca->tcp_cwnd++;
310  if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
311  delta = ca->tcp_cwnd - cwnd;
313  if (ca->cnt > max_cnt)
314  ca->cnt = max_cnt;
321  ca->cnt = max(ca->cnt, 2U);
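
Taken together, the bictcp_update() lines above compute CUBIC's window target W(t) = C * (t - K)^3 + W_max: the epoch and origin point are set up at lines 235 to 249, the elapsed time t (plus the minimum delay) is formed at lines 267 and 268, and the distance to the target becomes ca->cnt, the number of ACKs needed per one-segment cwnd increase (lines 287 and 289), with a Reno-emulating lower bound tracked in ca->tcp_cwnd (lines 305 to 314) and a cap of at most one increase per two ACKs (line 321). Below is a floating-point sketch of the target curve only, using the conventional CUBIC constants rather than the kernel's fixed-point scaling; names and values here are illustrative.

#include <math.h>
#include <stdio.h>

#define CUBIC_C		0.4	/* cubic scaling constant */
#define CUBIC_BETA	0.7	/* multiplicative decrease factor */

/* W(t) = C * (t - K)^3 + W_max, with K = cbrt(W_max * (1 - beta) / C),
 * t in seconds since the epoch started, windows in packets. */
static double cubic_target(double w_max, double t)
{
	double K = cbrt(w_max * (1.0 - CUBIC_BETA) / CUBIC_C);
	double d = t - K;

	return CUBIC_C * d * d * d + w_max;
}

int main(void)
{
	double w_max = 100.0;	/* window at the last congestion event */

	for (double t = 0.0; t <= 4.0; t += 0.5)
		printf("t=%.1fs target=%.1f\n", t, cubic_target(w_max, t));
	return 0;
}

As t passes K the curve turns from concave to convex, which is what makes CUBIC probe slowly near the old maximum and more aggressively once it is safely past it.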

In cubictcp_cong_avoid(), where ca is a local:
327  struct bictcp *ca = inet_csk_ca(sk);
337  bictcp_update(ca, tcp_snd_cwnd(tp), acked);
338  tcp_cong_avoid_ai(tp, ca->cnt, acked);
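
cubictcp_cong_avoid() simply refreshes ca->cnt via bictcp_update() and passes it to tcp_cong_avoid_ai() at line 338. The sketch below is a simplified model of that counter-based additive increase, not the kernel's tcp_cong_avoid_ai(); it only illustrates how a cnt of N yields one extra segment per N ACKed segments.

#include <stdint.h>
#include <stdio.h>

struct ai_state {
	uint32_t cwnd;		/* congestion window, in segments */
	uint32_t cwnd_cnt;	/* ACKs accumulated toward the next increment */
};

/* Grow cwnd by one segment for every `cnt` ACKed segments. */
static void cong_avoid_ai(struct ai_state *s, uint32_t cnt, uint32_t acked)
{
	s->cwnd_cnt += acked;
	while (s->cwnd_cnt >= cnt) {
		s->cwnd_cnt -= cnt;
		s->cwnd++;
	}
}

int main(void)
{
	struct ai_state s = { .cwnd = 10, .cwnd_cnt = 0 };

	for (int i = 0; i < 30; i++)
		cong_avoid_ai(&s, 10, 1);	/* cnt = 10: +1 segment per 10 ACKs */
	printf("cwnd=%u\n", s.cwnd);		/* prints 13 */
	return 0;
}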

In cubictcp_recalc_ssthresh(), where ca is a local:
344  struct bictcp *ca = inet_csk_ca(sk);
346  ca->epoch_start = 0;	/* end of epoch */
349  if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
350  ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
353  ca->last_max_cwnd = tcp_snd_cwnd(tp);
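
On loss, cubictcp_recalc_ssthresh() closes the cubic epoch (line 346) and records the window at which the loss occurred. With fast_convergence set and cwnd still below the previous maximum, last_max_cwnd is pulled slightly below the current window (lines 349 and 350) so a flow that is losing ground frees capacity faster for newer flows. The sketch below models that bookkeeping together with the beta-scaled ssthresh the kernel derives from it; the floor of two segments is an assumption of the sketch, not visible in the lines above.

#include <stdint.h>
#include <stdio.h>

#define BICTCP_BETA_SCALE 1024
static const uint32_t beta = 717;		/* ~0.7 in 1/1024 units */
static const int fast_convergence = 1;

static uint32_t recalc_ssthresh(uint32_t cwnd, uint32_t *last_max_cwnd,
				uint32_t *epoch_start)
{
	uint32_t ssthresh;

	*epoch_start = 0;			/* end of epoch */

	if (cwnd < *last_max_cwnd && fast_convergence)
		/* Remember a point slightly below cwnd: we lost capacity, so
		 * leave room for competing flows to catch up. */
		*last_max_cwnd = cwnd * (BICTCP_BETA_SCALE + beta) /
				 (2 * BICTCP_BETA_SCALE);
	else
		*last_max_cwnd = cwnd;

	ssthresh = cwnd * beta / BICTCP_BETA_SCALE;
	return ssthresh > 2 ? ssthresh : 2;	/* assumed two-segment floor */
}

int main(void)
{
	uint32_t last_max = 120, epoch = 5;

	printf("ssthresh=%u last_max_cwnd=%u\n",
	       recalc_ssthresh(100, &last_max, &epoch), last_max);
	return 0;
}

With beta = 717/1024, a loss at a window of 100 segments below the old maximum gives an ssthresh of 70 and a remembered maximum of 85 under fast convergence.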

In hystart_update(), where ca is a local:
389  struct bictcp *ca = inet_csk_ca(sk);
392  if (after(tp->snd_una, ca->end_seq))
399  if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
400  ca->last_ack = now;
402  threshold = ca->delay_min + hystart_ack_delay(sk);
412  if ((s32)(now - ca->round_start) > threshold) {
413  ca->found = 1;
415  now - ca->round_start, threshold,
416  ca->delay_min, hystart_ack_delay(sk), tcp_snd_cwnd(tp));
429  if (ca->curr_rtt > delay)
430  ca->curr_rtt = delay;
431  if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
432  ca->sample_cnt++;
434  if (ca->curr_rtt > ca->delay_min +
435      HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
436  ca->found = 1;
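
hystart_update() carries both HyStart exit tests seen above: the ACK-train test (lines 399 to 416), which sets found once closely spaced ACKs keep arriving for longer than a threshold derived from delay_min, and the delay-increase test (lines 429 to 436), which sets found once the smallest RTT of the current round exceeds delay_min by a clamped fraction of it. Below is a sketch of the delay-increase branch only; the eight-sample minimum and the 4 to 16 ms clamp bounds follow the kernel's defaults as assumed here, they are not shown in the lines above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HYSTART_MIN_SAMPLES	8	/* assumed default */
#define HYSTART_DELAY_MIN_US	4000U	/* assumed clamp bounds */
#define HYSTART_DELAY_MAX_US	16000U

struct hystart_state {
	uint32_t delay_min;	/* path minimum RTT, usec */
	uint32_t curr_rtt;	/* smallest RTT seen this round, usec */
	uint32_t sample_cnt;	/* RTT samples taken this round */
};

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Feed one RTT sample; return true once the round's minimum RTT has risen
 * far enough above delay_min to suggest queue build-up. */
static bool hystart_delay_sample(struct hystart_state *st, uint32_t delay)
{
	if (st->curr_rtt > delay)
		st->curr_rtt = delay;
	if (st->sample_cnt < HYSTART_MIN_SAMPLES) {
		st->sample_cnt++;
		return false;
	}
	return st->curr_rtt > st->delay_min +
			      clamp_u32(st->delay_min >> 3,
					HYSTART_DELAY_MIN_US,
					HYSTART_DELAY_MAX_US);
}

int main(void)
{
	struct hystart_state st = { .delay_min = 20000, .curr_rtt = ~0U };
	bool found = false;

	for (int i = 0; i < 10 && !found; i++)
		found = hystart_delay_sample(&st, 30000);	/* RTT up 10 ms */
	printf("exit slow start: %s\n", found ? "yes" : "no");
	return 0;
}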

In cubictcp_acked(), where ca is a local:
451  struct bictcp *ca = inet_csk_ca(sk);
459  if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
467  if (ca->delay_min == 0 || ca->delay_min > delay)
468  ca->delay_min = delay;
471  if (!ca->found && tcp_in_slow_start(tp) && hystart &&
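
Finally, cubictcp_acked() keeps delay_min as a running minimum of the measured RTT (lines 467 and 468) and runs hystart_update() only while no exit point has been found, the flow is still in slow start, and HyStart is enabled (line 471). The kernel additionally gates on a minimum window; the hystart_low_window value of 16 used in the sketch below is an assumption, not shown in the lines above.

#include <stdbool.h>
#include <stdint.h>

static const uint32_t hystart_low_window = 16;	/* assumed default */

/* Track the path's minimum RTT across the connection lifetime. */
static void update_delay_min(uint32_t delay, uint32_t *delay_min)
{
	if (*delay_min == 0 || *delay_min > delay)
		*delay_min = delay;
}

/* Run the HyStart heuristics only while they can still matter. */
static bool should_run_hystart(bool found, bool in_slow_start, uint32_t cwnd)
{
	return !found && in_slow_start && cwnd >= hystart_low_window;
}

int main(void)
{
	uint32_t delay_min = 0;

	update_delay_min(25000, &delay_min);
	update_delay_min(21000, &delay_min);
	return should_run_hystart(false, true, 20) && delay_min == 21000 ? 0 : 1;
}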