Lines matching refs: q in net/sched/sch_fq.c

172 static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)  in fq_flow_unset_throttled()  argument
174 rb_erase(&f->rate_node, &q->delayed); in fq_flow_unset_throttled()
175 q->throttled_flows--; in fq_flow_unset_throttled()
176 fq_flow_add_tail(&q->old_flows, f); in fq_flow_unset_throttled()
179 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_set_throttled() argument
181 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; in fq_flow_set_throttled()
194 rb_insert_color(&f->rate_node, &q->delayed); in fq_flow_set_throttled()
195 q->throttled_flows++; in fq_flow_set_throttled()
196 q->stat_throttled++; in fq_flow_set_throttled()
199 if (q->time_next_delayed_flow > f->time_next_packet) in fq_flow_set_throttled()
200 q->time_next_delayed_flow = f->time_next_packet; in fq_flow_set_throttled()
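
The match list skips the tree descent between lines 181 and 194. Below is a hedged reconstruction of that elided body using the standard <linux/rbtree.h> insertion idiom (treat it as an approximation, not a verbatim copy): throttled flows live in the q->delayed rbtree keyed on time_next_packet, so the earliest deadline is always the leftmost node.

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);

Lines 199-200 then fold the new deadline into q->time_next_delayed_flow, the single timestamp the watchdog timer is armed with from fq_dequeue() (lines 552-555).
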
217 static void fq_gc(struct fq_sched_data *q, in fq_gc() argument
254 q->flows -= fcnt; in fq_gc()
255 q->inactive_flows -= fcnt; in fq_gc()
256 q->stat_gc_flows += fcnt; in fq_gc()
261 static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) in fq_classify() argument
270 return &q->internal; in fq_classify()
282 unsigned long hash = skb_get_hash(skb) & q->orphan_mask; in fq_classify()
290 unsigned long hash = skb_get_hash(skb) & q->orphan_mask; in fq_classify()
302 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)]; in fq_classify()
304 if (q->flows >= (2U << q->fq_trees_log) && in fq_classify()
305 q->inactive_flows > q->flows/2) in fq_classify()
306 fq_gc(q, root, sk); in fq_classify()
322 f->credit = q->initial_quantum; in fq_classify()
324 if (q->rate_enable) in fq_classify()
328 fq_flow_unset_throttled(q, f); in fq_classify()
341 q->stat_allocation_errors++; in fq_classify()
342 return &q->internal; in fq_classify()
350 if (q->rate_enable) in fq_classify()
354 f->credit = q->initial_quantum; in fq_classify()
359 q->flows++; in fq_classify()
360 q->inactive_flows++; in fq_classify()
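
fq_gc() (lines 217-256) reclaims flows whose sockets are gone, batching the counter updates through fcnt. The trigger on lines 304-305 is cheap integer arithmetic. A small self-contained userspace sketch (hypothetical names, not kernel code) of the orphan-flow bucket selection and the GC threshold:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int fq_trees_log = 10;        /* default: 1024 buckets */
		unsigned long orphan_mask = 1024 - 1;
		unsigned long skb_hash = 0xdeadbeefUL; /* stand-in for skb_get_hash() */

		/* lines 282/290: unconnected packets share orphan_mask + 1
		 * synthetic flows, selected by masking the skb hash */
		unsigned long flow = skb_hash & orphan_mask;

		/* lines 304-305: GC only when flows exceed twice the bucket
		 * count AND more than half of them are inactive */
		unsigned int flows = 2500, inactive_flows = 1400;
		bool run_gc = flows >= (2U << fq_trees_log) &&
			      inactive_flows > flows / 2;

		printf("orphan flow %lu, run_gc=%d\n", flow, run_gc);
		return 0;
	}

With the defaults, GC is considered only once there are at least 2048 flows and more than half are idle, which keeps the common enqueue path free of tree walks.
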
401 sch->q.qlen--; in fq_dequeue_skb()
437 const struct fq_sched_data *q) in fq_packet_beyond_horizon() argument
439 return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon)); in fq_packet_beyond_horizon()
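
Line 439 casts both sides to s64 before comparing: skb->tstamp is a ktime_t (signed 64-bit), and the EDT timestamps written by pacing may legitimately sit in the past, so an unsigned compare could misfire. A minimal standalone restatement of the predicate (names are hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	static bool beyond_horizon(int64_t tstamp, uint64_t now, uint64_t horizon)
	{
		/* true only when the requested send time lies further out
		 * than now + horizon (10 seconds by default, line 951) */
		return tstamp > (int64_t)(now + horizon);
	}

	int main(void)
	{
		uint64_t now = 1000000000ULL, horizon = 10ULL * 1000000000ULL;
		return beyond_horizon((int64_t)(now + horizon + 1), now, horizon) ? 0 : 1;
	}
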
445 struct fq_sched_data *q = qdisc_priv(sch); in fq_enqueue() local
448 if (unlikely(sch->q.qlen >= sch->limit)) in fq_enqueue()
452 fq_skb_cb(skb)->time_to_send = q->ktime_cache = ktime_get_ns(); in fq_enqueue()
458 if (fq_packet_beyond_horizon(skb, q)) { in fq_enqueue()
460 q->ktime_cache = ktime_get_ns(); in fq_enqueue()
461 if (fq_packet_beyond_horizon(skb, q)) { in fq_enqueue()
462 if (q->horizon_drop) { in fq_enqueue()
463 q->stat_horizon_drops++; in fq_enqueue()
466 q->stat_horizon_caps++; in fq_enqueue()
467 skb->tstamp = q->ktime_cache + q->horizon; in fq_enqueue()
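
Lines 458-467 run the horizon test twice on purpose: q->ktime_cache may be stale (it is refreshed only on line 452 and in fq_dequeue(), line 545), so an apparent violation first forces a fresh ktime_get_ns() before any verdict. A hedged, compilable restatement of the control flow; every name here is a stand-in for the kernel helpers:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t now_ns = 100;          /* plays q->ktime_cache, toy units */
	static const uint64_t horizon = 10;
	static const bool horizon_drop = true; /* q->horizon_drop default, line 952 */

	static bool beyond_horizon(uint64_t tstamp)
	{
		return (int64_t)tstamp > (int64_t)(now_ns + horizon);
	}

	int main(void)
	{
		uint64_t tstamp = 200;   /* requested send time, far in the future */

		if (beyond_horizon(tstamp)) {
			now_ns += 5;                    /* refresh the stale clock first */
			if (beyond_horizon(tstamp)) {
				if (horizon_drop) {
					puts("drop (stat_horizon_drops++)");
					return 0;
				}
				tstamp = now_ns + horizon;  /* cap (stat_horizon_caps++) */
			}
		}
		printf("enqueue with tstamp %llu\n", (unsigned long long)tstamp);
		return 0;
	}
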
473 f = fq_classify(skb, q); in fq_enqueue()
474 if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) { in fq_enqueue()
475 q->stat_flows_plimit++; in fq_enqueue()
482 fq_flow_add_tail(&q->new_flows, f); in fq_enqueue()
483 if (time_after(jiffies, f->age + q->flow_refill_delay)) in fq_enqueue()
484 f->credit = max_t(u32, f->credit, q->quantum); in fq_enqueue()
485 q->inactive_flows--; in fq_enqueue()
491 if (unlikely(f == &q->internal)) { in fq_enqueue()
492 q->stat_internal_packets++; in fq_enqueue()
494 sch->q.qlen++; in fq_enqueue()
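
Lines 482-485 handle a flow waking up from idle: it goes onto new_flows, and if it has been quiet longer than flow_refill_delay (40 ms by default, line 937) its byte credit is topped back up to at least one quantum. Note the kernel compares via max_t(u32, ...), i.e. as unsigned. A toy userspace sketch of the refill (units and names hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		long jiffies = 1000, age = 950, flow_refill_delay = 40;
		uint32_t credit = 100, quantum = 3028;

		/* time_after(jiffies, age + flow_refill_delay): idle long enough? */
		if (jiffies - (age + flow_refill_delay) > 0)
			credit = credit > quantum ? credit : quantum; /* max_t(u32, ...) */

		printf("credit refilled to %u bytes\n", credit);
		return 0;
	}
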
499 static void fq_check_throttled(struct fq_sched_data *q, u64 now) in fq_check_throttled() argument
504 if (q->time_next_delayed_flow > now) in fq_check_throttled()
510 sample = (unsigned long)(now - q->time_next_delayed_flow); in fq_check_throttled()
511 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3; in fq_check_throttled()
512 q->unthrottle_latency_ns += sample >> 3; in fq_check_throttled()
514 q->time_next_delayed_flow = ~0ULL; in fq_check_throttled()
515 while ((p = rb_first(&q->delayed)) != NULL) { in fq_check_throttled()
519 q->time_next_delayed_flow = f->time_next_packet; in fq_check_throttled()
522 fq_flow_unset_throttled(q, f); in fq_check_throttled()
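
Lines 510-512 keep an exponentially weighted moving average of how late the watchdog actually fired, with weight 1/8: new = old - old/8 + sample/8, i.e. roughly new = (7*old + sample)/8. A runnable sketch showing the convergence:

	#include <stdio.h>

	int main(void)
	{
		unsigned long lat = 0;      /* plays q->unthrottle_latency_ns */
		unsigned long sample = 8000;

		for (int i = 0; i < 5; i++) {
			/* lines 511-512, same arithmetic */
			lat -= lat >> 3;
			lat += sample >> 3;
			printf("round %d: %lu ns\n", i + 1, lat);
		}
		return 0;
	}

With a constant 8000 ns sample the estimate climbs 1000, 1875, 2641, 3311, ... toward 8000, so a single outlier can never dominate it.
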
528 struct fq_sched_data *q = qdisc_priv(sch); in fq_dequeue() local
536 if (!sch->q.qlen) in fq_dequeue()
539 skb = fq_peek(&q->internal); in fq_dequeue()
541 fq_dequeue_skb(sch, &q->internal, skb); in fq_dequeue()
545 q->ktime_cache = now = ktime_get_ns(); in fq_dequeue()
546 fq_check_throttled(q, now); in fq_dequeue()
548 head = &q->new_flows; in fq_dequeue()
550 head = &q->old_flows; in fq_dequeue()
552 if (q->time_next_delayed_flow != ~0ULL) in fq_dequeue()
553 qdisc_watchdog_schedule_range_ns(&q->watchdog, in fq_dequeue()
554 q->time_next_delayed_flow, in fq_dequeue()
555 q->timer_slack); in fq_dequeue()
562 f->credit += q->quantum; in fq_dequeue()
564 fq_flow_add_tail(&q->old_flows, f); in fq_dequeue()
576 fq_flow_set_throttled(q, f); in fq_dequeue()
580 if ((s64)(now - time_next_packet - q->ce_threshold) > 0) { in fq_dequeue()
582 q->stat_ce_mark++; in fq_dequeue()
588 if ((head == &q->new_flows) && q->old_flows.first) { in fq_dequeue()
589 fq_flow_add_tail(&q->old_flows, f); in fq_dequeue()
592 q->inactive_flows++; in fq_dequeue()
599 if (!q->rate_enable) in fq_dequeue()
602 rate = q->flow_max_rate; in fq_dequeue()
612 if (rate <= q->low_rate_threshold) { in fq_dequeue()
615 plen = max(plen, q->quantum); in fq_dequeue()
631 q->stat_pkts_too_long++; in fq_dequeue()
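
Lines 548-592 are the deficit-round-robin core: new_flows is serviced before old_flows (548-550); a flow whose credit is spent is not sent from, but earns one quantum and rotates to the tail of old_flows (562-564); a new flow whose turn ends while old flows wait is demoted (588-589); a flow that drains goes inactive (592). Separately, a packet that sat throttled for more than ce_threshold past its deadline gets ECN CE marked (580-582). A toy sketch of just the credit rotation (hypothetical names):

	#include <stdio.h>

	int main(void)
	{
		int credit = -40;          /* leftover deficit from the last turn */
		const int quantum = 3028;  /* line 562: q->quantum */

		if (credit <= 0) {
			credit += quantum; /* earn a turn, go to old_flows tail */
			printf("requeued, credit now %d\n", credit);
		}
		/* with credit > 0 the flow may transmit; fq_dequeue()
		 * subtracts each packet's length from the credit elsewhere */
		return 0;
	}
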
664 struct fq_sched_data *q = qdisc_priv(sch); in fq_reset() local
670 sch->q.qlen = 0; in fq_reset()
673 fq_flow_purge(&q->internal); in fq_reset()
675 if (!q->fq_root) in fq_reset()
678 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { in fq_reset()
679 root = &q->fq_root[idx]; in fq_reset()
689 q->new_flows.first = NULL; in fq_reset()
690 q->old_flows.first = NULL; in fq_reset()
691 q->delayed = RB_ROOT; in fq_reset()
692 q->flows = 0; in fq_reset()
693 q->inactive_flows = 0; in fq_reset()
694 q->throttled_flows = 0; in fq_reset()
697 static void fq_rehash(struct fq_sched_data *q, in fq_rehash() argument
737 q->flows -= fcnt; in fq_rehash()
738 q->inactive_flows -= fcnt; in fq_rehash()
739 q->stat_gc_flows += fcnt; in fq_rehash()
749 struct fq_sched_data *q = qdisc_priv(sch); in fq_resize() local
754 if (q->fq_root && log == q->fq_trees_log) in fq_resize()
768 old_fq_root = q->fq_root; in fq_resize()
770 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log); in fq_resize()
772 q->fq_root = array; in fq_resize()
773 q->fq_trees_log = log; in fq_resize()
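
fq_resize() (lines 749-773) swaps in a new array of 2^log rb_root buckets, bailing out early when the size is unchanged (754), and fq_rehash() must walk every old bucket because growing the table moves most flows: the bucket index comes from hash_ptr(sk, fq_trees_log), which depends on the log. A userspace illustration; toy_hash_ptr is a hypothetical stand-in for the kernel's hash_ptr() from <linux/hash.h>:

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int toy_hash_ptr(uint64_t ptr, unsigned int bits)
	{
		/* Fibonacci-style multiplicative hash, keeping the top bits */
		return (unsigned int)((ptr * 0x9E3779B97F4A7C15ULL) >> (64 - bits));
	}

	int main(void)
	{
		uint64_t sk = 0xffff888123456780ULL; /* hypothetical socket address */

		printf("log=10 -> bucket %u, log=11 -> bucket %u\n",
		       toy_hash_ptr(sk, 10), toy_hash_ptr(sk, 11));
		return 0;
	}
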
809 struct fq_sched_data *q = qdisc_priv(sch); in fq_change() local
822 fq_log = q->fq_trees_log; in fq_change()
836 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); in fq_change()
842 q->quantum = quantum; in fq_change()
850 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); in fq_change()
859 q->flow_max_rate = (rate == ~0U) ? ~0UL : rate; in fq_change()
862 q->low_rate_threshold = in fq_change()
869 q->rate_enable = enable; in fq_change()
877 q->flow_refill_delay = usecs_to_jiffies(usecs_delay); in fq_change()
881 q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]); in fq_change()
884 q->ce_threshold = (u64)NSEC_PER_USEC * in fq_change()
888 q->timer_slack = nla_get_u32(tb[TCA_FQ_TIMER_SLACK]); in fq_change()
891 q->horizon = (u64)NSEC_PER_USEC * in fq_change()
895 q->horizon_drop = nla_get_u8(tb[TCA_FQ_HORIZON_DROP]); in fq_change()
903 while (sch->q.qlen > sch->limit) { in fq_change()
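
Two conversions above deserve a second look. TCA_FQ_FLOW_MAX_RATE arrives as a 32-bit netlink attribute while q->flow_max_rate is an unsigned long, so line 859 preserves the all-ones "unlimited" sentinel when widening (fq_dump(), line 989, clamps it back for userspace); and after any limit change, line 903 drops packets until the queue fits again. A sketch of the sentinel widening:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t rate = ~0U;   /* netlink u32: "no limit" */

		/* line 859: without this, 0xffffffff would become a real cap
		 * of roughly 4 GB/s instead of meaning "unlimited" */
		unsigned long flow_max_rate = (rate == ~0U) ? ~0UL : rate;

		printf("flow_max_rate = %#lx\n", flow_max_rate);
		return 0;
	}
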
920 struct fq_sched_data *q = qdisc_priv(sch); in fq_destroy() local
923 fq_free(q->fq_root); in fq_destroy()
924 qdisc_watchdog_cancel(&q->watchdog); in fq_destroy()
930 struct fq_sched_data *q = qdisc_priv(sch); in fq_init() local
934 q->flow_plimit = 100; in fq_init()
935 q->quantum = 2 * psched_mtu(qdisc_dev(sch)); in fq_init()
936 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); in fq_init()
937 q->flow_refill_delay = msecs_to_jiffies(40); in fq_init()
938 q->flow_max_rate = ~0UL; in fq_init()
939 q->time_next_delayed_flow = ~0ULL; in fq_init()
940 q->rate_enable = 1; in fq_init()
941 q->new_flows.first = NULL; in fq_init()
942 q->old_flows.first = NULL; in fq_init()
943 q->delayed = RB_ROOT; in fq_init()
944 q->fq_root = NULL; in fq_init()
945 q->fq_trees_log = ilog2(1024); in fq_init()
946 q->orphan_mask = 1024 - 1; in fq_init()
947 q->low_rate_threshold = 550000 / 8; in fq_init()
949 q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */ in fq_init()
951 q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */ in fq_init()
952 q->horizon_drop = 1; /* by default, drop packets beyond horizon */ in fq_init()
955 q->ce_threshold = (u64)NSEC_PER_USEC * ~0U; in fq_init()
957 qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC); in fq_init()
962 err = fq_resize(sch, q->fq_trees_log); in fq_init()
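
Worked out for a plain Ethernet device (MTU 1500 plus a 14-byte header, so psched_mtu() returns 1514; the device numbers are an assumption), the fq_init() defaults come to:

	quantum             2 * 1514  = 3028 bytes per round-robin turn
	initial_quantum     10 * 1514 = 15140 bytes for a flow's first burst
	flow_refill_delay   40 ms
	fq_trees_log        ilog2(1024) = 10, i.e. 1024 buckets
	orphan_mask         1023 (1024 synthetic flows for orphan packets)
	low_rate_threshold  550000 / 8 = 68750 B/s (550 kbit/s)
	timer_slack         10 us of hrtimer slack
	horizon             10 s, with horizon_drop enabled
	ce_threshold        ~0U us, i.e. effectively never marks

All of these can be overridden at runtime through fq_change() via the TCA_FQ_* netlink attributes shown around lines 836-895.
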
969 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump() local
970 u64 ce_threshold = q->ce_threshold; in fq_dump()
971 u64 horizon = q->horizon; in fq_dump()
984 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || in fq_dump()
985 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || in fq_dump()
986 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || in fq_dump()
987 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || in fq_dump()
989 min_t(unsigned long, q->flow_max_rate, ~0U)) || in fq_dump()
991 jiffies_to_usecs(q->flow_refill_delay)) || in fq_dump()
992 nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) || in fq_dump()
994 q->low_rate_threshold) || in fq_dump()
996 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log) || in fq_dump()
997 nla_put_u32(skb, TCA_FQ_TIMER_SLACK, q->timer_slack) || in fq_dump()
999 nla_put_u8(skb, TCA_FQ_HORIZON_DROP, q->horizon_drop)) in fq_dump()
1010 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump_stats() local
1015 st.gc_flows = q->stat_gc_flows; in fq_dump_stats()
1016 st.highprio_packets = q->stat_internal_packets; in fq_dump_stats()
1018 st.throttled = q->stat_throttled; in fq_dump_stats()
1019 st.flows_plimit = q->stat_flows_plimit; in fq_dump_stats()
1020 st.pkts_too_long = q->stat_pkts_too_long; in fq_dump_stats()
1021 st.allocation_errors = q->stat_allocation_errors; in fq_dump_stats()
1022 st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack - in fq_dump_stats()
1024 st.flows = q->flows; in fq_dump_stats()
1025 st.inactive_flows = q->inactive_flows; in fq_dump_stats()
1026 st.throttled_flows = q->throttled_flows; in fq_dump_stats()
1028 q->unthrottle_latency_ns, ~0U); in fq_dump_stats()
1029 st.ce_mark = q->stat_ce_mark; in fq_dump_stats()
1030 st.horizon_drops = q->stat_horizon_drops; in fq_dump_stats()
1031 st.horizon_caps = q->stat_horizon_caps; in fq_dump_stats()
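
The st.* assignments fill struct tc_fq_qd_stats, the TCA_STATS_APP payload that tc -s qdisc show prints. The layout below is quoted from memory of include/uapi/linux/pkt_sched.h, so verify against your kernel's headers; note that line 1022 converts time_next_delayed_flow into a delay relative to now (plus timer_slack), which is why that field is signed:

	#include <linux/types.h>

	struct tc_fq_qd_stats {
		__u64	gc_flows;
		__u64	highprio_packets;
		__u64	tcp_retrans;		/* legacy, left at zero here */
		__u64	throttled;
		__u64	flows_plimit;
		__u64	pkts_too_long;
		__u64	allocation_errors;
		__s64	time_next_delayed_flow;
		__u32	flows;
		__u32	inactive_flows;
		__u32	throttled_flows;
		__u32	unthrottle_latency_ns;
		__u64	ce_mark;
		__u64	horizon_drops;
		__u64	horizon_caps;
	};
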