/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * When a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. The link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>

#define HFSC_DEBUG 1

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * The inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */

struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class
{
	u32		classid;	/* class id */
	unsigned int	refcnt;		/* usage count */

	struct tc_stats	stats;		/* generic statistics */
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	rb_node_t	el_node;	/* qdisc's eligible tree member */
	rb_root_t	vt_tree;	/* active children sorted by cl_vt */
	rb_node_t	vt_node;	/* parent's vt_tree member */
	rb_root_t	cf_tree;	/* active children sorted by cl_f */
	rb_node_t	cf_node;	/* parent's cf_tree member */
	struct list_head hlist;		/* hash list member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number*/
	unsigned long	cl_nactive;	/* number of active children */
};

#define HFSC_HSIZE	16

struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct list_head clhash[HFSC_HSIZE];	/* class hash */
	rb_root_t eligible;			/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct sk_buff_head requeue;		/* requeued packets */
	struct timer_list wd_timer;		/* watchdog timer */
};

/*
 * macros
 */
#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp)						\
do {									\
	struct timeval tv;						\
	do_gettimeofday(&tv);						\
	(stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec;			\
} while (0)
#endif

#if HFSC_DEBUG
#define ASSERT(cond)							\
do {									\
	if (unlikely(!(cond)))						\
		printk("assertion %s failed at %s:%i (%s)\n",		\
		       #cond, __FILE__, __LINE__, __FUNCTION__);	\
} while (0)
#else
#define ASSERT(cond)
#endif /* HFSC_DEBUG */

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	rb_node_t **p = &cl->sched->eligible.rb_node;
	rb_node_t *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	rb_node_t *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	rb_node_t *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	rb_node_t **p = &cl->cl_parent->vt_tree.rb_node;
	rb_node_t *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	rb_node_t *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	rb_node_t **p = &cl->cl_parent->cf_tree.rb_node;
	rb_node_t *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * Time source resolution
 *  PSCHED_JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
 *  PSCHED_CPU: resolution is between 0.5us and 1us.
 *  PSCHED_GETTIMEOFDAY: resolution is exactly 1us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 * Note: We can afford the additional accuracy (altq hfsc keeps at most
 * 3 effective digits) thanks to the fact that the Linux clock is bounded
 * much more tightly.
 *
 *  bits/sec      100Kbps    1Mbps      10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/0.5us   6.25e-3    62.5e-3    625e-3     6250e-3    62500e-3
 *  bytes/us      12.5e-3    125e-3     1250e-3    12500e-3   125000e-3
 *  bytes/1.27us  15.875e-3  158.75e-3  1587.5e-3  15875e-3   158750e-3
 *
 *  0.5us/byte    160        16         1.6        0.16       0.016
 *  us/byte       80         8          0.8        0.08       0.008
 *  1.27us/byte   63         6.3        0.63       0.063      0.0063
 */
#define	SM_SHIFT	20
#define	ISM_SHIFT	18

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
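
/*
 * Worked example (added for illustration, not part of the original altq
 * code): assume a 1us clock, i.e. PSCHED_JIFFIE2US(HZ) == 1000000.
 * A rate of 1Mbit/s is 0.125 bytes/us, which would truncate to zero if
 * stored unscaled; with SM_SHIFT == 20 it is kept as
 * sm = 0.125 * 2^20 == 131072. The matching inverse slope, 8 us/byte,
 * is kept as ism = 8 * 2^18 == 2097152 with ISM_SHIFT == 18.
 */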

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}
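
/*
 * Added note: the split multiplications above avoid overflowing 64 bits.
 * Writing x = ((x >> SM_SHIFT) << SM_SHIFT) | (x & SM_MASK) gives
 *
 *	x * sm >> SM_SHIFT
 *	    == (x >> SM_SHIFT) * sm + ((x & SM_MASK) * sm >> SM_SHIFT)
 *
 * so the upper bits of x are shifted down before the multiply instead
 * of after it. seg_y2x() applies the same decomposition with
 * ISM_SHIFT/ISM_MASK.
 */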

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_JIFFIE2US(HZ) - 1;
	do_div(sm, PSCHED_JIFFIE2US(HZ));
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_JIFFIE2US(HZ) << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
	dx += 1000000 - 1;
	do_div(dx, 1000000);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_JIFFIE2US(HZ)) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * 1000000;
	do_div(d, PSCHED_JIFFIE2US(HZ));
	return (u32)d;
}
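
/*
 * Round-trip example (added for illustration, again assuming
 * PSCHED_JIFFIE2US(HZ) == 1000000): m2sm(125000) computes
 * (125000 << 20) / 1000000 == 131072, and sm2m(131072) yields
 * 131072 * 1000000 >> 20 == 125000 again. The "+ divisor - 1" terms in
 * m2sm()/m2ism()/d2dx() round inexact divisions up, so the converted
 * curve never ends up below the configured one.
 */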

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
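
/*
 * Added derivation for the intersection case in rtsc_min(): the first
 * segment of the merged curve must run at slope sm1 until it makes up
 * the vertical gap (y1 - y) over the slope-sm2 curve, i.e.
 *
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 *	dx * sm1 >> SM_SHIFT == dx * sm2 >> SM_SHIFT + (y1 - y)
 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * which is exactly the division that do_div() performs above.
 */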

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time;

	PSCHED_GET_TIME(cur_time);

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	rb_node_t *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	rb_node_t *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						  cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					PSCHED_GET_TIME(cur_time);

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->dequeue(sch);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
		return 0;
	}
	len = skb->len;
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		if (net_ratelimit())
			printk("qdisc_peek_len: failed to requeue\n");
		return 0;
	}
	return len;
}
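
/*
 * Added note: qdisc_peek_len() exists because HFSC needs the size of
 * the packet it will send next to compute deadlines (see the next_len
 * arguments of update_ed()/update_d() and their use in hfsc_dequeue()),
 * but this qdisc API has no peek operation, hence the dequeue/requeue
 * pair. It only behaves correctly for children whose ->requeue() puts
 * the packet back at the head of the queue, which holds for the
 * default pfifo child qdisc.
 */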

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	if (len > 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
		sch->q.qlen -= len;
	}
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level > level)
				level = p->level;
		}
		cl->level = level + 1;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
	h ^= h >> 8;
	h ^= h >> 4;

	return h & (HFSC_HSIZE - 1);
}
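
/*
 * Added example: hfsc_hash() folds the low 16 bits of the classid into
 * HFSC_HSIZE (16) buckets, e.g. classid 0x12 hashes to
 * (0x12 ^ 0x1) & 0xf == 3 while 0x13 hashes to 2, spreading
 * consecutive minor numbers across different chains.
 */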

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
		if (cl->classid == classid)
			return cl;
	}
	return NULL;
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
                u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
                u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                  struct rtattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_HFSC_MAX];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;

	if (opt == NULL ||
	    rtattr_parse(tb, TCA_HFSC_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)))
		return -EINVAL;

	if (tb[TCA_HFSC_RSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
			return -EINVAL;
		rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
			return -EINVAL;
		fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
			return -EINVAL;
		usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent && cl->cl_parent->classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		PSCHED_GET_TIME(cur_time);

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
		if (tca[TCA_RATE-1]) {
			qdisc_kill_estimator(&cl->stats);
			qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
		}
#endif
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;
	memset(cl, 0, sizeof(struct hfsc_class));

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->refcnt = 1;
	cl->classid = classid;
	cl->sched = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	cl->stats.lock = &sch->dev->queue_lock;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
	if (tca[TCA_RATE-1])
		qdisc_new_estimator(&cl->stats, tca[TCA_RATE-1]);
#endif
	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_filters(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;

	hfsc_destroy_filters(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
	qdisc_kill_estimator(&cl->stats);
#endif
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->hlist);
	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);
	hfsc_purge_queue(sch, cl);
	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_POLICE
		if (result == TC_POLICE_SHOT)
			return NULL;
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}
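
/*
 * Added note on classification order: hfsc_classify() first honours an
 * skb->priority that directly names one of this qdisc's leaf classes,
 * then walks the filter chains from the root downwards, and finally
 * falls back to the default class configured via tc_hfsc_qopt.defcls
 * (with iproute2 this is the "default" parameter, e.g.
 * "tc qdisc add dev eth0 root handle 1: hfsc default 1"). If even the
 * fallback fails to name a leaf, hfsc_enqueue() drops the packet.
 */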

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	RTA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

rtattr_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	return -1;
}

static inline int
hfsc_dump_stats(struct sk_buff *skb, struct hfsc_class *cl)
{
	cl->stats.qlen = cl->qdisc->q.qlen;
	if (qdisc_copy_stats(skb, &cl->stats) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	return -1;
}

static inline int
hfsc_dump_xstats(struct sk_buff *skb, struct hfsc_class *cl)
{
	struct tc_hfsc_stats xstats;

	xstats.level = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;
	RTA_PUT(skb, TCA_XSTATS, sizeof(xstats), &xstats);

	return skb->len;

rtattr_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	unsigned char *b = skb->tail;
	struct rtattr *rta = (struct rtattr *)b;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (hfsc_dump_curves(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb->tail - b;

	if ((hfsc_dump_stats(skb, cl) < 0) ||
	    (hfsc_dump_xstats(skb, cl) < 0))
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl;
	u64 next_time = 0;
	long delay;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	ASSERT(next_time != 0);
	delay = next_time - cur_time;
	delay = PSCHED_US2JIFFIE(delay);

	sch->flags |= TCQ_F_THROTTLED;
	mod_timer(&q->wd_timer, jiffies + delay);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct tc_hfsc_qopt *qopt;
	unsigned int i;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch->stats.lock = &sch->dev->queue_lock;

	q->defcls = qopt->defcls;
	for (i = 0; i < HFSC_HSIZE; i++)
		INIT_LIST_HEAD(&q->clhash[i]);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.refcnt = 1;
	q->root.classid = sch->handle;
	q->root.sched = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	q->root.stats.lock = &sch->dev->queue_lock;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

	init_timer(&q->wd_timer);
	q->wd_timer.function = hfsc_watchdog;
	q->wd_timer.data = (unsigned long)sch;

	MOD_INC_USE_COUNT;
	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total = 0;
	cl->cl_cumul = 0;
	cl->cl_d = 0;
	cl->cl_e = 0;
	cl->cl_vt = 0;
	cl->cl_vtadj = 0;
	cl->cl_vtoff = 0;
	cl->cl_cvtmin = 0;
	cl->cl_cvtmax = 0;
	cl->cl_cvtoff = 0;
	cl->cl_pcvtoff = 0;
	cl->cl_vtperiod = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f = 0;
	cl->cl_myf = 0;
	cl->cl_myfadj = 0;
	cl->cl_cfmin = 0;
	cl->cl_nactive = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	del_timer(&q->wd_timer);
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl, *next;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);
	}
	__skb_queue_purge(&q->requeue);
	del_timer(&q->wd_timer);
	MOD_DEC_USE_COUNT;
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	unsigned char *b = skb->tail;
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl = hfsc_classify(skb, sch);
	unsigned int len = skb->len;
	int err;

	if (cl == NULL) {
		kfree_skb(skb);
		sch->stats.drops++;
		return NET_XMIT_DROP;
	}

	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		cl->stats.drops++;
		sch->stats.drops++;
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->stats.packets++;
	cl->stats.bytes += len;
	sch->stats.packets++;
	sch->stats.bytes += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	PSCHED_GET_TIME(cur_time);

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->stats.overlimits++;
			hfsc_schedule_watchdog(sch, cur_time);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");
		return NULL;
	}

	update_vf(cl, skb->len, cur_time);
	if (realtime)
		cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->stats.drops++;
			sch->stats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.walk		= hfsc_walk
};

struct Qdisc_ops hfsc_qdisc_ops = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched)
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);