1 /* vim: ts=8 sw=8
2 * net/sched/sch_htb.c Hierarchical token bucket, feed tree version
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Martin Devera, <devik@cdi.cz>
10 *
11 * Credits (in time order) for older HTB versions:
12 * Stef Coene <stef.coene@docum.org>
13 * HTB support at LARTC mailing list
14 * Ondrej Kraus, <krauso@barr.cz>
15 * found missing INIT_QDISC(htb)
16 * Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17 * helped a lot to locate nasty class stall bug
18 * Andi Kleen, Jamal Hadi, Bert Hubert
19 * code review and helpful comments on shaping
20 * Tomasz Wrona, <tw@eter.tym.pl>
21 * created test case so that I was able to fix nasty bug
22 * Wilfried Weissmann
23 * spotted bug in dequeue code and helped with fix
24 * Jiri Fojtasek
25 * fixed requeue routine
26 * and many others. thanks.
27 *
28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29 */
30 #include <linux/config.h>
31 #include <linux/module.h>
32 #include <asm/uaccess.h>
33 #include <asm/system.h>
34 #include <asm/bitops.h>
35 #include <linux/types.h>
36 #include <linux/kernel.h>
37 #include <linux/version.h>
38 #include <linux/sched.h>
39 #include <linux/string.h>
40 #include <linux/mm.h>
41 #include <linux/socket.h>
42 #include <linux/sockios.h>
43 #include <linux/in.h>
44 #include <linux/errno.h>
45 #include <linux/interrupt.h>
46 #include <linux/if_ether.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/notifier.h>
51 #include <net/ip.h>
52 #include <net/route.h>
53 #include <linux/skbuff.h>
54 #include <linux/list.h>
55 #include <linux/compiler.h>
56 #include <net/sock.h>
57 #include <net/pkt_sched.h>
58 #include <linux/rbtree.h>
59
60 /* HTB algorithm.
61 Author: devik@cdi.cz
62 ========================================================================
63 HTB is like TBF with multiple classes. It is also similar to CBQ because
64 it allows assigning a priority to each class in the hierarchy.
65 In fact it is another implementation of Floyd's formal sharing.
66
67 Levels:
68 Each class is assigned a level. A leaf ALWAYS has level 0 and root
69 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
70 one less than their parent.
71 */
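/* Illustrative example (assuming TC_HTB_MAXDEPTH == 8): in the hierarchy
   1:1 (root) -> 1:10 (inner) -> 1:101, 1:102 (leaves)
   the leaves get level 0, 1:10 gets level 6 (one less than its parent) and
   1:1 gets level 7 (TC_HTB_MAXDEPTH-1); these levels index the per-row
   structures (row, wait_pq, near_ev_cache) used by the dequeue code below. */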
72
73 #define HTB_HSIZE 16 /* classid hash size */
74 #define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
75 #define HTB_DEBUG 1 /* compile debugging support (activated by tc tool) */
76 #define HTB_RATECM 1 /* whether to use rate computer */
77 #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
78 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
79 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
80 #define HTB_VER 0x30011 /* major must match the version number supplied by the TC tool */
81
82 #if HTB_VER >> 16 != TC_HTB_PROTOVER
83 #error "Mismatched sch_htb.c and pkt_sch.h"
84 #endif
85
86 /* debugging support; S is subsystem, these are defined:
87 0 - netlink messages
88 1 - enqueue
89 2 - drop & requeue
90 3 - dequeue main
91 4 - dequeue one prio DRR part
92 5 - dequeue class accounting
93 6 - class overlimit status computation
94 7 - hint tree
95 8 - event queue
96 10 - rate estimator
97 11 - classifier
98 12 - fast dequeue cache
99
100 L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
101 the q->debug uint32 contains 16 2-bit fields, one per subsystem, starting
102 from the LSB
103 */
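/* Example of the encoding above (a sketch): to get detailed (level 2) output
   from subsystem 3 (dequeue main) and basic (level 1) output from subsystem 8
   (event queue) tc would pass debug = (2 << (2*3)) | (1 << (2*8)); i.e.
   subsystem S occupies bits 2*S and 2*S+1 of q->debug. */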
104 #ifdef HTB_DEBUG
105 #define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
106 #define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
107 printk(KERN_DEBUG FMT,##ARG)
108 #define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
109 #define HTB_PASSQ q,
110 #define HTB_ARGQ struct htb_sched *q,
111 #define static
112 #undef __inline__
113 #define __inline__
114 #undef inline
115 #define inline
116 #define HTB_CMAGIC 0xFEFAFEF1
117 #define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
118 if ((N)->rb_color == -1) break; \
119 rb_erase(N,R); \
120 (N)->rb_color = -1; } while (0)
121 #else
122 #define HTB_DBG_COND(S,L) (0)
123 #define HTB_DBG(S,L,FMT,ARG...)
124 #define HTB_PASSQ
125 #define HTB_ARGQ
126 #define HTB_CHCL(cl)
127 #define htb_safe_rb_erase(N,R) rb_erase(N,R)
128 #endif
129
130
131 /* used internally to keep the status of a single class */
132 enum htb_cmode {
133 HTB_CANT_SEND, /* class can't send and can't borrow */
134 HTB_MAY_BORROW, /* class can't send but may borrow */
135 HTB_CAN_SEND /* class can send */
136 };
137
138 /* interior & leaf nodes; props specific to leaves are marked L: */
139 struct htb_class
140 {
141 #ifdef HTB_DEBUG
142 unsigned magic;
143 #endif
144 /* general class parameters */
145 u32 classid;
146 struct tc_stats stats; /* generic stats */
147 struct tc_htb_xstats xstats;/* our special stats */
148 int refcnt; /* usage count of this class */
149
150 #ifdef HTB_RATECM
151 /* rate measurement counters */
152 unsigned long rate_bytes,sum_bytes;
153 unsigned long rate_packets,sum_packets;
154 #endif
155
156 /* topology */
157 int level; /* our level (see above) */
158 struct htb_class *parent; /* parent class */
159 struct list_head hlist; /* classid hash list item */
160 struct list_head sibling; /* sibling list item */
161 struct list_head children; /* children list */
162
163 union {
164 struct htb_class_leaf {
165 struct Qdisc *q;
166 int prio;
167 int aprio;
168 int quantum;
169 int deficit[TC_HTB_MAXDEPTH];
170 struct list_head drop_list;
171 } leaf;
172 struct htb_class_inner {
173 rb_root_t feed[TC_HTB_NUMPRIO]; /* feed trees */
174 rb_node_t *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
175 /* When a class changes from state 1->2 and disconnects from
176 its parent's feed we lose the ptr value and would start from the
177 first child again. Here we store the classid of the
178 last valid ptr (used when ptr is NULL). */
179 u32 last_ptr_id[TC_HTB_NUMPRIO];
180 } inner;
181 } un;
182 rb_node_t node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
183 rb_node_t pq_node; /* node for event queue */
184 unsigned long pq_key; /* the same type as jiffies global */
185
186 int prio_activity; /* for which prios are we active */
187 enum htb_cmode cmode; /* current mode of the class */
188
189 /* class attached filters */
190 struct tcf_proto *filter_list;
191 int filter_cnt;
192
193 int warned; /* only one warning about non work conserving .. */
194
195 /* token bucket parameters */
196 struct qdisc_rate_table *rate; /* rate table of the class itself */
197 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
198 long buffer,cbuffer; /* token bucket depth/rate */
199 long mbuffer; /* max wait time */
200 long tokens,ctokens; /* current number of tokens */
201 psched_time_t t_c; /* checkpoint time */
202 };
203
204 /* TODO: maybe compute rate when size is too large .. or drop ? */
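/* L2T ("length to time"): a qdisc_rate_table maps packet size to transmission
   time in a 256-slot table indexed by size >> cell_log; e.g. with cell_log = 3
   a 1000 byte packet uses slot 125. Sizes that overflow the table are clamped
   to slot 255 and counted as giants. */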
205 static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
206 int size)
207 {
208 int slot = size >> rate->rate.cell_log;
209 if (slot > 255) {
210 cl->xstats.giants++;
211 slot = 255;
212 }
213 return rate->data[slot];
214 }
215
216 struct htb_sched
217 {
218 struct list_head root; /* root classes list */
219 struct list_head hash[HTB_HSIZE]; /* hashed by classid */
220 struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
221
222 /* self list - roots of self generating tree */
223 rb_root_t row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
224 int row_mask[TC_HTB_MAXDEPTH];
225 rb_node_t *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
226 u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
227
228 /* self wait list - roots of wait PQs per row */
229 rb_root_t wait_pq[TC_HTB_MAXDEPTH];
230
231 /* time of nearest event per level (row) */
232 unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
233
234 /* cached value of jiffies in dequeue */
235 unsigned long jiffies;
236
237 /* whether we hit a non-work-conserving class during this dequeue; we use */
238 int nwc_hit; /* this to disable mindelay complaint in dequeue */
239
240 int defcls; /* class where unclassified flows go to */
241 u32 debug; /* subsystem debug levels */
242
243 /* filters for qdisc itself */
244 struct tcf_proto *filter_list;
245 int filter_cnt;
246
247 int rate2quantum; /* quant = rate / rate2quantum */
248 psched_time_t now; /* cached dequeue time */
249 struct timer_list timer; /* send delay timer */
250 #ifdef HTB_RATECM
251 struct timer_list rttim; /* rate computer timer */
252 int recmp_bucket; /* which hash bucket to recompute next */
253 #endif
254
255 /* non shaped skbs; let them go directly thru */
256 struct sk_buff_head direct_queue;
257 int direct_qlen; /* max qlen of above */
258
259 long direct_pkts;
260 };
261
262 /* compute hash of size HTB_HSIZE for given handle */
263 static __inline__ int htb_hash(u32 h)
264 {
265 #if HTB_HSIZE != 16
266 #error "Declare new hash for your HTB_HSIZE"
267 #endif
268 h ^= h>>8; /* stolen from cbq_hash */
269 h ^= h>>4;
270 return h & 0xf;
271 }
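/* Worked example (illustrative): handle 1:10 == 0x00010010 becomes
   0x00010110 after "h ^= h>>8" and 0x00011101 after "h ^= h>>4",
   so this class lands in hash bucket 1. */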
272
273 /* find class in global hash table using given handle */
274 static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
275 {
276 struct htb_sched *q = (struct htb_sched *)sch->data;
277 struct list_head *p;
278 if (TC_H_MAJ(handle) != sch->handle)
279 return NULL;
280
281 list_for_each (p,q->hash+htb_hash(handle)) {
282 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
283 if (cl->classid == handle)
284 return cl;
285 }
286 return NULL;
287 }
288
289 /**
290 * htb_classify - classify a packet into class
291 *
292 * It returns NULL if the packet should be dropped or -1 if the packet
293 * should be passed directly thru. In all other cases a leaf class is returned.
294 * We allow direct class selection by classid in skb->priority. Then we examine
295 * filters in qdisc and in inner nodes (if higher filter points to the inner
296 * node). If we end up with classid MAJOR:0 we enqueue the skb into the special
297 * internal fifo (direct). These packets then go directly thru. If we still
298 * have no valid leaf we try to use the MAJOR:default leaf. If that is still
299 * unsuccessful we finish and return the direct queue.
300 */
301 #define HTB_DIRECT (struct htb_class*)-1
302 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
303 {
304 struct htb_sched *q = (struct htb_sched *)sch->data;
305 struct htb_class *cl;
306 struct tcf_result res;
307 struct tcf_proto *tcf;
308 int result;
309
310 /* allow selecting a class by setting skb->priority to a valid classid;
311 note that nfmark can be used too by attaching filter fw with no
312 rules in it */
313 if (skb->priority == sch->handle)
314 return HTB_DIRECT; /* X:0 (direct flow) selected */
315 if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
316 return cl;
317
318 tcf = q->filter_list;
319 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
320 #ifdef CONFIG_NET_CLS_POLICE
321 if (result == TC_POLICE_SHOT)
322 return NULL;
323 #endif
324 if ((cl = (void*)res.class) == NULL) {
325 if (res.classid == sch->handle)
326 return HTB_DIRECT; /* X:0 (direct flow) */
327 if ((cl = htb_find(res.classid,sch)) == NULL)
328 break; /* filter selected invalid classid */
329 }
330 if (!cl->level)
331 return cl; /* we hit leaf; return it */
332
333 /* we have got inner class; apply inner filter chain */
334 tcf = cl->filter_list;
335 }
336 /* classification failed; try to use default class */
337 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
338 if (!cl || cl->level)
339 return HTB_DIRECT; /* bad default .. this is safe bet */
340 return cl;
341 }
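/* Usage note (illustrative, assuming the qdisc was created with handle 1:):
   setting skb->priority to 0x00010000 (1:0) sends the packet to the direct
   queue, while 0x00010005 (1:5) selects leaf class 1:5 without running any
   filter, provided that class exists and is a leaf. */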
342
343 #ifdef HTB_DEBUG
344 static void htb_next_rb_node(rb_node_t **n);
345 #define HTB_DUMTREE(root,memb) if(root) { \
346 rb_node_t *n = (root)->rb_node; \
347 while (n->rb_left) n = n->rb_left; \
348 while (n) { \
349 struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
350 printk(" %x",cl->classid); htb_next_rb_node (&n); \
351 } }
352
353 static void htb_debug_dump (struct htb_sched *q)
354 {
355 int i,p;
356 printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
357 /* rows */
358 for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
359 printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
360 for (p=0;p<TC_HTB_NUMPRIO;p++) {
361 if (!q->row[i][p].rb_node) continue;
362 printk(" p%d:",p);
363 HTB_DUMTREE(q->row[i]+p,node[p]);
364 }
365 printk("\n");
366 }
367 /* classes */
368 for (i = 0; i < HTB_HSIZE; i++) {
369 struct list_head *l;
370 list_for_each (l,q->hash+i) {
371 struct htb_class *cl = list_entry(l,struct htb_class,hlist);
372 long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
373 printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
374 "pa=%x f:",
375 cl->classid,cl->cmode,cl->tokens,cl->ctokens,
376 cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
377 cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
378 if (cl->level)
379 for (p=0;p<TC_HTB_NUMPRIO;p++) {
380 if (!cl->un.inner.feed[p].rb_node) continue;
381 printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
382 HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
383 }
384 printk("\n");
385 }
386 }
387 }
388 #endif
389 /**
390 * htb_add_to_id_tree - adds class to the round robin list
391 *
392 * Routine adds class to the list (actually tree) sorted by classid.
393 * Make sure that the class is not already on such a list for the given prio.
394 */
395 static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
396 struct htb_class *cl,int prio)
397 {
398 rb_node_t **p = &root->rb_node, *parent = NULL;
399 HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
400 #ifdef HTB_DEBUG
401 if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
402 HTB_CHCL(cl);
403 if (*p) {
404 struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
405 HTB_CHCL(x);
406 }
407 #endif
408 while (*p) {
409 struct htb_class *c; parent = *p;
410 c = rb_entry(parent, struct htb_class, node[prio]);
411 HTB_CHCL(c);
412 if (cl->classid > c->classid)
413 p = &parent->rb_right;
414 else
415 p = &parent->rb_left;
416 }
417 rb_link_node(&cl->node[prio], parent, p);
418 rb_insert_color(&cl->node[prio], root);
419 }
420
421 /**
422 * htb_add_to_wait_tree - adds class to the event queue with delay
423 *
424 * The class is added to the priority event queue to indicate that the class
425 * will change its mode after "delay" microseconds (cl->pq_key holds the jiffies
426 * time of that change). Make sure that the class is not already in the queue.
427 */
428 static void htb_add_to_wait_tree (struct htb_sched *q,
429 struct htb_class *cl,long delay,int debug_hint)
430 {
431 rb_node_t **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
432 HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
433 #ifdef HTB_DEBUG
434 if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
435 HTB_CHCL(cl);
436 if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
437 printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
438 #endif
439 cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
440 if (cl->pq_key == q->jiffies)
441 cl->pq_key++;
442
443 /* update the nearest event cache */
444 if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
445 q->near_ev_cache[cl->level] = cl->pq_key;
446
447 while (*p) {
448 struct htb_class *c; parent = *p;
449 c = rb_entry(parent, struct htb_class, pq_node);
450 if (time_after_eq(cl->pq_key, c->pq_key))
451 p = &parent->rb_right;
452 else
453 p = &parent->rb_left;
454 }
455 rb_link_node(&cl->pq_node, parent, p);
456 rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
457 }
458
459 /**
460 * htb_next_rb_node - finds next node in the rb-tree
461 *
462 * When we are past the last key, *n is set to NULL.
463 * Average complexity is 2 steps per call.
464 */
465 static void htb_next_rb_node(rb_node_t **n)
466 {
467 rb_node_t *p;
468 if ((*n)->rb_right) {
469 /* there is a right child: use it or its leftmost descendant */
470 *n = (*n)->rb_right;
471 while ((*n)->rb_left)
472 *n = (*n)->rb_left;
473 return;
474 }
475 while ((p = (*n)->rb_parent) != NULL) {
476 /* if we've arrived from left child then we have next node */
477 if (p->rb_left == *n) break;
478 *n = p;
479 }
480 *n = p;
481 }
482
483 /**
484 * htb_add_class_to_row - add class to its row
485 *
486 * The class is added to row at priorities marked in mask.
487 * It does nothing if mask == 0.
488 */
489 static inline void htb_add_class_to_row(struct htb_sched *q,
490 struct htb_class *cl,int mask)
491 {
492 HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
493 cl->classid,mask,q->row_mask[cl->level]);
494 HTB_CHCL(cl);
495 q->row_mask[cl->level] |= mask;
496 while (mask) {
497 int prio = ffz(~mask);
498 mask &= ~(1 << prio);
499 htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
500 }
501 }
502
503 /**
504 * htb_remove_class_from_row - removes class from its row
505 *
506 * The class is removed from row at priorities marked in mask.
507 * It does nothing if mask == 0.
508 */
509 static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
510 struct htb_class *cl,int mask)
511 {
512 int m = 0;
513 HTB_CHCL(cl);
514 while (mask) {
515 int prio = ffz(~mask);
516 mask &= ~(1 << prio);
517 if (q->ptr[cl->level][prio] == cl->node+prio)
518 htb_next_rb_node(q->ptr[cl->level]+prio);
519 htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
520 if (!q->row[cl->level][prio].rb_node)
521 m |= 1 << prio;
522 }
523 HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
524 cl->classid,mask,q->row_mask[cl->level],m);
525 q->row_mask[cl->level] &= ~m;
526 }
527
528 /**
529 * htb_activate_prios - creates active class's feed chain
530 *
531 * The class is connected to ancestors and/or appropriate rows
532 * for the priorities it participates in. cl->cmode must be the new
533 * (activated) mode. It does nothing if cl->prio_activity == 0.
534 */
535 static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
536 {
537 struct htb_class *p = cl->parent;
538 long m,mask = cl->prio_activity;
539 HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
540 HTB_CHCL(cl);
541
542 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
543 HTB_CHCL(p);
544 m = mask; while (m) {
545 int prio = ffz(~m);
546 m &= ~(1 << prio);
547
548 if (p->un.inner.feed[prio].rb_node)
549 /* parent already has its feed in use, so
550 reset the bit in mask as the parent is already ok */
551 mask &= ~(1 << prio);
552
553 htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
554 }
555 HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
556 p->classid,p->prio_activity,mask,p->cmode);
557 p->prio_activity |= mask;
558 cl = p; p = cl->parent;
559 HTB_CHCL(cl);
560 }
561 if (cl->cmode == HTB_CAN_SEND && mask)
562 htb_add_class_to_row(q,cl,mask);
563 }
564
565 /**
566 * htb_deactivate_prios - remove class from feed chain
567 *
568 * cl->cmode must represent old mode (before deactivation). It does
569 * nothing if cl->prio_activity == 0. Class is removed from all feed
570 * chains and rows.
571 */
572 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
573 {
574 struct htb_class *p = cl->parent;
575 long m,mask = cl->prio_activity;
576 HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
577 HTB_CHCL(cl);
578
579 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
580 m = mask; mask = 0;
581 while (m) {
582 int prio = ffz(~m);
583 m &= ~(1 << prio);
584
585 if (p->un.inner.ptr[prio] == cl->node+prio) {
586 /* we are removing a child which is pointed to by the
587 parent feed - forget the pointer but remember the
588 classid */
589 p->un.inner.last_ptr_id[prio] = cl->classid;
590 p->un.inner.ptr[prio] = NULL;
591 }
592
593 htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
594
595 if (!p->un.inner.feed[prio].rb_node)
596 mask |= 1 << prio;
597 }
598 HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
599 p->classid,p->prio_activity,mask,p->cmode);
600 p->prio_activity &= ~mask;
601 cl = p; p = cl->parent;
602 HTB_CHCL(cl);
603 }
604 if (cl->cmode == HTB_CAN_SEND && mask)
605 htb_remove_class_from_row(q,cl,mask);
606 }
607
608 /**
609 * htb_class_mode - computes and returns current class mode
610 *
611 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
612 * is not HTB_CAN_SEND then *diff is set to the time difference from now
613 * to the time when cl will change its state (the caller uses it for cl->pq_key).
614 * It is also worth noting that the class mode doesn't change simply
615 * at cl->{c,}tokens == 0 but rather with a hysteresis over the
616 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
617 * mode transitions per time unit. The speed gain is about 1/6.
618 */
619 static __inline__ enum htb_cmode
620 htb_class_mode(struct htb_class *cl,long *diff)
621 {
622 long toks;
623
624 if ((toks = (cl->ctokens + *diff)) < (
625 #if HTB_HYSTERESIS
626 cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
627 #endif
628 0)) {
629 *diff = -toks;
630 return HTB_CANT_SEND;
631 }
632 if ((toks = (cl->tokens + *diff)) >= (
633 #if HTB_HYSTERESIS
634 cl->cmode == HTB_CAN_SEND ? -cl->buffer :
635 #endif
636 0))
637 return HTB_CAN_SEND;
638
639 *diff = -toks;
640 return HTB_MAY_BORROW;
641 }
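/* Summary of the decision above (a sketch, with HTB_HYSTERESIS enabled):
     ctokens + diff below its bound    -> HTB_CANT_SEND  (even the ceil is exhausted)
     tokens  + diff at/above its bound -> HTB_CAN_SEND   (own rate still available)
     otherwise                         -> HTB_MAY_BORROW (must borrow from a parent)
   each bound being 0, lowered to -cl->{c,}buffer when hysteresis applies to the
   current mode. */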
642
643 /**
644 * htb_change_class_mode - changes class's mode
645 *
646 * This should be the only way to change a class's mode under normal
647 * circumstances. The routine will update feed list linkage, change the mode
648 * and add the class to the wait event queue if appropriate. The new mode should
649 * differ from the old one and cl->pq_key has to be valid if changing
650 * to a mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
651 */
652 static void
653 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
654 {
655 enum htb_cmode new_mode = htb_class_mode(cl,diff);
656
657 HTB_CHCL(cl);
658 HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);
659
660 if (new_mode == cl->cmode)
661 return;
662
663 if (cl->prio_activity) { /* not necessary: speed optimization */
664 if (cl->cmode != HTB_CANT_SEND)
665 htb_deactivate_prios(q,cl);
666 cl->cmode = new_mode;
667 if (new_mode != HTB_CANT_SEND)
668 htb_activate_prios(q,cl);
669 } else
670 cl->cmode = new_mode;
671 }
672
673 /**
674 * htb_activate - inserts leaf cl into appropriate active feeds
675 *
676 * The routine learns the (new) priority of the leaf and activates the feed
677 * chain for that prio. It can safely be called on an already active leaf.
678 * It also adds the leaf into the drop list.
679 */
680 static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
681 {
682 BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
683 HTB_CHCL(cl);
684 if (!cl->prio_activity) {
685 cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
686 htb_activate_prios(q,cl);
687 list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
688 }
689 }
690
691 /**
692 * htb_deactivate - remove leaf cl from active feeds
693 *
694 * Make sure that the leaf is active. In other words it can't be called
695 * with a non-active leaf. It also removes the class from the drop list.
696 */
697 static __inline__ void
698 htb_deactivate(struct htb_sched *q,struct htb_class *cl)
699 {
700 BUG_TRAP(cl->prio_activity);
701 HTB_CHCL(cl);
702 htb_deactivate_prios(q,cl);
703 cl->prio_activity = 0;
704 list_del_init(&cl->un.leaf.drop_list);
705 }
706
707 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
708 {
709 struct htb_sched *q = (struct htb_sched *)sch->data;
710 struct htb_class *cl = htb_classify(skb,sch);
711
712 if (cl == HTB_DIRECT || !cl) {
713 /* enqueue to helper queue */
714 if (q->direct_queue.qlen < q->direct_qlen && cl) {
715 __skb_queue_tail(&q->direct_queue, skb);
716 q->direct_pkts++;
717 } else {
718 kfree_skb (skb);
719 sch->stats.drops++;
720 return NET_XMIT_DROP;
721 }
722 } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
723 sch->stats.drops++;
724 cl->stats.drops++;
725 return NET_XMIT_DROP;
726 } else {
727 cl->stats.packets++; cl->stats.bytes += skb->len;
728 htb_activate (q,cl);
729 }
730
731 sch->q.qlen++;
732 sch->stats.packets++; sch->stats.bytes += skb->len;
733 HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
734 return NET_XMIT_SUCCESS;
735 }
736
737 /* TODO: requeuing packet charges it to policers again !! */
738 static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
739 {
740 struct htb_sched *q = (struct htb_sched *)sch->data;
741 struct htb_class *cl = htb_classify(skb,sch);
742 struct sk_buff *tskb;
743
744 if (cl == HTB_DIRECT || !cl) {
745 /* enqueue to helper queue */
746 if (q->direct_queue.qlen < q->direct_qlen && cl) {
747 __skb_queue_head(&q->direct_queue, skb);
748 } else {
749 __skb_queue_head(&q->direct_queue, skb);
750 tskb = __skb_dequeue_tail(&q->direct_queue);
751 kfree_skb (tskb);
752 sch->stats.drops++;
753 return NET_XMIT_CN;
754 }
755 } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
756 sch->stats.drops++;
757 cl->stats.drops++;
758 return NET_XMIT_DROP;
759 } else
760 htb_activate (q,cl);
761
762 sch->q.qlen++;
763 HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
764 return NET_XMIT_SUCCESS;
765 }
766
767 static void htb_timer(unsigned long arg)
768 {
769 struct Qdisc *sch = (struct Qdisc*)arg;
770 sch->flags &= ~TCQ_F_THROTTLED;
771 wmb();
772 netif_schedule(sch->dev);
773 }
774
775 #ifdef HTB_RATECM
776 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
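/* RT_GEN is a simple exponentially weighted moving average: the accumulated
   delta D is folded into the rate R while 1/HTB_EWMAC of the old R decays away.
   Since each hash bucket is revisited every HTB_HSIZE seconds, R averages over
   roughly HTB_EWMAC*HTB_HSIZE seconds, as noted at HTB_EWMAC above. */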
777 static void htb_rate_timer(unsigned long arg)
778 {
779 struct Qdisc *sch = (struct Qdisc*)arg;
780 struct htb_sched *q = (struct htb_sched *)sch->data;
781 struct list_head *p;
782
783 /* lock queue so that we can muck with it */
784 HTB_QLOCK(sch);
785 HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);
786
787 q->rttim.expires = jiffies + HZ;
788 add_timer(&q->rttim);
789
790 /* scan and recompute one bucket at time */
791 if (++q->recmp_bucket >= HTB_HSIZE)
792 q->recmp_bucket = 0;
793 list_for_each (p,q->hash+q->recmp_bucket) {
794 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
795 HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
796 cl->classid,cl->sum_bytes,cl->sum_packets);
797 RT_GEN (cl->sum_bytes,cl->rate_bytes);
798 RT_GEN (cl->sum_packets,cl->rate_packets);
799 }
800 HTB_QUNLOCK(sch);
801 }
802 #endif
803
804 /**
805 * htb_charge_class - charges amount "bytes" to leaf and ancestors
806 *
807 * The routine assumes that a packet "bytes" long was dequeued from leaf cl
808 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
809 * the leaf and all ancestors and to the rate bucket for ancestors at levels
810 * "level" and higher. It also handles a possible change of mode resulting
811 * from the update. Note that the mode can also increase here (MAY_BORROW to
812 * CAN_SEND) because we can use a more precise clock than the event queue here.
813 * In such a case we remove the class from the event queue first.
814 */
815 static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
816 int level,int bytes)
817 {
818 long toks,diff;
819 enum htb_cmode old_mode;
820 HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);
821
822 #define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
823 if (toks > cl->B) toks = cl->B; \
824 toks -= L2T(cl, cl->R, bytes); \
825 if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
826 cl->T = toks
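/* HTB_ACCNT(T,B,R) is the token bucket update for the pair (tokens T, bucket
   size B, rate table R): refill by the elapsed time "diff", clamp to the
   bucket size, subtract the transmission time of this packet, and floor the
   result at 1-mbuffer so a long backlog cannot drive the counter arbitrarily
   far negative. */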
827
828 while (cl) {
829 HTB_CHCL(cl);
830 diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
831 #ifdef HTB_DEBUG
832 if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
833 if (net_ratelimit())
834 printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
835 cl->classid, diff,
836 (unsigned long long) q->now,
837 (unsigned long long) cl->t_c,
838 q->jiffies);
839 diff = 1000;
840 }
841 #endif
842 if (cl->level >= level) {
843 if (cl->level == level) cl->xstats.lends++;
844 HTB_ACCNT (tokens,buffer,rate);
845 } else {
846 cl->xstats.borrows++;
847 cl->tokens += diff; /* we moved t_c; update tokens */
848 }
849 HTB_ACCNT (ctokens,cbuffer,ceil);
850 cl->t_c = q->now;
851 HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);
852
853 old_mode = cl->cmode; diff = 0;
854 htb_change_class_mode(q,cl,&diff);
855 if (old_mode != cl->cmode) {
856 if (old_mode != HTB_CAN_SEND)
857 htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
858 if (cl->cmode != HTB_CAN_SEND)
859 htb_add_to_wait_tree (q,cl,diff,1);
860 }
861
862 #ifdef HTB_RATECM
863 /* update rate counters */
864 cl->sum_bytes += bytes; cl->sum_packets++;
865 #endif
866
867 /* update byte stats except for leaves which are already updated */
868 if (cl->level) {
869 cl->stats.bytes += bytes;
870 cl->stats.packets++;
871 }
872 cl = cl->parent;
873 }
874 }
875
876 /**
877 * htb_do_events - make mode changes to classes at the level
878 *
879 * Scans event queue for pending events and applies them. Returns jiffies to
880 * next pending event (0 for no event in pq).
881 * Note: Applied are events whose cl->pq_key <= q->jiffies.
882 */
883 static long htb_do_events(struct htb_sched *q,int level)
884 {
885 int i;
886 HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
887 level,q->wait_pq[level].rb_node,q->row_mask[level]);
888 for (i = 0; i < 500; i++) {
889 struct htb_class *cl;
890 long diff;
891 rb_node_t *p = q->wait_pq[level].rb_node;
892 if (!p) return 0;
893 while (p->rb_left) p = p->rb_left;
894
895 cl = rb_entry(p, struct htb_class, pq_node);
896 if (time_after(cl->pq_key, q->jiffies)) {
897 HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
898 return cl->pq_key - q->jiffies;
899 }
900 htb_safe_rb_erase(p,q->wait_pq+level);
901 diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
902 #ifdef HTB_DEBUG
903 if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
904 if (net_ratelimit())
905 printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
906 cl->classid, diff,
907 (unsigned long long) q->now,
908 (unsigned long long) cl->t_c,
909 q->jiffies);
910 diff = 1000;
911 }
912 #endif
913 htb_change_class_mode(q,cl,&diff);
914 if (cl->cmode != HTB_CAN_SEND)
915 htb_add_to_wait_tree (q,cl,diff,2);
916 }
917 if (net_ratelimit())
918 printk(KERN_WARNING "htb: too many events !\n");
919 return HZ/10;
920 }
921
922 /* Returns class->node+prio from the id-tree where the class's id is >= id. NULL
923 if no such one exists. */
924 static rb_node_t *
925 htb_id_find_next_upper(int prio,rb_node_t *n,u32 id)
926 {
927 rb_node_t *r = NULL;
928 while (n) {
929 struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
930 if (id == cl->classid) return n;
931
932 if (id > cl->classid) {
933 n = n->rb_right;
934 } else {
935 r = n;
936 n = n->rb_left;
937 }
938 }
939 return r;
940 }
941
942 /**
943 * htb_lookup_leaf - returns next leaf class in DRR order
944 *
945 * Find the leaf where the current feed pointer points to.
946 */
947 static struct htb_class *
948 htb_lookup_leaf(HTB_ARGQ rb_root_t *tree,int prio,rb_node_t **pptr,u32 *pid)
949 {
950 int i;
951 struct {
952 rb_node_t *root;
953 rb_node_t **pptr;
954 u32 *pid;
955 } stk[TC_HTB_MAXDEPTH],*sp = stk;
956
957 BUG_TRAP(tree->rb_node);
958 sp->root = tree->rb_node;
959 sp->pptr = pptr;
960 sp->pid = pid;
961
962 for (i = 0; i < 65535; i++) {
963 HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);
964
965 if (!*sp->pptr && *sp->pid) {
966 /* ptr was invalidated but id is valid - try to recover
967 the original or next ptr */
968 *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
969 }
970 *sp->pid = 0; /* ptr is valid now so remove this hint as it
971 can become out of date quickly */
972 if (!*sp->pptr) { /* we are at right end; rewind & go up */
973 *sp->pptr = sp->root;
974 while ((*sp->pptr)->rb_left)
975 *sp->pptr = (*sp->pptr)->rb_left;
976 if (sp > stk) {
977 sp--;
978 BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
979 htb_next_rb_node (sp->pptr);
980 }
981 } else {
982 struct htb_class *cl;
983 cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
984 HTB_CHCL(cl);
985 if (!cl->level)
986 return cl;
987 (++sp)->root = cl->un.inner.feed[prio].rb_node;
988 sp->pptr = cl->un.inner.ptr+prio;
989 sp->pid = cl->un.inner.last_ptr_id+prio;
990 }
991 }
992 BUG_TRAP(0);
993 return NULL;
994 }
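/* The per-prio (pptr,pid) pair is what makes this DRR walk restartable: pptr
   caches the current position in each feed tree, and when that node vanishes
   (class deactivated) the saved classid in pid lets htb_id_find_next_upper
   resume at the nearest class with an equal or higher classid instead of
   rewinding to the leftmost child. */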
995
996 /* dequeues packet at given priority and level; call only if
997 you are sure that there is an active class at prio/level */
998 static struct sk_buff *
999 htb_dequeue_tree(struct htb_sched *q,int prio,int level)
1000 {
1001 struct sk_buff *skb = NULL;
1002 struct htb_class *cl,*start;
1003 /* look initial class up in the row */
1004 start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
1005 q->ptr[level]+prio,q->last_ptr_id[level]+prio);
1006
1007 do {
1008 next:
1009 BUG_TRAP(cl);
1010 if (!cl) return NULL;
1011 HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
1012 prio,level,cl->classid,cl->un.leaf.deficit[level]);
1013
1014 /* the class can be empty - it is unlikely but can be true if the leaf
1015 qdisc drops packets in its enqueue routine or if someone used the
1016 graft operation on the leaf since the last dequeue;
1017 simply deactivate and skip such a class */
1018 if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
1019 struct htb_class *next;
1020 htb_deactivate(q,cl);
1021
1022 /* row/level might become empty */
1023 if ((q->row_mask[level] & (1 << prio)) == 0)
1024 return NULL;
1025
1026 next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
1027 prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
1028 if (cl == start) /* fix start if we just deleted it */
1029 start = next;
1030 cl = next;
1031 goto next;
1032 }
1033
1034 if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
1035 break;
1036 if (!cl->warned) {
1037 printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
1038 cl->warned = 1;
1039 }
1040 q->nwc_hit++;
1041 htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
1042 cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
1043 q->last_ptr_id[level]+prio);
1044 } while (cl != start);
1045
1046 if (likely(skb != NULL)) {
1047 if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
1048 HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
1049 level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
1050 cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
1051 htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
1052 }
1053 /* this used to be after charge_class but this ordering
1054 gives us slightly better performance */
1055 if (!cl->un.leaf.q->q.qlen)
1056 htb_deactivate (q,cl);
1057 htb_charge_class (q,cl,level,skb->len);
1058 }
1059 return skb;
1060 }
1061
1062 static void htb_delay_by(struct Qdisc *sch,long delay)
1063 {
1064 struct htb_sched *q = (struct htb_sched *)sch->data;
1065 if (delay <= 0) delay = 1;
1066 if (unlikely(delay > 5*HZ)) {
1067 if (net_ratelimit())
1068 printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
1069 delay = 5*HZ;
1070 }
1071 /* why not use jiffies here? because expires can be in the past */
1072 mod_timer(&q->timer, q->jiffies + delay);
1073 sch->flags |= TCQ_F_THROTTLED;
1074 sch->stats.overlimits++;
1075 HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
1076 }
1077
1078 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
1079 {
1080 struct sk_buff *skb = NULL;
1081 struct htb_sched *q = (struct htb_sched *)sch->data;
1082 int level;
1083 long min_delay;
1084 #ifdef HTB_DEBUG
1085 int evs_used = 0;
1086 #endif
1087
1088 q->jiffies = jiffies;
1089 HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
1090 sch->q.qlen);
1091
1092 /* try to dequeue direct packets as high prio (!) to minimize cpu work */
1093 if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
1094 sch->flags &= ~TCQ_F_THROTTLED;
1095 sch->q.qlen--;
1096 return skb;
1097 }
1098
1099 if (!sch->q.qlen) goto fin;
1100 PSCHED_GET_TIME(q->now);
1101
1102 min_delay = LONG_MAX;
1103 q->nwc_hit = 0;
1104 for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
1105 /* common case optimization - skip event handler quickly */
1106 int m;
1107 long delay;
1108 if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
1109 delay = htb_do_events(q,level);
1110 q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
1111 #ifdef HTB_DEBUG
1112 evs_used++;
1113 #endif
1114 } else
1115 delay = q->near_ev_cache[level] - q->jiffies;
1116
1117 if (delay && min_delay > delay)
1118 min_delay = delay;
1119 m = ~q->row_mask[level];
1120 while (m != (int)(-1)) {
1121 int prio = ffz (m);
1122 m |= 1 << prio;
1123 skb = htb_dequeue_tree(q,prio,level);
1124 if (likely(skb != NULL)) {
1125 sch->q.qlen--;
1126 sch->flags &= ~TCQ_F_THROTTLED;
1127 goto fin;
1128 }
1129 }
1130 }
1131 #ifdef HTB_DEBUG
1132 if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
1133 if (min_delay == LONG_MAX) {
1134 printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
1135 evs_used,q->jiffies,jiffies);
1136 htb_debug_dump(q);
1137 } else
1138 printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
1139 "too small rate\n",min_delay);
1140 }
1141 #endif
1142 htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
1143 fin:
1144 HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
1145 return skb;
1146 }
1147
1148 /* try to drop from each class (by prio) until one succeeds */
1149 static unsigned int htb_drop(struct Qdisc* sch)
1150 {
1151 struct htb_sched *q = (struct htb_sched *)sch->data;
1152 int prio;
1153
1154 for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
1155 struct list_head *p;
1156 list_for_each (p,q->drops+prio) {
1157 struct htb_class *cl = list_entry(p, struct htb_class,
1158 un.leaf.drop_list);
1159 unsigned int len;
1160 if (cl->un.leaf.q->ops->drop &&
1161 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
1162 sch->q.qlen--;
1163 if (!cl->un.leaf.q->q.qlen)
1164 htb_deactivate (q,cl);
1165 return len;
1166 }
1167 }
1168 }
1169 return 0;
1170 }
1171
1172 /* reset all classes */
1173 /* always called under BH & queue lock */
1174 static void htb_reset(struct Qdisc* sch)
1175 {
1176 struct htb_sched *q = (struct htb_sched *)sch->data;
1177 int i;
1178 HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
1179
1180 for (i = 0; i < HTB_HSIZE; i++) {
1181 struct list_head *p;
1182 list_for_each (p,q->hash+i) {
1183 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1184 if (cl->level)
1185 memset(&cl->un.inner,0,sizeof(cl->un.inner));
1186 else {
1187 if (cl->un.leaf.q)
1188 qdisc_reset(cl->un.leaf.q);
1189 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1190 }
1191 cl->prio_activity = 0;
1192 cl->cmode = HTB_CAN_SEND;
1193 #ifdef HTB_DEBUG
1194 cl->pq_node.rb_color = -1;
1195 memset(cl->node,255,sizeof(cl->node));
1196 #endif
1197
1198 }
1199 }
1200 sch->flags &= ~TCQ_F_THROTTLED;
1201 del_timer(&q->timer);
1202 __skb_queue_purge(&q->direct_queue);
1203 sch->q.qlen = 0;
1204 memset(q->row,0,sizeof(q->row));
1205 memset(q->row_mask,0,sizeof(q->row_mask));
1206 memset(q->wait_pq,0,sizeof(q->wait_pq));
1207 memset(q->ptr,0,sizeof(q->ptr));
1208 for (i = 0; i < TC_HTB_NUMPRIO; i++)
1209 INIT_LIST_HEAD(q->drops+i);
1210 }
1211
1212 static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1213 {
1214 struct htb_sched *q = (struct htb_sched*)sch->data;
1215 struct rtattr *tb[TCA_HTB_INIT];
1216 struct tc_htb_glob *gopt;
1217 int i;
1218 #ifdef HTB_DEBUG
1219 printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
1220 HTB_VER >> 16,HTB_VER & 0xffff);
1221 #endif
1222 if (!opt || rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
1223 tb[TCA_HTB_INIT-1] == NULL ||
1224 RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
1225 printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1226 return -EINVAL;
1227 }
1228 gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
1229 if (gopt->version != HTB_VER >> 16) {
1230 printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
1231 HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
1232 return -EINVAL;
1233 }
1234 q->debug = gopt->debug;
1235 HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);
1236
1237 INIT_LIST_HEAD(&q->root);
1238 for (i = 0; i < HTB_HSIZE; i++)
1239 INIT_LIST_HEAD(q->hash+i);
1240 for (i = 0; i < TC_HTB_NUMPRIO; i++)
1241 INIT_LIST_HEAD(q->drops+i);
1242
1243 init_timer(&q->timer);
1244 skb_queue_head_init(&q->direct_queue);
1245
1246 q->direct_qlen = sch->dev->tx_queue_len;
1247 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
1248 q->direct_qlen = 2;
1249 q->timer.function = htb_timer;
1250 q->timer.data = (unsigned long)sch;
1251
1252 #ifdef HTB_RATECM
1253 init_timer(&q->rttim);
1254 q->rttim.function = htb_rate_timer;
1255 q->rttim.data = (unsigned long)sch;
1256 q->rttim.expires = jiffies + HZ;
1257 add_timer(&q->rttim);
1258 #endif
1259 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1260 q->rate2quantum = 1;
1261 q->defcls = gopt->defcls;
1262
1263 MOD_INC_USE_COUNT;
1264 return 0;
1265 }
1266
1267 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1268 {
1269 struct htb_sched *q = (struct htb_sched*)sch->data;
1270 unsigned char *b = skb->tail;
1271 struct rtattr *rta;
1272 struct tc_htb_glob gopt;
1273 HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
1274 /* stats */
1275 HTB_QLOCK(sch);
1276 gopt.direct_pkts = q->direct_pkts;
1277
1278 #ifdef HTB_DEBUG
1279 if (HTB_DBG_COND(0,2))
1280 htb_debug_dump(q);
1281 #endif
1282 gopt.version = HTB_VER;
1283 gopt.rate2quantum = q->rate2quantum;
1284 gopt.defcls = q->defcls;
1285 gopt.debug = q->debug;
1286 rta = (struct rtattr*)b;
1287 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1288 RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1289 rta->rta_len = skb->tail - b;
1290 HTB_QUNLOCK(sch);
1291 return skb->len;
1292 rtattr_failure:
1293 HTB_QUNLOCK(sch);
1294 skb_trim(skb, skb->tail - skb->data);
1295 return -1;
1296 }
1297
1298 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1299 struct sk_buff *skb, struct tcmsg *tcm)
1300 {
1301 #ifdef HTB_DEBUG
1302 struct htb_sched *q = (struct htb_sched*)sch->data;
1303 #endif
1304 struct htb_class *cl = (struct htb_class*)arg;
1305 unsigned char *b = skb->tail;
1306 struct rtattr *rta;
1307 struct tc_htb_opt opt;
1308
1309 HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);
1310
1311 HTB_QLOCK(sch);
1312 tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
1313 tcm->tcm_handle = cl->classid;
1314 if (!cl->level && cl->un.leaf.q) {
1315 tcm->tcm_info = cl->un.leaf.q->handle;
1316 cl->stats.qlen = cl->un.leaf.q->q.qlen;
1317 }
1318
1319 rta = (struct rtattr*)b;
1320 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1321
1322 memset (&opt,0,sizeof(opt));
1323
1324 opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
1325 opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
1326 opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
1327 opt.level = cl->level;
1328 RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1329 rta->rta_len = skb->tail - b;
1330
1331 #ifdef HTB_RATECM
1332 cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
1333 cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
1334 #endif
1335
1336 cl->xstats.tokens = cl->tokens;
1337 cl->xstats.ctokens = cl->ctokens;
1338 RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
1339 RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
1340 HTB_QUNLOCK(sch);
1341 return skb->len;
1342 rtattr_failure:
1343 HTB_QUNLOCK(sch);
1344 skb_trim(skb, b - skb->data);
1345 return -1;
1346 }
1347
1348 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1349 struct Qdisc **old)
1350 {
1351 struct htb_class *cl = (struct htb_class*)arg;
1352
1353 if (cl && !cl->level) {
1354 if (new == NULL && (new = qdisc_create_dflt(sch->dev,
1355 &pfifo_qdisc_ops)) == NULL)
1356 return -ENOBUFS;
1357 sch_tree_lock(sch);
1358 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1359 if (cl->prio_activity)
1360 htb_deactivate ((struct htb_sched*)sch->data,cl);
1361
1362 /* TODO: is it correct ? Why doesn't CBQ do it ? */
1363 sch->q.qlen -= (*old)->q.qlen;
1364 qdisc_reset(*old);
1365 }
1366 sch_tree_unlock(sch);
1367 return 0;
1368 }
1369 return -ENOENT;
1370 }
1371
1372 static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
1373 {
1374 struct htb_class *cl = (struct htb_class*)arg;
1375 return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1376 }
1377
1378 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1379 {
1380 #ifdef HTB_DEBUG
1381 struct htb_sched *q = (struct htb_sched *)sch->data;
1382 #endif
1383 struct htb_class *cl = htb_find(classid,sch);
1384 HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
1385 if (cl)
1386 cl->refcnt++;
1387 return (unsigned long)cl;
1388 }
1389
1390 static void htb_destroy_filters(struct tcf_proto **fl)
1391 {
1392 struct tcf_proto *tp;
1393
1394 while ((tp = *fl) != NULL) {
1395 *fl = tp->next;
1396 tcf_destroy(tp);
1397 }
1398 }
1399
1400 static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
1401 {
1402 struct htb_sched *q = (struct htb_sched *)sch->data;
1403 HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
1404 if (!cl->level) {
1405 BUG_TRAP(cl->un.leaf.q);
1406 sch->q.qlen -= cl->un.leaf.q->q.qlen;
1407 qdisc_destroy(cl->un.leaf.q);
1408 }
1409 qdisc_put_rtab(cl->rate);
1410 qdisc_put_rtab(cl->ceil);
1411
1412 #ifdef CONFIG_NET_ESTIMATOR
1413 qdisc_kill_estimator(&cl->stats);
1414 #endif
1415 htb_destroy_filters (&cl->filter_list);
1416
1417 while (!list_empty(&cl->children))
1418 htb_destroy_class (sch,list_entry(cl->children.next,
1419 struct htb_class,sibling));
1420
1421 /* note: this delete may happen twice (see htb_delete) */
1422 list_del(&cl->hlist);
1423 list_del(&cl->sibling);
1424
1425 if (cl->prio_activity)
1426 htb_deactivate (q,cl);
1427
1428 if (cl->cmode != HTB_CAN_SEND)
1429 htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
1430
1431 kfree(cl);
1432 }
1433
1434 /* always called under BH & queue lock */
1435 static void htb_destroy(struct Qdisc* sch)
1436 {
1437 struct htb_sched *q = (struct htb_sched *)sch->data;
1438 HTB_DBG(0,1,"htb_destroy q=%p\n",q);
1439
1440 del_timer_sync (&q->timer);
1441 #ifdef HTB_RATECM
1442 del_timer_sync (&q->rttim);
1443 #endif
1444 /* This line used to be after htb_destroy_class call below
1445 and surprisingly it worked in 2.4. But it must precede it
1446 because filters need their target class alive to be able to call
1447 unbind_filter on it (without an Oops). */
1448 htb_destroy_filters(&q->filter_list);
1449
1450 while (!list_empty(&q->root))
1451 htb_destroy_class (sch,list_entry(q->root.next,
1452 struct htb_class,sibling));
1453
1454 __skb_queue_purge(&q->direct_queue);
1455 MOD_DEC_USE_COUNT;
1456 }
1457
1458 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1459 {
1460 struct htb_sched *q = (struct htb_sched *)sch->data;
1461 struct htb_class *cl = (struct htb_class*)arg;
1462 HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1463
1464 // TODO: why don't we allow deleting a subtree ? references ? does the
1465 // tc subsys guarantee us that in htb_destroy it holds no class
1466 // refs so that we can remove children safely there ?
1467 if (!list_empty(&cl->children) || cl->filter_cnt)
1468 return -EBUSY;
1469
1470 sch_tree_lock(sch);
1471
1472 /* delete from hash and active; remainder in destroy_class */
1473 list_del_init(&cl->hlist);
1474 if (cl->prio_activity)
1475 htb_deactivate (q,cl);
1476
1477 if (--cl->refcnt == 0)
1478 htb_destroy_class(sch,cl);
1479
1480 sch_tree_unlock(sch);
1481 return 0;
1482 }
1483
1484 static void htb_put(struct Qdisc *sch, unsigned long arg)
1485 {
1486 #ifdef HTB_DEBUG
1487 struct htb_sched *q = (struct htb_sched *)sch->data;
1488 #endif
1489 struct htb_class *cl = (struct htb_class*)arg;
1490 HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1491
1492 if (--cl->refcnt == 0)
1493 htb_destroy_class(sch,cl);
1494 }
1495
1496 static int htb_change_class(struct Qdisc *sch, u32 classid,
1497 u32 parentid, struct rtattr **tca, unsigned long *arg)
1498 {
1499 int err = -EINVAL;
1500 struct htb_sched *q = (struct htb_sched *)sch->data;
1501 struct htb_class *cl = (struct htb_class*)*arg,*parent;
1502 struct rtattr *opt = tca[TCA_OPTIONS-1];
1503 struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1504 struct rtattr *tb[TCA_HTB_RTAB];
1505 struct tc_htb_opt *hopt;
1506
1507 /* extract all subattrs from opt attr */
1508 if (!opt || rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
1509 tb[TCA_HTB_PARMS-1] == NULL ||
1510 RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1511 goto failure;
1512
1513 parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
1514
1515 hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1516 HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
1517 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1518 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
1519 if (!rtab || !ctab) goto failure;
1520
1521 if (!cl) { /* new class */
1522 struct Qdisc *new_q;
1523 /* check for valid classid */
1524 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
1525 goto failure;
1526
1527 /* check maximal depth */
1528 if (parent && parent->parent && parent->parent->level < 2) {
1529 printk(KERN_ERR "htb: tree is too deep\n");
1530 goto failure;
1531 }
1532 err = -ENOBUFS;
1533 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1534 goto failure;
1535
1536 memset(cl, 0, sizeof(*cl));
1537 cl->refcnt = 1;
1538 INIT_LIST_HEAD(&cl->sibling);
1539 INIT_LIST_HEAD(&cl->hlist);
1540 INIT_LIST_HEAD(&cl->children);
1541 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1542 #ifdef HTB_DEBUG
1543 cl->magic = HTB_CMAGIC;
1544 #endif
1545
1546 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1547 which can't be used inside of sch_tree_lock
1548 -- thanks to Karlis Peisenieks */
1549 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
1550 sch_tree_lock(sch);
1551 if (parent && !parent->level) {
1552 /* turn parent into inner node */
1553 sch->q.qlen -= parent->un.leaf.q->q.qlen;
1554 qdisc_destroy (parent->un.leaf.q);
1555 if (parent->prio_activity)
1556 htb_deactivate (q,parent);
1557
1558 /* remove from evt list because of level change */
1559 if (parent->cmode != HTB_CAN_SEND) {
1560 htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
1561 parent->cmode = HTB_CAN_SEND;
1562 }
1563 parent->level = (parent->parent ? parent->parent->level
1564 : TC_HTB_MAXDEPTH) - 1;
1565 memset (&parent->un.inner,0,sizeof(parent->un.inner));
1566 }
1567 /* leaf (we) needs elementary qdisc */
1568 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1569
1570 cl->classid = classid; cl->parent = parent;
1571
1572 /* set class to be in HTB_CAN_SEND state */
1573 cl->tokens = hopt->buffer;
1574 cl->ctokens = hopt->cbuffer;
1575 cl->mbuffer = 60000000; /* 1min */
1576 PSCHED_GET_TIME(cl->t_c);
1577 cl->cmode = HTB_CAN_SEND;
1578
1579 /* attach to the hash list and parent's family */
1580 list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
1581 list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
1582 #ifdef HTB_DEBUG
1583 {
1584 int i;
1585 for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
1586 cl->pq_node.rb_color = -1;
1587 }
1588 #endif
1589 } else sch_tree_lock(sch);
1590
1591 /* there used to be a nasty bug here: we have to check that the node
1592 is really a leaf before changing cl->un.leaf ! */
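/* Sketch of the quantum selection below (worked example, not from the source):
   with the usual r2q default of 10 and a class rate of 125000 bytes/s (1 Mbit/s)
   the DRR quantum becomes 125000/10 = 12500 bytes per round; an explicit
   hopt->quantum overrides the computation, and when no quantum is given the
   computed value is clamped to the 1000..200000 range with a warning. */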
1593 if (!cl->level) {
1594 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1595 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1596 printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
1597 cl->un.leaf.quantum = 1000;
1598 }
1599 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1600 printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
1601 cl->un.leaf.quantum = 200000;
1602 }
1603 if (hopt->quantum)
1604 cl->un.leaf.quantum = hopt->quantum;
1605 if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
1606 cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
1607 }
1608
1609 cl->buffer = hopt->buffer;
1610 cl->cbuffer = hopt->cbuffer;
1611 if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
1612 if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
1613 sch_tree_unlock(sch);
1614
1615 *arg = (unsigned long)cl;
1616 return 0;
1617
1618 failure:
1619 if (rtab) qdisc_put_rtab(rtab);
1620 if (ctab) qdisc_put_rtab(ctab);
1621 return err;
1622 }
1623
1624 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1625 {
1626 struct htb_sched *q = (struct htb_sched *)sch->data;
1627 struct htb_class *cl = (struct htb_class *)arg;
1628 struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1629 HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
1630 return fl;
1631 }
1632
1633 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1634 u32 classid)
1635 {
1636 struct htb_sched *q = (struct htb_sched *)sch->data;
1637 struct htb_class *cl = htb_find (classid,sch);
1638 HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
1639 /*if (cl && !cl->level) return 0;
1640 The line above used to be there to prevent attaching filters to
1641 leaves. But at least tc_index filter uses this just to get class
1642 for other reasons so that we have to allow for it.
1643 ----
1644 19.6.2002 As Werner explained it is ok - bind filter is just
1645 another way to "lock" the class - unlike "get" this lock can
1646 be broken by class during destroy IIUC.
1647 */
1648 if (cl)
1649 cl->filter_cnt++;
1650 else
1651 q->filter_cnt++;
1652 return (unsigned long)cl;
1653 }
1654
1655 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1656 {
1657 struct htb_sched *q = (struct htb_sched *)sch->data;
1658 struct htb_class *cl = (struct htb_class *)arg;
1659 HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
1660 if (cl)
1661 cl->filter_cnt--;
1662 else
1663 q->filter_cnt--;
1664 }
1665
1666 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1667 {
1668 struct htb_sched *q = (struct htb_sched *)sch->data;
1669 int i;
1670
1671 if (arg->stop)
1672 return;
1673
1674 for (i = 0; i < HTB_HSIZE; i++) {
1675 struct list_head *p;
1676 list_for_each (p,q->hash+i) {
1677 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1678 if (arg->count < arg->skip) {
1679 arg->count++;
1680 continue;
1681 }
1682 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1683 arg->stop = 1;
1684 return;
1685 }
1686 arg->count++;
1687 }
1688 }
1689 }
1690
1691 static struct Qdisc_class_ops htb_class_ops =
1692 {
1693 htb_graft,
1694 htb_leaf,
1695 htb_get,
1696 htb_put,
1697 htb_change_class,
1698 htb_delete,
1699 htb_walk,
1700
1701 htb_find_tcf,
1702 htb_bind_filter,
1703 htb_unbind_filter,
1704
1705 htb_dump_class,
1706 };
1707
1708 struct Qdisc_ops htb_qdisc_ops =
1709 {
1710 NULL,
1711 &htb_class_ops,
1712 "htb",
1713 sizeof(struct htb_sched),
1714
1715 htb_enqueue,
1716 htb_dequeue,
1717 htb_requeue,
1718 htb_drop,
1719
1720 htb_init,
1721 htb_reset,
1722 htb_destroy,
1723 NULL /* htb_change */,
1724
1725 htb_dump,
1726 };
1727
1728 #ifdef MODULE
1729 int init_module(void)
1730 {
1731 return register_qdisc(&htb_qdisc_ops);
1732 }
1733
1734 void cleanup_module(void)
1735 {
1736 unregister_qdisc(&htb_qdisc_ops);
1737 }
1738 MODULE_LICENSE("GPL");
1739 #endif
1740