#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec      rate;
        u32                     data[256];
        struct qdisc_rate_table *next;
        int                     refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
        __QDISC_STATE_THROTTLED,
};
/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
        __QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
        u16                     data[];
};

struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
        struct sk_buff *        (*dequeue)(struct Qdisc *dev);
        unsigned                flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_WARN_NONWC        (1 << 16)
        int                     padded;
        struct Qdisc_ops        *ops;
        struct qdisc_size_table __rcu *stab;
        struct list_head        list;
        u32                     handle;
        u32                     parent;
        atomic_t                refcnt;
        struct gnet_stats_rate_est rate_est;
        int                     (*reshape_fail)(struct sk_buff *skb,
                                                struct Qdisc *q);

        void                    *u32_node;

        /* This field is deprecated, but it is still used by CBQ
         * and it will live until a better solution is invented.
         */
        struct Qdisc            *__parent;
        struct netdev_queue     *dev_queue;
        struct Qdisc            *next_sched;

        struct sk_buff          *gso_skb;
        /*
         * For performance on SMP, we put the most frequently modified
         * fields at the end.
         */
        unsigned long           state;
        struct sk_buff_head     q;
        struct gnet_stats_basic_packed bstats;
        unsigned int            __state;
        struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
        spinlock_t              busylock;
        u32                     limit;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        qdisc->__state |= __QDISC___STATE_RUNNING;
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
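
/*
 * Usage sketch (illustrative, not part of this header): the transmit
 * path serializes qdisc execution with the helpers above, roughly the
 * way qdisc_run() does.  "q" is a qdisc whose qdisc_lock() the caller
 * already holds:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);		// dequeues packets, then calls
 *					// qdisc_run_end() when done
 *	}
 *	// else: another CPU is already running this qdisc
 */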

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
        return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
        set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
        clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                         struct Qdisc *, struct Qdisc **);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*get)(struct Qdisc *, u32 classid);
        void                    (*put)(struct Qdisc *, unsigned long);
        int                     (*change)(struct Qdisc *, u32, u32,
                                          struct nlattr **, unsigned long *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_proto **     (*tcf_chain)(struct Qdisc *, unsigned long);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                            u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg *);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                              struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;

        int                     (*enqueue)(struct sk_buff *, struct Qdisc *);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);
        unsigned int            (*drop)(struct Qdisc *);

        int                     (*init)(struct Qdisc *, struct nlattr *arg);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *, struct nlattr *arg);
        void                    (*attach)(struct Qdisc *);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module           *owner;
};
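
/*
 * Registration sketch (hedged example): a minimal ops table for a
 * hypothetical classless qdisc.  The "example_*" names are made up;
 * register_qdisc() is declared in net/pkt_sched.h:
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// module init: return register_qdisc(&example_qdisc_ops);
 */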

struct tcf_result {
        unsigned long   class;
        u32             classid;
};

struct tcf_proto_ops {
        struct tcf_proto_ops    *next;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *, struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto *);
        void                    (*destroy)(struct tcf_proto *);

        unsigned long           (*get)(struct tcf_proto *, u32 handle);
        void                    (*put)(struct tcf_proto *, unsigned long);
        int                     (*change)(struct tcf_proto *, unsigned long,
                                          u32 handle, struct nlattr **,
                                          unsigned long *);
        int                     (*delete)(struct tcf_proto *, unsigned long);
        void                    (*walk)(struct tcf_proto *, struct tcf_walker *arg);

        /* rtnetlink specific */
        int                     (*dump)(struct tcf_proto *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg *);

        struct module           *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto        *next;
        void                    *root;
        int                     (*classify)(struct sk_buff *, struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        u32                     classid;
        struct Qdisc            *q;
        void                    *data;
        struct tcf_proto_ops    *ops;
};

struct qdisc_skb_cb {
        unsigned int    pkt_len;
        long            data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
        return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
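
/*
 * Usage sketch (illustrative): a classful qdisc's ->change() handler
 * typically brackets reconfiguration with these helpers so the packet
 * processing paths never see a half-updated tree.  "cl" stands for a
 * hypothetical private class structure:
 *
 *	sch_tree_lock(sch);
 *	cl->quantum = new_quantum;	// update under the root lock
 *	sch_tree_unlock(sch);
 */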

#define tcf_tree_lock(tp)       sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)     sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
        u32                     classid;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        struct hlist_node *n;
        unsigned int h;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}
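
/*
 * Usage sketch (hedged example): a classful qdisc embeds
 * Qdisc_class_common first in its private class struct and wraps
 * qdisc_class_find() in its ->get() operation.  The "example_*" names
 * are hypothetical, and "q->clhash" is assumed to be a
 * struct Qdisc_class_hash in the qdisc's private data:
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		u32 quantum;
 *	};
 *
 *	static unsigned long example_get(struct Qdisc *sch, u32 classid)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *cc;
 *
 *		cc = qdisc_class_find(&q->clhash, classid);
 *		if (cc == NULL)
 *			return 0;
 *		return (unsigned long)container_of(cc,
 *				struct example_class, common);
 *	}
 */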

extern int  qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                                     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                       struct Qdisc_ops *ops, u32 parentid);
extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                                      const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device at or above queue index i. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = netdev_get_tx_queue(dev, i)->qdisc;
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = txq->qdisc;

                if (q->q.qlen)
                        return false;
        }
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (txq->qdisc != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (txq->qdisc != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

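/* The rcu_dereference_bh() below assumes the caller runs in softirq
 * context or under rcu_read_lock_bh(), which holds on the usual
 * enqueue paths.
 */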
static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_skb_cb(skb)->pkt_len = skb->len;
        return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        __skb_queue_tail(list, skb);
        sch->qstats.backlog += qdisc_pkt_len(skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                sch->qstats.backlog -= qdisc_pkt_len(skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        return __qdisc_dequeue_head(sch, &sch->q);
}
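
/*
 * Example (hedged sketch): the helpers above are enough to implement a
 * pfifo-style queue; compare net/sched/sch_fifo.c.  The "example_*"
 * names are made up:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (likely(skb_queue_len(&sch->q) < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_reshape_fail(skb, sch);	// declared below
 *	}
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);
 *	}
 */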

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                sch->qstats.backlog -= len;
                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
        return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue_tail(list);

        if (likely(skb != NULL))
                sch->qstats.backlog -= qdisc_pkt_len(skb);

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
        return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
                if (sch->gso_skb)
                        /* it's still part of the queue */
                        sch->q.qlen++;
        }

        return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = sch->gso_skb;

        if (skb) {
                sch->gso_skb = NULL;
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
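
/*
 * Usage sketch (illustrative): a non-work-conserving qdisc such as TBF
 * peeks first, checks whether the packet may be released yet, and only
 * then commits the dequeue.  "tokens_available()" is a hypothetical
 * helper standing in for the qdisc's own admission test:
 *
 *	skb = sch->ops->peek(sch);		// e.g. qdisc_peek_dequeued
 *	if (skb && tokens_available(sch, skb))
 *		skb = qdisc_dequeue_peeked(sch);	// actually remove it
 *	else
 *		skb = NULL;				// stay throttled
 */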

static inline void __qdisc_reset_queue(struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        /*
         * We do not know the backlog in bytes of this list, it
         * is up to the caller to correct it
         */
        __skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(sch, &sch->q);
        sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
                                              struct sk_buff_head *list)
{
        struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
        return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        kfree_skb(skb);
        sch->qstats.drops++;

        return NET_XMIT_DROP;
}
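
/*
 * Usage sketch (illustrative): a bounded enqueue path commonly
 * tail-drops with qdisc_drop() once its configured limit is reached:
 *
 *	if (unlikely(sch->q.qlen >= sch->limit))
 *		return qdisc_drop(skb, sch);	// frees skb, counts the drop
 *	return qdisc_enqueue_tail(skb, sch);
 */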

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
        sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
        if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
                goto drop;

        return NET_XMIT_SUCCESS;

drop:
#endif
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}
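
/*
 * Worked example (assuming cell_align == 0 and overhead == 0): with
 * cell_log == 3, a 100-byte packet maps to slot 100 >> 3 == 12, so its
 * transmit time is rtab->data[12] ticks.  Sizes beyond 255 << cell_log
 * are extrapolated as data[255] * (slot >> 8) + data[slot & 0xFF].
 */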

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
                                            int action)
{
        struct sk_buff *n;

        n = skb_clone(skb, gfp_mask);

        if (n) {
                n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
                n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
                n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
        }
        return n;
}
#endif

#endif