/*
 * net/sched/sch_tbf.c  Token Bucket Filter queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *                                               original idea by Martin Devera
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>


/*      Simple Token Bucket Filter.
        =======================================

        SOURCE.
        -------

        None.

        Description.
        ------------

        A data flow obeys TBF with rate R and depth B, if for any
        time interval t_i...t_f the number of transmitted bits
        does not exceed B + R*(t_f-t_i).

        Packetized version of this definition:
        The sequence of packets of sizes s_i served at moments t_i
        obeys TBF, if for any i<=k:

        s_i+....+s_k <= B + R*(t_k - t_i)

        Algorithm.
        ----------

        Let N(t_i) be B/R initially and N(t) grow continuously with time as:

        N(t+delta) = min{B/R, N(t) + delta}

        If the first packet in queue has length S, it may be
        transmitted only at the time t_* when S/R <= N(t_*),
        and in this case N(t) jumps:

        N(t_* + 0) = N(t_* - 0) - S/R.

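        (Implementation note: in this file N(t) is kept directly in
        scheduler clock units.  q->buffer holds the depth expressed as
        transmission time B/R, q->tokens holds the current N(t), and
        L2T(q, len) looks up len/R, so the update in tbf_dequeue() is
        exactly the formula above, evaluated in integer arithmetic.
        As an illustrative figure, at R = 125 kbytes/sec a 1500-byte
        packet costs 1500/125000 = 12 ms worth of tokens.)
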

        Actually, QoS requires two TBFs to be applied to a data stream.
        One of them controls the steady-state burst size, the other
        one, with rate P (peak rate) and depth M (equal to the link MTU),
        limits bursts at a smaller time scale.

        It is easy to see that P>R and B>M.  If P is infinity, this double
        TBF is equivalent to a single one.

        When TBF works in reshaping mode, latency is estimated as:

        lat = max ((L-B)/R, (L-M)/P)

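        A purely illustrative example, with numbers picked for round
        figures: backlog limit L = 30 kbytes, B = 5 kbytes,
        R = 125 kbytes/sec (1 Mbit), M = 1.5 kbytes,
        P = 1.25 Mbytes/sec (10 Mbit) gives

        lat = max (25k/125k, 28.5k/1250k) = max (200 ms, ~23 ms) = 200 ms,

        i.e. the steady-state rate, not the peak rate, dominates the delay.
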

        NOTES.
        ------

        If TBF throttles, it starts a watchdog timer, which will wake it up
        when it is ready to transmit.
        Note that the minimal timer resolution is 1/HZ.
        If no new packets arrive during this period,
        or if the device is not awakened by EOI for some previous packet,
        TBF can stop its activity for 1/HZ.


        This means that, with depth B, the maximal rate is

        R_crit = B*HZ

        (while throttled, at most one full bucket B can be spent per 1/HZ tick).

        E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

        Note that the peak rate TBF is much tougher: with MTU 1500
        P_crit = 150Kbytes/sec.  So, if you need greater peak
        rates, use Alpha with HZ=1000 :-)

        With classful TBF, limit is just kept for backwards compatibility.
        It is passed to the default bfifo qdisc - if the inner qdisc is
        changed the limit is not effective anymore.
*/
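
/* Configuration is done from user space with tc(8).  A minimal, purely
 * illustrative invocation (option names as in tc-tbf(8); exact spellings
 * and defaults depend on the iproute2 version in use) might look like:
 *
 *      tc qdisc add dev eth0 root tbf rate 0.5mbit burst 5kb latency 70ms
 *
 * which corresponds to R = 0.5 Mbit/sec, B = 5 kbytes, and a byte limit
 * derived by tc from the requested 70 ms worst-case latency.
 */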

struct tbf_sched_data
{
/* Parameters */
        u32             limit;          /* Maximal length of backlog: bytes */
        u32             buffer;         /* Token bucket depth/rate: MUST BE >= MTU/B */
        u32             mtu;
        u32             max_size;
        struct qdisc_rate_table *R_tab;
        struct qdisc_rate_table *P_tab;

/* Variables */
        long            tokens;         /* Current number of B tokens */
        long            ptokens;        /* Current number of P tokens */
        psched_time_t   t_c;            /* Time check-point */
        struct timer_list wd_timer;     /* Watchdog timer */
        struct Qdisc    *qdisc;         /* Inner qdisc, default - bfifo queue */
};

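/* L2T ("length to time") maps a packet length to its transmission time at
 * the configured rate, using the rate table supplied by user space; L2T_P
 * does the same for the peak rate.  Token counts are kept in these same
 * time units.
 */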
#define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
#define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
        int ret;

        if (skb->len > q->max_size) {
                sch->stats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
                if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
                        kfree_skb(skb);

                return NET_XMIT_DROP;
        }

        if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
                sch->stats.drops++;
                return ret;
        }

        sch->q.qlen++;
        sch->stats.bytes += skb->len;
        sch->stats.packets++;
        return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
        int ret;

        if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
                sch->q.qlen++;

        return ret;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
        unsigned int len;

        if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
                sch->q.qlen--;
                sch->stats.drops++;
        }
        return len;
}

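/* Timer callback, armed by tbf_dequeue() when the bucket runs dry: by the
 * time it fires, enough tokens should have accumulated, so clear the
 * throttled flag and ask the device to pull from the qdisc again.
 */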
static void tbf_watchdog(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc *)arg;

        sch->flags &= ~TCQ_F_THROTTLED;
        netif_schedule(sch->dev);
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
        struct sk_buff *skb;

        skb = q->qdisc->dequeue(q->qdisc);

        if (skb) {
                psched_time_t now;
                long toks, delay;
                long ptoks = 0;
                unsigned int len = skb->len;

                PSCHED_GET_TIME(now);

                toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer, 0);
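                /* toks now holds the time elapsed since the last check-point
                 * t_c, clamped to q->buffer so that at most a full bucket of
                 * new tokens can be credited. */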

                if (q->P_tab) {
                        ptoks = toks + q->ptokens;
                        if (ptoks > (long)q->mtu)
                                ptoks = q->mtu;
                        ptoks -= L2T_P(q, len);
                }
                toks += q->tokens;
                if (toks > (long)q->buffer)
                        toks = q->buffer;
                toks -= L2T(q, len);

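                /* (toks|ptoks) is non-negative iff neither value has its
                 * sign bit set, i.e. both buckets can cover this packet. */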
                if ((toks|ptoks) >= 0) {
                        q->t_c = now;
                        q->tokens = toks;
                        q->ptokens = ptoks;
                        sch->q.qlen--;
                        sch->flags &= ~TCQ_F_THROTTLED;
                        return skb;
                }

                delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));

                if (delay == 0)
                        delay = 1;

                mod_timer(&q->wd_timer, jiffies+delay);

                /* Maybe we have a shorter packet in the queue
                   which could be sent now.  It sounds tempting, but it
                   is wrong in principle: we MUST NOT reorder packets
                   under these circumstances.

                   Really, if we split the flow into independent
                   subflows, it would be a very good solution.
                   This is the main idea of all FQ algorithms
                   (cf. CSZ, HPFQ, HFSC).
                 */

                if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
                        /* When requeue fails skb is dropped */
                        sch->q.qlen--;
                        sch->stats.drops++;
                }

                sch->flags |= TCQ_F_THROTTLED;
                sch->stats.overlimits++;
        }
        return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
        PSCHED_GET_TIME(q->t_c);
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
        sch->flags &= ~TCQ_F_THROTTLED;
        del_timer(&q->wd_timer);
}

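/* Build the default bfifo child, with the byte limit taken from the TBF
 * parameters.  Used only while no inner qdisc has been attached; see the
 * note on "limit" at the end of the header comment. */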
static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
{
        struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
        struct rtattr *rta;
        int ret;

        if (q) {
                rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
                if (rta) {
                        rta->rta_type = RTM_NEWQDISC;
                        rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
                        ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

                        ret = q->ops->change(q, rta);
                        kfree(rta);

                        if (ret == 0)
                                return q;
                }
                qdisc_destroy(q);
        }

        return NULL;
}

static int tbf_change(struct Qdisc *sch, struct rtattr *opt)
{
        int err = -EINVAL;
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
        struct rtattr *tb[TCA_TBF_PTAB];
        struct tc_tbf_qopt *qopt;
        struct qdisc_rate_table *rtab = NULL;
        struct qdisc_rate_table *ptab = NULL;
        struct Qdisc *child = NULL;
        int max_size, n;

        if (rtattr_parse(tb, TCA_TBF_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
            tb[TCA_TBF_PARMS-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
                goto done;

        qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
        rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
        if (rtab == NULL)
                goto done;

        if (qopt->peakrate.rate) {
                if (qopt->peakrate.rate > qopt->rate.rate)
                        ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
                if (ptab == NULL)
                        goto done;
        }

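        /* Find the largest packet size whose token cost still fits inside
         * the bucket (and, if configured, the peak bucket).  Anything bigger
         * could never gather enough tokens and is dropped in tbf_enqueue(). */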
        for (n = 0; n < 256; n++)
                if (rtab->data[n] > qopt->buffer)
                        break;
        max_size = (n << qopt->rate.cell_log) - 1;
        if (ptab) {
                int size;

                for (n = 0; n < 256; n++)
                        if (ptab->data[n] > qopt->mtu)
                                break;
                size = (n << qopt->peakrate.cell_log) - 1;
                if (size < max_size)
                        max_size = size;
        }
        if (max_size < 0)
                goto done;

        if (q->qdisc == &noop_qdisc) {
                if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
                        goto done;
        }

        sch_tree_lock(sch);
        if (child)
                q->qdisc = child;
        q->limit = qopt->limit;
        q->mtu = qopt->mtu;
        q->max_size = max_size;
        q->buffer = qopt->buffer;
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
        rtab = xchg(&q->R_tab, rtab);
        ptab = xchg(&q->P_tab, ptab);
        sch_tree_unlock(sch);
        err = 0;
done:
        if (rtab)
                qdisc_put_rtab(rtab);
        if (ptab)
                qdisc_put_rtab(ptab);
        return err;
}

static int tbf_init(struct Qdisc *sch, struct rtattr *opt)
{
        int err;
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

        if (opt == NULL)
                return -EINVAL;

        MOD_INC_USE_COUNT;

        PSCHED_GET_TIME(q->t_c);
        init_timer(&q->wd_timer);
        q->wd_timer.function = tbf_watchdog;
        q->wd_timer.data = (unsigned long)sch;

        q->qdisc = &noop_qdisc;

        if ((err = tbf_change(sch, opt)) != 0) {
                MOD_DEC_USE_COUNT;
        }
        return err;
}

static void tbf_destroy(struct Qdisc *sch)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

        del_timer(&q->wd_timer);

        if (q->P_tab)
                qdisc_put_rtab(q->P_tab);
        if (q->R_tab)
                qdisc_put_rtab(q->R_tab);

        qdisc_destroy(q->qdisc);
        q->qdisc = &noop_qdisc;

        MOD_DEC_USE_COUNT;
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
        unsigned char *b = skb->tail;
        struct rtattr *rta;
        struct tc_tbf_qopt opt;

        rta = (struct rtattr *)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        opt.limit = q->limit;
        opt.rate = q->R_tab->rate;
        if (q->P_tab)
                opt.peakrate = q->P_tab->rate;
        else
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        opt.mtu = q->mtu;
        opt.buffer = q->buffer;
        RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb->tail - b;

        return skb->len;

rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

        if (cl != 1)    /* only one class */
                return -ENOENT;

        tcm->tcm_handle |= TC_H_MIN(1);
        tcm->tcm_info = q->qdisc->handle;

        return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

        if (new == NULL)
                new = &noop_qdisc;

        sch_tree_lock(sch);
        *old = xchg(&q->qdisc, new);
        qdisc_reset(*old);
        sch->q.qlen = 0;
        sch_tree_unlock(sch);

        return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
        return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
        return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct rtattr **tca, unsigned long *arg)
{
        return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
        return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        if (!walker->stop) {
                if (walker->count >= walker->skip)
                        if (walker->fn(sch, 1, walker) < 0) {
                                walker->stop = 1;
                                return;
                        }
                walker->count++;
        }
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
        return NULL;
}

static struct Qdisc_class_ops tbf_class_ops =
{
        .graft          =       tbf_graft,
        .leaf           =       tbf_leaf,
        .get            =       tbf_get,
        .put            =       tbf_put,
        .change         =       tbf_change_class,
        .delete         =       tbf_delete,
        .walk           =       tbf_walk,
        .tcf_chain      =       tbf_find_tcf,
        .dump           =       tbf_dump_class,
};

struct Qdisc_ops tbf_qdisc_ops =
{
        NULL,
        &tbf_class_ops,
        "tbf",
        sizeof(struct tbf_sched_data),

        tbf_enqueue,
        tbf_dequeue,
        tbf_requeue,
        tbf_drop,

        tbf_init,
        tbf_reset,
        tbf_destroy,
        tbf_change,

        tbf_dump,
};


#ifdef MODULE
int init_module(void)
{
        return register_qdisc(&tbf_qdisc_ops);
}

void cleanup_module(void)
{
        unregister_qdisc(&tbf_qdisc_ops);
}
#endif
MODULE_LICENSE("GPL");