/*
 * Simple traffic shaper for Linux NET3.
 *
 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *     http://www.redhat.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 * warranty for any of this software. This material is provided
 * "AS-IS" and at no charge.
 *
 *
 * Algorithm:
 *
 * Queue Frame:
 *	Compute time length of frame at regulated speed
 *	Add frame to queue at appropriate point
 *	Adjust time length computation for follow-up frames
 *	Any frame that falls outside of its boundaries is freed
 *
 * We work to the following constants
 *
 *	SHAPER_QLEN	Maximum queued frames
 *	SHAPER_LATENCY	Bounding latency on a frame. Leaving this latency
 *			window drops the frame. This stops us queueing
 *			frames for a long time and confusing a remote
 *			host.
 *	SHAPER_MAXSLIP	Maximum time a priority frame may jump forward.
 *			That bounds the penalty we will inflict on low
 *			priority traffic.
 *	SHAPER_BURST	Time range we call "now" in order to reduce
 *			system load. The larger we make this, the burstier
 *			the behaviour: local performance improves through
 *			packet clustering on routers, but the remote end
 *			becomes worse at judging RTTs.
 *
 * This is designed to handle lower speed links ( < 200K/second or so). We
 * run off a 100-150Hz base clock typically. This gives us a resolution at
 * 200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
 * resolution may start to cause much more burstiness in the traffic. We
 * could avoid a lot of that by calling kick_shaper() at the end of the
 * tied device transmissions. If you run above about 100K/second you
 * may need to tune the supposed speed rate for the right values.
 *
 * BUGS:
 *	Downing the interface under the shaper before the shaper
 *	will render your machine defunct. Don't for now shape over
 *	PPP or SLIP therefore!
 *	This will be fixed in BETA4
 *
 * Update History :
 *
 *	bh_atomic() SMP races fixes and rewritten the locking code to
 *	be SMP safe and irq-mask friendly.
 *	NOTE: we can't use start_bh_atomic() in kick_shaper()
 *	because it's going to be called again from an irq handler,
 *	and synchronize_bh() is a no-no if called from irq context.
 *	1999  Andrea Arcangeli
 *
 *	Device statistics (tx_packets, tx_bytes,
 *	tx_dropped: queue_over_time and collisions: max_queue_exceeded)
 *	1999/06/18 Jordi Murgo <savage@apostols.org>
 *
 *	Use skb->cb for private data.
 *	2000/03 Andi Kleen
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <net/dst.h>
#include <net/arp.h>
#include <linux/if_shaper.h>

struct shaper_cb {
	unsigned long	shapeclock;	/* Time it should go out */
	unsigned long	shapestamp;	/* Stamp for shaper */
	__u32		shapelatency;	/* Latency on frame */
	__u32		shapelen;	/* Frame length in clocks */
	__u16		shapepend;	/* Pending */
};
#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))
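
/*
 * Note: skb->cb is a small scratch area owned by whoever currently
 * holds the skb (48 bytes on the 2.4-era kernels this driver targets),
 * so struct shaper_cb must stay within that limit.
 */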

int sh_debug;		/* Debug flag */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"

/*
 *	Locking
 */

static int shaper_lock(struct shaper *sh)
{
	/*
	 *	Lock in an interrupt must fail
	 */
	while (test_and_set_bit(0, &sh->locked))
	{
		if (!in_interrupt())
			sleep_on(&sh->wait_queue);
		else
			return 0;
	}
	return 1;
}

static void shaper_kick(struct shaper *sh);

static void shaper_unlock(struct shaper *sh)
{
	clear_bit(0, &sh->locked);
	wake_up(&sh->wait_queue);
	shaper_kick(sh);
}

/*
 *	Compute clocks on a buffer
 */

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
	int t=skb->len/shaper->bytespertick;
	return t;
}
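
/*
 * Note: the division truncates, so a frame shorter than one
 * bytespertick quantum is charged zero clocks. For illustration,
 * at 80 bytes per tick (see the worked example after
 * shaper_setspeed() below) a 1500-byte frame costs 1500/80 = 18 ticks.
 */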

/*
 * Set the speed of a shaper. We compute this in bytes per tick since
 * that's how the machine wants to run. Quoted input is in bits per second
 * as is traditional (note: not baud). We assume 8-bit bytes.
 */

static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
	shaper->bitspersec=bitspersec;
	shaper->bytespertick=(bitspersec/HZ)/8;
	if(!shaper->bytespertick)
		shaper->bytespertick++;
}
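
/*
 * Worked example (illustrative): with HZ=100, shaper_setspeed(sh, 64000)
 * gives bytespertick = (64000/100)/8 = 80. The clamp to a minimum of 1
 * avoids a division by zero in shaper_clocks() at very low rates.
 */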

/*
 * Throw a frame at a shaper.
 */

static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
{
	struct sk_buff *ptr;

	/*
	 * Get ready to work on this shaper. Lock may fail if it's
	 * an interrupt and locked.
	 */

	if(!shaper_lock(shaper))
		return -1;
	ptr=shaper->sendq.prev;

	/*
	 * Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency=0;
	SHAPERCB(skb)->shapeclock=shaper->recovery;
	if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock=jiffies;
	skb->priority=0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp=jiffies;

	/*
	 * Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);

#ifdef SHAPER_COMPLEX /* and broken.. */

	while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
	{
		if(ptr->priority<skb->priority
			&& jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
		{
			struct sk_buff *tmp=ptr->prev;

			/*
			 * It goes before us therefore we slip the length
			 * of the new frame.
			 */

			SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
			SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;

			/*
			 * The packet may have slipped so far back it
			 * fell off.
			 */
			if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
			{
				skb_unlink(ptr);
				dev_kfree_skb(ptr);
			}
			ptr=tmp;
		}
		else
			break;
	}
	if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
		skb_queue_head(&shaper->sendq,skb);
	else
	{
		struct sk_buff *tmp;
		/*
		 * Set the packet clock out time according to the
		 * frames ahead. I'm sure a bit of thought could drop
		 * this loop.
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
			SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
		skb_append(ptr,skb);
	}
#else
	{
		struct sk_buff *tmp;
		/*
		 * Up our shape clock by the time pending on the queue
		 * (Should keep this in the shaper as a variable..)
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
			tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
			SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
		/*
		 * Queue over time. Spill packet.
		 */
		if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}
#endif
	if(sh_debug)
		printk("Frame queued.\n");
	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
	{
		ptr=skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_unlock(shaper);
	return 0;
}
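
/*
 * Illustrative timeline for the FIFO path above: assuming 80 bytes per
 * tick, three 1500-byte (18-tick) frames queued in quick succession at
 * jiffies time t on an idle shaper get shapeclocks t, t+18 and t+36.
 * A frame whose shapeclock would land more than SHAPER_LATENCY (5*HZ
 * with the stock if_shaper.h values) past "now" is dropped instead and
 * counted in tx_dropped.
 */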

/*
 * Transmit from a shaper
 */

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
	struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
	if(sh_debug)
		printk("Kick frame on %p\n",newskb);
	if(newskb)
	{
		newskb->dev=shaper->dev;
		newskb->priority=2;
		if(sh_debug)
			printk("Kick new frame to %s, %d\n",
				shaper->dev->name,newskb->priority);
		dev_queue_xmit(newskb);

		shaper->stats.tx_bytes += skb->len;
		shaper->stats.tx_packets++;

		if(sh_debug)
			printk("Kicked new frame out.\n");
		dev_kfree_skb(skb);
	}
}

/*
 * Timer handler for shaping clock
 */

static void shaper_timer(unsigned long data)
{
	struct shaper *sh=(struct shaper *)data;
	shaper_kick(sh);
}

/*
 * Kick a shaper queue and try to do something sensible with the
 * queue.
 */

static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 * Shaper unlock will kick
	 */

	if (test_and_set_bit(0, &shaper->locked))
	{
		if(sh_debug)
			printk("Shaper locked.\n");
		mod_timer(&shaper->timer, jiffies);
		return;
	}

	/*
	 * Walk the list (may be empty)
	 */

	while((skb=skb_peek(&shaper->sendq))!=NULL)
	{
		/*
		 * Each packet due to go out by now (within an error
		 * of SHAPER_BURST) gets kicked onto the link
		 */

		if(sh_debug)
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
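		/*
		 * Note: time_before_eq() compares as signed long, so a
		 * frame that is already overdue (shapeclock behind
		 * jiffies, i.e. a negative difference) passes too, and
		 * jiffies wraparound is handled correctly.
		 */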
		if(time_before_eq(SHAPERCB(skb)->shapeclock - jiffies, SHAPER_BURST))
		{
			/*
			 * Pull the frame off the queue.
			 */

			skb_unlink(skb);
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 * Pass on to the physical target device via
			 * our low level packet thrower.
			 */

			SHAPERCB(skb)->shapepend=0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;
	}

	/*
	 * Next kick.
	 */

	if(skb!=NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);

	clear_bit(0, &shaper->locked);
}


/*
 * Flush the shaper queues on a closedown
 */

static void shaper_flush(struct shaper *shaper)
{
	struct sk_buff *skb;
	if(!shaper_lock(shaper))
	{
		printk(KERN_ERR "shaper: shaper_flush() called by an irq!\n");
		return;
	}
	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
		dev_kfree_skb(skb);
	shaper_unlock(shaper);
}

/*
 * Bring the interface up. We just disallow this until a
 * bind.
 */

static int shaper_open(struct net_device *dev)
{
	struct shaper *shaper=dev->priv;

	/*
	 * Can't open until attached.
	 * Also can't open until speed is set, or we'll get
	 * a division by zero.
	 */

	if(shaper->dev==NULL)
		return -ENODEV;
	if(shaper->bitspersec==0)
		return -EINVAL;
	return 0;
}

/*
 * Closing a shaper flushes the queues.
 */

static int shaper_close(struct net_device *dev)
{
	struct shaper *shaper=dev->priv;
	shaper_flush(shaper);
	del_timer_sync(&shaper->timer);
	return 0;
}

/*
 * Revectored calls. We alter the parameters and call the functions
 * for our attached device. This enables us to bandwidth allocate after
 * ARP and other resolutions and not before.
 */

static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *sh=dev->priv;
	return shaper_qframe(sh, skb);
}

static struct net_device_stats *shaper_get_stats(struct net_device *dev)
{
	struct shaper *sh=dev->priv;
	return &sh->stats;
}

static int shaper_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, void *daddr, void *saddr, unsigned len)
{
	struct shaper *sh=dev->priv;
	int v;
	if(sh_debug)
		printk("Shaper header\n");
	skb->dev=sh->dev;
	v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
	skb->dev=dev;
	return v;
}

static int shaper_rebuild_header(struct sk_buff *skb)
{
	struct shaper *sh=skb->dev->priv;
	struct net_device *dev=skb->dev;
	int v;
	if(sh_debug)
		printk("Shaper rebuild header\n");
	skb->dev=sh->dev;
	v=sh->rebuild_header(skb);
	skb->dev=dev;
	return v;
}

#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	struct shaper *sh=neigh->dev->priv;
	struct net_device *tmp;
	int ret;
	if(sh_debug)
		printk("Shaper header cache bind\n");
	tmp=neigh->dev;
	neigh->dev=sh->dev;
	ret=sh->hard_header_cache(neigh,hh);
	neigh->dev=tmp;
	return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
				unsigned char *haddr)
{
	struct shaper *sh=dev->priv;
	if(sh_debug)
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
}
#endif

#ifdef CONFIG_INET

static int shaper_neigh_setup(struct neighbour *n)
{
#ifdef CONFIG_INET
	if (n->nud_state == NUD_NONE) {
		n->ops = &arp_broken_ops;
		n->output = n->ops->output;
	}
#endif
	return 0;
}

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
#ifdef CONFIG_INET
	if (p->tbl->family == AF_INET) {
		p->neigh_setup = shaper_neigh_setup;
		p->ucast_probes = 0;
		p->mcast_probes = 0;
	}
#endif
	return 0;
}

#else /* !(CONFIG_INET) */

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
	return 0;
}

#endif

static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
	sh->dev = dev;
	sh->hard_start_xmit=dev->hard_start_xmit;
	sh->get_stats=dev->get_stats;
	if(dev->hard_header)
	{
		sh->hard_header=dev->hard_header;
		shdev->hard_header = shaper_header;
	}
	else
		shdev->hard_header = NULL;

	if(dev->rebuild_header)
	{
		sh->rebuild_header	= dev->rebuild_header;
		shdev->rebuild_header	= shaper_rebuild_header;
	}
	else
		shdev->rebuild_header	= NULL;

#if 0
	if(dev->hard_header_cache)
	{
		sh->hard_header_cache	= dev->hard_header_cache;
		shdev->hard_header_cache= shaper_cache;
	}
	else
	{
		shdev->hard_header_cache= NULL;
	}

	if(dev->header_cache_update)
	{
		sh->header_cache_update = dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;
	}
	else
		shdev->header_cache_update= NULL;
#else
	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;
#endif
	shdev->neigh_setup = shaper_neigh_setup_dev;

	shdev->hard_header_len=dev->hard_header_len;
	shdev->type=dev->type;
	shdev->addr_len=dev->addr_len;
	shdev->mtu=dev->mtu;
	sh->bitspersec=0;
	return 0;
}

static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_data;
	struct shaper *sh=dev->priv;

	if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
	{
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
	}

	switch(ss->ss_cmd)
	{
		case SHAPER_SET_DEV:
		{
			struct net_device *them=__dev_get_by_name(ss->ss_name);
			if(them==NULL)
				return -ENODEV;
			if(sh->dev)
				return -EBUSY;
			return shaper_attach(dev,dev->priv, them);
		}
		case SHAPER_GET_DEV:
			if(sh->dev==NULL)
				return -ENODEV;
			strcpy(ss->ss_name, sh->dev->name);
			return 0;
		case SHAPER_SET_SPEED:
			shaper_setspeed(sh,ss->ss_speed);
			return 0;
		case SHAPER_GET_SPEED:
			ss->ss_speed=sh->bitspersec;
			return 0;
		default:
			return -EINVAL;
	}
}
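
/*
 * Userspace configuration sketch (illustrative only; the historical
 * shapecfg(8) tool does the equivalent). The shaperconf block rides
 * inline in ifr_ifru, which is what the &ifr->ifr_data cast above
 * expects, and the request reaches do_ioctl via SIOCDEVPRIVATE:
 *
 *	struct ifreq ifr;
 *	struct shaperconf *ss = (struct shaperconf *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "shaper0");
 *	ss->ss_cmd = SHAPER_SET_DEV;	 // attach to a real device first
 *	strcpy(ss->ss_name, "eth0");
 *	ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 *	ss->ss_cmd = SHAPER_SET_SPEED;	 // then set the rate
 *	ss->ss_speed = 64000;		 // bits per second
 *	ioctl(fd, SIOCDEVPRIVATE, &ifr);
 */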

static void shaper_init_priv(struct net_device *dev)
{
	struct shaper *sh = dev->priv;

	skb_queue_head_init(&sh->sendq);
	init_timer(&sh->timer);
	sh->timer.function=shaper_timer;
	sh->timer.data=(unsigned long)sh;
	init_waitqueue_head(&sh->wait_queue);
}

/*
 * Add a shaper device to the system
 */

static int __init shaper_probe(struct net_device *dev)
{
	/*
	 * Set up the shaper.
	 */

	SET_MODULE_OWNER(dev);

	shaper_init_priv(dev);

	dev->open		= shaper_open;
	dev->stop		= shaper_close;
	dev->hard_start_xmit	= shaper_start_xmit;
	dev->get_stats		= shaper_get_stats;
	dev->set_multicast_list = NULL;

	/*
	 * Initialise the packet queues
	 */

	/*
	 * Handlers for when we attach to a device.
	 */

	dev->hard_header	= shaper_header;
	dev->rebuild_header	= shaper_rebuild_header;
#if 0
	dev->hard_header_cache	= shaper_cache;
	dev->header_cache_update= shaper_cache_update;
#endif
	dev->neigh_setup	= shaper_neigh_setup_dev;
	dev->do_ioctl		= shaper_ioctl;
	dev->hard_header_len	= 0;
	dev->type		= ARPHRD_ETHER;	/* initially */
	dev->set_mac_address	= NULL;
	dev->mtu		= 1500;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;
	dev->flags		= 0;

	/*
	 * Shaper is ok
	 */

	return 0;
}

static int shapers = 1;
#ifdef MODULE

MODULE_PARM(shapers, "i");
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

static int __init set_num_shapers(char *str)
{
	shapers = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */

static struct net_device *devs;

static int __init shaper_init(void)
{
	int i, err;
	size_t alloc_size;
	struct shaper *sp;
	unsigned int shapers_registered = 0;

	if (shapers < 1)
		return -ENODEV;

	alloc_size = (sizeof(*devs) * shapers) +
		     (sizeof(struct shaper) * shapers);
	devs = kmalloc(alloc_size, GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	memset(devs, 0, alloc_size);
	sp = (struct shaper *) &devs[shapers];

	for (i = 0; i < shapers; i++) {
		err = dev_alloc_name(&devs[i], "shaper%d");
		if (err < 0)
			break;
		devs[i].init = shaper_probe;
		devs[i].priv = &sp[i];
		if (register_netdev(&devs[i]))
			break;
		shapers_registered++;
	}

	if (!shapers_registered) {
		kfree(devs);
		devs = NULL;
	}

	return (shapers_registered ? 0 : -ENODEV);
}

static void __exit shaper_exit(void)
{
	int i;

	for (i = 0; i < shapers; i++)
		unregister_netdev(&devs[i]);

	kfree(devs);
	devs = NULL;
}

module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");