/*
 * TUN - Universal TUN/TAP device driver.
 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 * Daniel Podlejski <underley@underley.eu.org>
 * Modifications for 2.3.99-pre5 kernel.
 */

#define TUN_VER "1.5"

#include <linux/config.h>
#include <linux/module.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/random.h>

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#ifdef TUN_DEBUG
static int debug;
#endif

/* Network device part of the driver */
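/*
 * Data path overview: frames handed to the interface by the network
 * stack are queued on tun->readq in tun_net_xmit() and picked up by the
 * character-device read path; frames written to the character device
 * are injected into the stack via netif_rx_ni() in tun_get_user().
 */
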
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/* Net device start xmit */
static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = (struct tun_struct *)dev->priv;

	DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->name, skb->len);

	/* Drop packet if interface is not attached */
	if (!tun->attached)
		goto drop;

	/* Queue packet */
	if (!(tun->flags & TUN_ONE_QUEUE)) {
		/* Normal queueing mode.
		 * Packet scheduler handles dropping. */
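		/* When readq fills up, stop the net queue; tun_chr_readv()
		 * restarts it once a frame has been handed to user space. */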
		if (skb_queue_len(&tun->readq) >= TUN_READQ_SIZE)
			netif_stop_queue(dev);
	} else {
		/* Single queue mode.
		 * Driver handles dropping itself. */
		if (skb_queue_len(&tun->readq) >= dev->tx_queue_len)
			goto drop;
	}
	skb_queue_tail(&tun->readq, skb);

	/* Notify and wake up reader process */
	if (tun->flags & TUN_FASYNC)
		kill_fasync(&tun->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&tun->read_wait);
	return 0;

drop:
	tun->stats.tx_dropped++;
	kfree_skb(skb);
	return 0;
}

static void tun_net_mclist(struct net_device *dev)
{
	/* Nothing to do for multicast filters.
	 * We always accept all frames. */
	return;
}

static struct net_device_stats *tun_net_stats(struct net_device *dev)
{
	struct tun_struct *tun = (struct tun_struct *)dev->priv;
	return &tun->stats;
}

/* Initialize net device. */
int tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = (struct tun_struct *)dev->priv;

	DBG(KERN_INFO "%s: tun_net_init\n", tun->name);

	SET_MODULE_OWNER(dev);
	dev->open = tun_net_open;
	dev->hard_start_xmit = tun_net_xmit;
	dev->stop = tun_net_close;
	dev->get_stats = tun_net_stats;

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		dev->tx_queue_len = 10;
		break;

	case TUN_TAP_DEV:
		/* Ethernet TAP Device */
		dev->set_multicast_list = tun_net_mclist;

		/* Generate random Ethernet address. */
		*(u16 *)dev->dev_addr = htons(0x00FF);
		get_random_bytes(dev->dev_addr + sizeof(u16), 4);

		ether_setup(dev);
		break;
	};

	return 0;
}

/* Character device part */

/* Poll */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	unsigned int mask = POLLOUT | POLLWRNORM;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_poll\n", tun->name);

	poll_wait(file, &tun->read_wait, wait);

	if (skb_queue_len(&tun->readq))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

/* Get packet from user space buffer (already verified) */
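/*
 * Unless the interface was created with IFF_NO_PI, every frame crossing
 * the character device is prefixed with a struct tun_pi (flags plus
 * ethernet protocol number); the write path strips it here, and the
 * read path prepends it in tun_put_user().
 */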
static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
{
	struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) };
	struct sk_buff *skb;
	size_t len = count;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) > count)
			return -EINVAL;

		memcpy_fromiovec((void *)&pi, iv, sizeof(pi));
	}

	if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) {
		tun->stats.rx_dropped++;
		return -ENOMEM;
	}

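	/* The usual 2-byte pad: with a 14-byte ethernet header in front
	 * (TAP mode) this leaves the IP header 16-byte aligned. */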
	skb_reserve(skb, 2);
	memcpy_fromiovec(skb_put(skb, len), iv, len);

	skb->dev = &tun->dev;
	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		skb->mac.raw = skb->data;
		skb->protocol = pi.proto;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, &tun->dev);
		break;
	};

	if (tun->flags & TUN_NOCHECKSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_rx_ni(skb);

	tun->stats.rx_packets++;
	tun->stats.rx_bytes += len;

	return count;
}

/* Writev */
static ssize_t tun_chr_writev(struct file *file, const struct iovec *iv,
			      unsigned long count, loff_t *pos)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	unsigned long i;
	size_t len;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_write %lu\n", tun->name, count);

	for (i = 0, len = 0; i < count; i++) {
		if (verify_area(VERIFY_READ, iv[i].iov_base, iv[i].iov_len))
			return -EFAULT;
		len += iv[i].iov_len;
	}

	return tun_get_user(tun, (struct iovec *) iv, len);
}

/* Write */
static ssize_t tun_chr_write(struct file *file, const char *buf,
			     size_t count, loff_t *pos)
{
	struct iovec iv = { (void *) buf, count };
	return tun_chr_writev(file, &iv, 1, pos);
}

/* Put packet to the user space buffer (already verified) */
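/*
 * If the receiving buffer is smaller than the queued frame, only the
 * part that fits is copied and TUN_PKT_STRIP is set in pi.flags so
 * user space can tell the frame was truncated.
 */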
static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
				       struct sk_buff *skb,
				       struct iovec *iv, int len)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) < 0)
			return -EINVAL;

		if (len < skb->len) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		memcpy_toiovec(iv, (void *) &pi, sizeof(pi));
		total += sizeof(pi);
	}

	len = MIN(skb->len, len);

	skb_copy_datagram_iovec(skb, 0, iv, len);
	total += len;

	tun->stats.tx_packets++;
	tun->stats.tx_bytes += len;

	return total;
}

/* Readv */
static ssize_t tun_chr_readv(struct file *file, const struct iovec *iv,
			     unsigned long count, loff_t *pos)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t len, ret = 0;
	unsigned long i;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_read\n", tun->name);

	for (i = 0, len = 0; i < count; i++) {
		if (verify_area(VERIFY_WRITE, iv[i].iov_base, iv[i].iov_len))
			return -EFAULT;
		len += iv[i].iov_len;
	}

	add_wait_queue(&tun->read_wait, &wait);
	while (len) {
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		if (!(skb = skb_dequeue(&tun->readq))) {
			if (file->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		netif_start_queue(&tun->dev);

		ret = tun_put_user(tun, skb, (struct iovec *) iv, len);

		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&tun->read_wait, &wait);

	return ret;
}

/* Read */
static ssize_t tun_chr_read(struct file *file, char *buf,
			    size_t count, loff_t *pos)
{
	struct iovec iv = { buf, count };
	return tun_chr_readv(file, &iv, 1, pos);
}

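/*
 * Typical user-space attach sequence (illustrative sketch only):
 *
 *	fd = open("/dev/net/tun", O_RDWR);
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TUN;	(or IFF_TAP, optionally | IFF_NO_PI)
 *	ioctl(fd, TUNSETIFF, (void *) &ifr);
 *
 * TUNSETIFF lands in tun_set_iff() below; afterwards read() and write()
 * on fd move frames to and from the network stack.
 */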
static int tun_set_iff(struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct net_device *dev;
	int err;

	dev = __dev_get_by_name(ifr->ifr_name);
	if (dev) {
		/* Device exists */
		tun = dev->priv;

		if (dev->init != tun_net_init || tun->attached)
			return -EBUSY;

		/* Check permissions */
		if (tun->owner != -1)
			if (current->euid != tun->owner && !capable(CAP_NET_ADMIN))
				return -EPERM;
	} else {
		char *name;

		/* Allocate new device */
		if (!(tun = kmalloc(sizeof(struct tun_struct), GFP_KERNEL)))
			return -ENOMEM;
		memset(tun, 0, sizeof(struct tun_struct));

		skb_queue_head_init(&tun->readq);
		init_waitqueue_head(&tun->read_wait);

		tun->owner = -1;
		tun->dev.init = tun_net_init;
		tun->dev.priv = tun;

		err = -EINVAL;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			tun->flags |= TUN_TUN_DEV;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			tun->flags |= TUN_TAP_DEV;
			name = "tap%d";
		} else
			goto failed;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		if ((err = dev_alloc_name(&tun->dev, name)) < 0)
			goto failed;
		if ((err = register_netdevice(&tun->dev)))
			goto failed;

		MOD_INC_USE_COUNT;

		tun->name = tun->dev.name;
	}

	DBG(KERN_INFO "%s: tun_set_iff\n", tun->name);

	if (ifr->ifr_flags & IFF_NO_PI)
		tun->flags |= TUN_NO_PI;

	if (ifr->ifr_flags & IFF_ONE_QUEUE)
		tun->flags |= TUN_ONE_QUEUE;

	file->private_data = tun;
	tun->attached = 1;

	strcpy(ifr->ifr_name, tun->name);
	return 0;

failed:
	kfree(tun);
	return err;
}

static int tun_chr_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;

	if (cmd == TUNSETIFF && !tun) {
		struct ifreq ifr;
		int err;

		if (copy_from_user(&ifr, (void *)arg, sizeof(ifr)))
			return -EFAULT;
		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		rtnl_lock();
		err = tun_set_iff(file, &ifr);
		rtnl_unlock();

		if (err)
			return err;

		copy_to_user((void *)arg, &ifr, sizeof(ifr));
		return 0;
	}

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->name, cmd);

	switch (cmd) {
	case TUNSETNOCSUM:
		/* Disable/Enable checksum */
		if (arg)
			tun->flags |= TUN_NOCHECKSUM;
		else
			tun->flags &= ~TUN_NOCHECKSUM;

		DBG(KERN_INFO "%s: checksum %s\n",
		    tun->name, arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode */
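		/* A persistent interface is not unregistered when its file
		 * descriptor is closed (see tun_chr_close()), so it can be
		 * re-attached later with TUNSETIFF. */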
		if (arg)
			tun->flags |= TUN_PERSIST;
		else
			tun->flags &= ~TUN_PERSIST;

		DBG(KERN_INFO "%s: persist %s\n",
		    tun->name, arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		tun->owner = (uid_t) arg;

		DBG(KERN_INFO "%s: owner set to %d\n", tun->name, tun->owner);
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif

	default:
		return -EINVAL;
	};

	return 0;
}

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	int ret;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->name, on);

	if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
		return ret;

	if (on) {
		tun->flags |= TUN_FASYNC;
		if (!file->f_owner.pid) {
			file->f_owner.pid = current->pid;
			file->f_owner.uid = current->uid;
			file->f_owner.euid = current->euid;
		}
	} else
		tun->flags &= ~TUN_FASYNC;

	return 0;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	DBG1(KERN_INFO "tunX: tun_chr_open\n");
	file->private_data = NULL;
	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;

	if (!tun)
		return 0;

	DBG(KERN_INFO "%s: tun_chr_close\n", tun->name);

	tun_chr_fasync(-1, file, 0);

	rtnl_lock();

	/* Detach from net device */
	file->private_data = NULL;
	tun->attached = 0;

	/* Drop read queue */
	skb_queue_purge(&tun->readq);

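	/* Non-persistent interfaces go away with their descriptor;
	 * persistent ones stay registered for a later TUNSETIFF. */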
	if (!(tun->flags & TUN_PERSIST)) {
		dev_close(&tun->dev);
		unregister_netdevice(&tun->dev);
		kfree(tun);
		MOD_DEC_USE_COUNT;
	}

	rtnl_unlock();
	return 0;
}

static struct file_operations tun_fops = {
	owner:		THIS_MODULE,
	llseek:		no_llseek,
	read:		tun_chr_read,
	readv:		tun_chr_readv,
	write:		tun_chr_write,
	writev:		tun_chr_writev,
	poll:		tun_chr_poll,
	ioctl:		tun_chr_ioctl,
	open:		tun_chr_open,
	release:	tun_chr_close,
	fasync:		tun_chr_fasync
};

static struct miscdevice tun_miscdev = {
	TUN_MINOR,
	"net/tun",
	&tun_fops
};

int __init tun_init(void)
{
	printk(KERN_INFO "Universal TUN/TAP device driver %s "
	       "(C)1999-2002 Maxim Krasnyansky\n", TUN_VER);

	if (misc_register(&tun_miscdev)) {
		printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
		return -EIO;
	}

	return 0;
}

void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
}

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_LICENSE("GPL");