/*
 * NET3:	Token ring device handling subroutines
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:       3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *              Added rif table to /proc/net/tr_rif and rif timeout to
 *              /proc/sys/net/token-ring/rif_timeout.
 *              22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *              tr_header and tr_type_trans to handle passing IPX SNAP and
 *              802.2 through the correct layers. Eliminated tr_reformat.
 *
 */

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/net_namespace.h>

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

#define TR_SR_DEBUG 0

/*
 *	Each RIF entry we learn is kept this way
 */

struct rif_cache {
	unsigned char addr[TR_ALEN];
	int iface;
	__be16 rcf;
	__be16 rseg[8];
	struct rif_cache *next;
	unsigned long last_used;
	unsigned char local_ring;
};

#define RIF_TABLE_SIZE 32

/*
 *	We hash the RIF cache 32 ways. We do after all have to look it
 *	up a lot.
 */

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);


/*
 *	Garbage disposal timer.
 */

static struct timer_list rif_timer;

static int sysctl_tr_rif_timeout = 60*10*HZ;

static inline unsigned long rif_hash(const unsigned char *addr)
{
	unsigned long x;

	x = addr[0];
	x = (x << 2) ^ addr[1];
	x = (x << 2) ^ addr[2];
	x = (x << 2) ^ addr[3];
	x = (x << 2) ^ addr[4];
	x = (x << 2) ^ addr[5];

	x ^= x >> 8;

	return x & (RIF_TABLE_SIZE - 1);
}

/*
 *	Put the headers on a token ring packet. Token ring source routing
 *	makes this a little more exciting than on ethernet.
 */

static int tr_header(struct sk_buff *skb, struct net_device *dev,
		     unsigned short type,
		     const void *daddr, const void *saddr, unsigned len)
{
	struct trh_hdr *trh;
	int hdr_len;

	/*
	 * Add the 802.2 SNAP header for IP, IPv6 and ARP, since the
	 * IPv4/IPv6 code calls dev->hard_header directly rather than
	 * going through the 802.2/SNAP layers.
	 */
	if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
	{
		struct trllc *trllc;

		hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
		trllc = (struct trllc *)(trh+1);
		trllc->dsap = trllc->ssap = EXTENDED_SAP;
		trllc->llc = UI_CMD;
		trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
		trllc->ethertype = htons(type);
	}
	else
	{
		hdr_len = sizeof(struct trh_hdr);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
	}

	trh->ac=AC;
	trh->fc=LLC_FRAME;

	if(saddr)
		memcpy(trh->saddr,saddr,dev->addr_len);
	else
		memcpy(trh->saddr,dev->dev_addr,dev->addr_len);

	/*
	 *	Build the destination and then source route the frame
	 */

	if(daddr)
	{
		memcpy(trh->daddr,daddr,dev->addr_len);
		tr_source_route(skb, trh, dev);
		return hdr_len;
	}

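	/*
	 * No destination yet: the header has been pushed but cannot be
	 * completed, so return minus its length; tr_rebuild_header() will
	 * finish the job once neighbour resolution is done.
	 */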
	return -hdr_len;
}

/*
 *	A neighbour discovery of some species (eg arp) has completed. We
 *	can now send the packet.
 */

static int tr_rebuild_header(struct sk_buff *skb)
{
	struct trh_hdr *trh=(struct trh_hdr *)skb->data;
	struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
	struct net_device *dev = skb->dev;

	/*
	 *	FIXME: We don't yet support IPv6 over token rings
	 */

	if(trllc->ethertype != htons(ETH_P_IP)) {
		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
		return 0;
	}

#ifdef CONFIG_INET
	if(arp_find(trh->daddr, skb)) {
			return 1;
	}
	else
#endif
	{
		tr_source_route(skb,trh,dev);
		return 0;
	}
}

/*
 *	Some of this is a bit hackish. We intercept RIF information
 *	used for source routing. We also grab IP directly and don't feed
 *	it via SNAP.
 */

__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{

	struct trh_hdr *trh;
	struct trllc *trllc;
	unsigned riflen=0;

	skb->dev = dev;
	skb_reset_mac_header(skb);
	trh = tr_hdr(skb);

	if(trh->saddr[0] & TR_RII)
		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

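	/*
	 * struct trh_hdr reserves room for the largest possible RIF
	 * (TR_MAXRIFLEN bytes), so the LLC header really starts at
	 * sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen; locate it and
	 * pull the variable-length MAC header off the skb.
	 */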
	trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

	skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

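	/*
	 * On 802.5 the group-address bit is the high-order bit of the
	 * first destination byte (not 0x01 as on ethernet).
	 */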
	if(*trh->daddr & 0x80)
	{
		if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}
	else if (trh->daddr[0] == 0x01 && trh->daddr[1] == 0x00 && trh->daddr[2] == 0x5E)
	{
		/*
		 * 01-00-5E-xx-xx-xx is an IP multicast group address; the
		 * original '&' tests here could never all be true.
		 */
		skb->pkt_type=PACKET_MULTICAST;
	}
	else if(dev->flags & IFF_PROMISC)
	{
		if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
			skb->pkt_type=PACKET_OTHERHOST;
	}

	if ((skb->pkt_type != PACKET_BROADCAST) &&
	    (skb->pkt_type != PACKET_MULTICAST))
		tr_add_rif_info(trh,dev);

	/*
	 * Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */

	if (trllc->dsap == EXTENDED_SAP &&
	    (trllc->ethertype == htons(ETH_P_IP) ||
	     trllc->ethertype == htons(ETH_P_IPV6) ||
	     trllc->ethertype == htons(ETH_P_ARP)))
	{
		skb_pull(skb, sizeof(struct trllc));
		return trllc->ethertype;
	}

	return htons(ETH_P_TR_802_2);
}

/*
 *	We try to do source routing...
 */

void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh,
		     struct net_device *dev)
{
	int slack;
	unsigned int hash;
	struct rif_cache *entry;
	unsigned char *olddata;
	unsigned long flags;
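	/* RFC 1469: IP multicasts on token ring use this functional address */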
	static const unsigned char mcast_func_addr[]
		= {0xC0,0x00,0x00,0x04,0x00,0x00};

	spin_lock_irqsave(&rif_lock, flags);

	/*
	 *	Broadcasts are single route as stated in RFC 1042
	 */
	if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
	    (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN))  )
	{
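		/*
		 * Bare two byte RCF: no route descriptors yet, largest
		 * frame 2K, limited (single route) broadcast.
		 */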
		trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
			       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
		trh->saddr[0]|=TR_RII;
	}
	else
	{
		hash = rif_hash(trh->daddr);
		/*
		 *	Walk the hash table and look for an entry
		 */
		for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);

		/*
		 *	If we found an entry we can route the frame.
		 */
		if(entry)
		{
#if TR_SR_DEBUG
printk("source routing for %pM\n", trh->daddr);
#endif
			if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
			{
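				/*
				 * The cached route was learned from a frame
				 * we received, so flip the direction bit to
				 * traverse the bridges in reverse, and mask
				 * off the broadcast bits so this goes out as
				 * a specifically routed frame.
				 */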
				trh->rcf=entry->rcf;
				memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
				trh->rcf^=htons(TR_RCF_DIR_BIT);
				trh->rcf&=htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */

				trh->saddr[0]|=TR_RII;
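			/*
			 * Note: the closing brace and the else arm below live
			 * inside the #if, so with TR_SR_DEBUG disabled only
			 * the if-branch above remains.
			 */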
#if TR_SR_DEBUG
				printk("entry found with rcf %04x\n", entry->rcf);
			}
			else
			{
				printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
#endif
			}
			entry->last_used=jiffies;
		}
		else
		{
			/*
			 *	Without the information we simply have to shout
			 *	on the wire. The replies should rapidly clean this
			 *	situation up.
			 */
			trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
				       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
			trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
			printk("no entry in rif table found - broadcasting frame\n");
#endif
		}
	}

	/* Compress the RIF here so we don't have to do it in the driver(s) */
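	/*
	 * struct trh_hdr reserves the full 18 byte routing field; "slack"
	 * is the unused tail of it, which we pull off before sliding the
	 * fixed part of the header (plus any real RIF) up against the data.
	 */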
	if (!(trh->saddr[0] & 0x80))
		slack = 18;
	else
		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
	olddata = skb->data;
	spin_unlock_irqrestore(&rif_lock, flags);

	skb_pull(skb, slack);
	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}

/*
 *	We have learned some new RIF information for our source
 *	routing.
 */

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
	unsigned int hash, rii_p = 0;
	unsigned long flags;
	struct rif_cache *entry;
	unsigned char saddr0;

	spin_lock_irqsave(&rif_lock, flags);
	saddr0 = trh->saddr[0];

	/*
	 *	Firstly see if the entry exists
	 */

	if(trh->saddr[0] & TR_RII)
	{
		trh->saddr[0]&=0x7f;
		if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
		{
			rii_p = 1;
		}
	}
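	/*
	 * rii_p is only set when the RIF carries route descriptors beyond
	 * the two byte RCF, i.e. the frame really crossed a bridge;
	 * otherwise the source is treated as being on the local ring.
	 */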

	hash = rif_hash(trh->saddr);
	for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);

	if(entry==NULL)
	{
#if TR_SR_DEBUG
		printk("adding rif_entry: addr:%pM rcf:%04X\n",
		       trh->saddr, ntohs(trh->rcf));
#endif
		/*
		 *	Allocate our new entry. A failure to allocate loses
		 *	us the information. This is harmless.
		 *
		 *	FIXME: We ought to keep some kind of cache size
		 *	limiting and adjust the timers to suit.
		 */
		entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);

		if(!entry)
		{
			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
			spin_unlock_irqrestore(&rif_lock, flags);
			return;
		}

		memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
		entry->iface = dev->ifindex;
		entry->next=rif_table[hash];
		entry->last_used=jiffies;
		rif_table[hash]=entry;

		if (rii_p)
		{
			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
			entry->local_ring = 0;
		}
		else
		{
			entry->local_ring = 1;
		}
	}
	else	/* Y. Tahara added */
	{
		/*
		 *	Update existing entries
		 */
		if (!entry->local_ring)
		    if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
			 !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
		    {
#if TR_SR_DEBUG
printk("updating rif_entry: addr:%pM rcf:%04X\n",
		trh->saddr, ntohs(trh->rcf));
#endif
			    entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			    memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
		    }
		entry->last_used=jiffies;
	}
	trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
	spin_unlock_irqrestore(&rif_lock, flags);
}

/*
 *	Scan the cache with a timer and see what we need to throw out.
 */

static void rif_check_expire(unsigned long dummy)
{
	int i;
	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;

	spin_lock_irqsave(&rif_lock, flags);

	for(i = 0; i < RIF_TABLE_SIZE; i++) {
		struct rif_cache *entry, **pentry;

		pentry = rif_table+i;
		while((entry=*pentry) != NULL) {
			unsigned long expires
				= entry->last_used + sysctl_tr_rif_timeout;

			if (time_before_eq(expires, jiffies)) {
				*pentry = entry->next;
				kfree(entry);
			} else {
				pentry = &entry->next;

				if (time_before(expires, next_interval))
					next_interval = expires;
			}
		}
	}

	spin_unlock_irqrestore(&rif_lock, flags);

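	/*
	 * Re-arm for the earliest remaining expiry, or half the RIF
	 * timeout from now if nothing is due sooner.
	 */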
	mod_timer(&rif_timer, next_interval);

}

/*
 *	Generate the /proc/net information for the token ring RIF
 *	routing.
 */

#ifdef CONFIG_PROC_FS

static struct rif_cache *rif_get_idx(loff_t pos)
{
	int i;
	struct rif_cache *entry;
	loff_t off = 0;

	for(i = 0; i < RIF_TABLE_SIZE; i++)
		for(entry = rif_table[i]; entry; entry = entry->next) {
			if (off == pos)
				return entry;
			++off;
		}

	return NULL;
}

static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(&rif_lock)
{
	spin_lock_irq(&rif_lock);

	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int i;
	struct rif_cache *ent = v;

	++*pos;

	if (v == SEQ_START_TOKEN) {
		i = -1;
		goto scan;
	}

	if (ent->next)
		return ent->next;

	i = rif_hash(ent->addr);
 scan:
	while (++i < RIF_TABLE_SIZE) {
		if ((ent = rif_table[i]) != NULL)
			return ent;
	}
	return NULL;
}

static void rif_seq_stop(struct seq_file *seq, void *v)
	__releases(&rif_lock)
{
	spin_unlock_irq(&rif_lock);
}

static int rif_seq_show(struct seq_file *seq, void *v)
{
	int j, rcf_len, segment, brdgnmb;
	struct rif_cache *entry = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
		     "if     TR address       TTL   rcf   routing segments\n");
	else {
		struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
		long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
				- (long) jiffies;

		seq_printf(seq, "%s %pM %7li ",
			   dev ? dev->name : "?",
			   entry->addr,
			   ttl/HZ);

		if (entry->local_ring)
			seq_puts(seq, "local\n");
		else {
			seq_printf(seq, "%04X", ntohs(entry->rcf));
			rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
			if (rcf_len)
				rcf_len >>= 1;
			for(j = 1; j < rcf_len; j++) {
				if(j==1) {
					segment=ntohs(entry->rseg[j-1])>>4;
					seq_printf(seq,"  %03X",segment);
				}

				segment=ntohs(entry->rseg[j])>>4;
				brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
				seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
			}
			seq_putc(seq, '\n');
		}

		if (dev)
			dev_put(dev);
	}
	return 0;
}


static const struct seq_operations rif_seq_ops = {
	.start = rif_seq_start,
	.next  = rif_seq_next,
	.stop  = rif_seq_stop,
	.show  = rif_seq_show,
};

static int rif_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rif_seq_ops);
}

static const struct file_operations rif_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = rif_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#endif

static const struct header_ops tr_header_ops = {
	.create  = tr_header,
	.rebuild = tr_rebuild_header,
};

static void tr_setup(struct net_device *dev)
{
	/*
	 *	Configure and register
	 */

	dev->header_ops	= &tr_header_ops;

	dev->type		= ARPHRD_IEEE802_TR;
	dev->hard_header_len	= TR_HLEN;
	dev->mtu		= 2000;
	dev->addr_len		= TR_ALEN;
	dev->tx_queue_len	= 100;	/* Long queues on tr */

	memset(dev->broadcast,0xFF, TR_ALEN);

	/* New-style flags. */
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
}

/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}
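
/*
 * Typical driver usage (a sketch only; "struct foo_priv", foo_netdev_ops
 * and the error handling are hypothetical, not part of this file):
 *
 *	dev = alloc_trdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */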

#ifdef CONFIG_SYSCTL
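/*
 * /proc/sys/net/token-ring/rif_timeout holds the RIF cache timeout in
 * jiffies (sysctl_tr_rif_timeout defaults to ten minutes); proc_dointvec
 * reads and writes it as a plain integer.
 */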
static struct ctl_table tr_table[] = {
	{
		.procname	= "rif_timeout",
		.data		= &sysctl_tr_rif_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ },
};

static __initdata struct ctl_path tr_path[] = {
	{ .procname = "net", },
	{ .procname = "token-ring", },
	{ }
};
#endif

/*
 *	Called during bootup.  We don't actually have to initialise
 *	too much for this.
 */

static int __init rif_init(void)
{
	rif_timer.expires = jiffies + sysctl_tr_rif_timeout;
	setup_timer(&rif_timer, rif_check_expire, 0);
	add_timer(&rif_timer);
#ifdef CONFIG_SYSCTL
	register_sysctl_paths(tr_path, tr_table);
#endif
	proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
	return 0;
}

module_init(rif_init);

EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);

MODULE_LICENSE("GPL");