1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<pedro_m@yahoo.com>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  *	Harald Welte		port neighbour cache rework from 2.6.9-rcX
17  *	Pradeep Vincent		Move neighbour cache entry to stale state
18  */
19 
20 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/sched.h>
26 #include <linux/netdevice.h>
27 #include <linux/proc_fs.h>
28 #ifdef CONFIG_SYSCTL
29 #include <linux/sysctl.h>
30 #endif
31 #include <net/neighbour.h>
32 #include <net/dst.h>
33 #include <net/sock.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
37 
38 #define NEIGH_DEBUG 1
39 
40 #define NEIGH_PRINTK(x...) printk(x)
41 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
42 #define NEIGH_PRINTK0 NEIGH_PRINTK
43 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
44 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
45 
46 #if NEIGH_DEBUG >= 1
47 #undef NEIGH_PRINTK1
48 #define NEIGH_PRINTK1 NEIGH_PRINTK
49 #endif
50 #if NEIGH_DEBUG >= 2
51 #undef NEIGH_PRINTK2
52 #define NEIGH_PRINTK2 NEIGH_PRINTK
53 #endif
54 
55 #define PNEIGH_HASHMASK		0xF
56 
57 static void neigh_timer_handler(unsigned long arg);
58 #ifdef CONFIG_ARPD
59 static void neigh_app_notify(struct neighbour *n);
60 #endif
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63 
64 static int neigh_glbl_allocs;
65 static struct neigh_table *neigh_tables;
66 static struct file_operations neigh_stat_seq_fops;
67 
68 /*
69    Neighbour hash table buckets are protected with rwlock tbl->lock.
70 
71    - All scans/updates of the hash buckets MUST be made under this lock.
72    - NOTHING clever should be done under this lock: no callbacks
73      to protocol backends, no attempts to send something to the network.
74      It will result in deadlocks if the backend/driver wants to use the
75      neighbour cache.
76    - If the entry requires some non-trivial actions, increase
77      its reference count and release the table lock.
78 
79    Neighbour entries are protected:
80    - with the reference count.
81    - with the rwlock neigh->lock.
82 
83    The reference count prevents destruction.
84 
85    neigh->lock mainly serializes the ll address data and its validity state.
86    However, the same lock is also used to protect other entry fields:
87     - timer
88     - resolution queue
89 
90    Again, nothing clever shall be done under neigh->lock;
91    the most complicated procedure we allow is dev->hard_header.
92    It is assumed that dev->hard_header is simple and does
93    not make callbacks to neighbour tables.
94 
95    The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting the
96    list of neighbour tables.  This list is used only in process context.
97  */
98 
99 static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;
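
/*
 * Illustrative sketch (not compiled): the reference-count discipline the
 * locking comment above describes.  Take a reference while the table lock
 * is held, drop the lock, and only then do anything non-trivial.  The
 * helper name "neigh_do_slow_work" is hypothetical.
 */
#if 0
static void neigh_do_slow_work(struct neigh_table *tbl, struct neighbour *n)
{
	read_lock_bh(&tbl->lock);
	neigh_hold(n);			/* pin the entry under the lock */
	read_unlock_bh(&tbl->lock);	/* drop the lock before real work */

	/* Callbacks into protocol backends or drivers are safe here:
	 * the reference keeps the entry from being destroyed. */

	neigh_release(n);		/* may free the entry if it is dead */
}
#endif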
100 
101 static int neigh_blackhole(struct sk_buff *skb)
102 {
103 	kfree_skb(skb);
104 	return -ENETDOWN;
105 }
106 
107 /*
108  * This is a random distribution over the interval (1/2)*base...(3/2)*base.
109  * It corresponds to the default IPv6 settings and is not overridable,
110  * because it is a really reasonable choice.
111  */
112 
113 unsigned long neigh_rand_reach_time(unsigned long base)
114 {
115 	return (base ? (net_random() % base) + (base >> 1) : 0);
116 }
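
/*
 * Worked example (illustrative): with base = 30*HZ, net_random() % base is
 * uniform over [0, 30*HZ) and base >> 1 adds 15*HZ, so the result lies in
 * [15*HZ, 45*HZ) -- i.e. (1/2)*base ... (3/2)*base, as promised above.
 */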
117 
118 
119 static int neigh_forced_gc(struct neigh_table *tbl)
120 {
121 	int shrunk = 0;
122 	int i;
123 
124 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
125 
126 	write_lock_bh(&tbl->lock);
127 	for (i = 0; i <= tbl->hash_mask; i++) {
128 		struct neighbour *n, **np;
129 
130 		np = &tbl->hash_buckets[i];
131 		while ((n = *np) != NULL) {
132 			/* Neighbour record may be discarded if:
133 			 * - nobody refers to it.
134 			 * - it is not permanent
135 			 */
136 			write_lock(&n->lock);
137 			if (atomic_read(&n->refcnt) == 1 &&
138 			    !(n->nud_state&NUD_PERMANENT)) {
139 				*np = n->next;
140 				n->dead = 1;
141 				shrunk = 1;
142 				write_unlock(&n->lock);
143 				neigh_release(n);
144 				continue;
145 			}
146 			write_unlock(&n->lock);
147 			np = &n->next;
148 		}
149 	}
150 
151 	tbl->last_flush = jiffies;
152 
153 	write_unlock_bh(&tbl->lock);
154 
155 	return shrunk;
156 }
157 
158 static int neigh_del_timer(struct neighbour *n)
159 {
160 	if (n->nud_state & NUD_IN_TIMER) {
161 		if (del_timer(&n->timer)) {
162 			neigh_release(n);
163 			return 1;
164 		}
165 	}
166 	return 0;
167 }
168 
169 static void pneigh_queue_purge(struct sk_buff_head *list)
170 {
171 	struct sk_buff *skb;
172 
173 	while ((skb = skb_dequeue(list)) != NULL) {
174 		dev_put(skb->dev);
175 		kfree_skb(skb);
176 	}
177 }
178 
179 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
180 {
181 	int i;
182 
183 	write_lock_bh(&tbl->lock);
184 
185 	for (i=0; i <= tbl->hash_mask; i++) {
186 		struct neighbour *n, **np;
187 
188 		np = &tbl->hash_buckets[i];
189 		while ((n = *np) != NULL) {
190 			if (dev && n->dev != dev) {
191 				np = &n->next;
192 				continue;
193 			}
194 			*np = n->next;
195 			write_lock_bh(&n->lock);
196 			n->dead = 1;
197 			neigh_del_timer(n);
198 			write_unlock_bh(&n->lock);
199 			neigh_release(n);
200 		}
201 	}
202 
203 	write_unlock_bh(&tbl->lock);
204 }
205 
206 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
207 {
208 	int i;
209 
210 	write_lock_bh(&tbl->lock);
211 
212 	for (i = 0; i <= tbl->hash_mask; i++) {
213 		struct neighbour *n, **np;
214 
215 		np = &tbl->hash_buckets[i];
216 		while ((n = *np) != NULL) {
217 			if (dev && n->dev != dev) {
218 				np = &n->next;
219 				continue;
220 			}
221 			*np = n->next;
222 			write_lock(&n->lock);
223 			neigh_del_timer(n);
224 			n->dead = 1;
225 
226 			if (atomic_read(&n->refcnt) != 1) {
227 			/* The most unpleasant situation.
228 			   We must destroy the neighbour entry,
229 			   but someone still uses it.
230 
231 			   Destruction will be delayed until
232 			   the last user releases it, but
233 			   we must kill timers etc. and move
234 			   it to a safe state.
235 			 */
236 				n->parms = &tbl->parms;
237 				skb_queue_purge(&n->arp_queue);
238 				n->output = neigh_blackhole;
239 				if (n->nud_state&NUD_VALID)
240 					n->nud_state = NUD_NOARP;
241 				else
242 					n->nud_state = NUD_NONE;
243 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
244 			}
245 			write_unlock(&n->lock);
246 			neigh_release(n);
247 		}
248 	}
249 
250 	pneigh_ifdown(tbl, dev);
251 	write_unlock_bh(&tbl->lock);
252 
253 	del_timer_sync(&tbl->proxy_timer);
254 	pneigh_queue_purge(&tbl->proxy_queue);
255 	return 0;
256 }
257 
258 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
259 {
260 	struct neighbour *n;
261 	unsigned long now = jiffies;
262 
263 	if (atomic_read(&tbl->entries) > tbl->gc_thresh3 ||
264 	    (atomic_read(&tbl->entries) > tbl->gc_thresh2 &&
265 	     now - tbl->last_flush > 5*HZ)) {
266 		if (neigh_forced_gc(tbl) == 0 &&
267 		    atomic_read(&tbl->entries) > tbl->gc_thresh3)
268 			return NULL;
269 	}
270 
271 	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
272 	if (n == NULL)
273 		return NULL;
274 
275 	memset(n, 0, tbl->entry_size);
276 
277 	skb_queue_head_init(&n->arp_queue);
278 	n->lock = RW_LOCK_UNLOCKED;
279 	n->updated = n->used = now;
280 	n->nud_state = NUD_NONE;
281 	n->output = neigh_blackhole;
282 	n->parms = &tbl->parms;
283 	init_timer(&n->timer);
284 	n->timer.function = neigh_timer_handler;
285 	n->timer.data = (unsigned long)n;
286 	NEIGH_CACHE_STAT_INC(tbl, allocs);
287 	neigh_glbl_allocs++;
288 	atomic_inc(&tbl->entries);
289 	n->tbl = tbl;
290 	atomic_set(&n->refcnt, 1);
291 	n->dead = 1;
292 	return n;
293 }
294 
295 static struct neighbour **neigh_hash_alloc(unsigned int entries)
296 {
297 	unsigned long size = entries * sizeof(struct neighbour *);
298 	struct neighbour **ret;
299 
300 	if (size <= PAGE_SIZE) {
301 		ret = kmalloc(size, GFP_ATOMIC);
302 	} else {
303 		ret = (struct neighbour **)
304 			__get_free_pages(GFP_ATOMIC, get_order(size));
305 	}
306 	if (ret)
307 		memset(ret, 0, size);
308 
309 	return ret;
310 }
311 
312 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
313 {
314 	unsigned long size = entries * sizeof(struct neighbour *);
315 
316 	if (size <= PAGE_SIZE)
317 		kfree(hash);
318 	else
319 		free_pages((unsigned long)hash, get_order(size));
320 }
321 
322 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
323 {
324 	struct neighbour **new_hash, **old_hash;
325 	unsigned int i, new_hash_mask, old_entries;
326 
327 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
328 
329 	BUG_ON(new_entries & (new_entries - 1));
330 	new_hash = neigh_hash_alloc(new_entries);
331 	if (!new_hash)
332 		return;
333 
334 	old_entries = tbl->hash_mask + 1;
335 	new_hash_mask = new_entries - 1;
336 	old_hash = tbl->hash_buckets;
337 
338 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
339 	for (i = 0; i < old_entries; i++) {
340 		struct neighbour *n, *next;
341 
342 		for (n = old_hash[i]; n; n = next) {
343 			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
344 
345 			hash_val &= new_hash_mask;
346 			next = n->next;
347 
348 			n->next = new_hash[hash_val];
349 			new_hash[hash_val] = n;
350 		}
351 	}
352 	tbl->hash_buckets = new_hash;
353 	tbl->hash_mask = new_hash_mask;
354 
355 	neigh_hash_free(old_hash, old_entries);
356 }
357 
358 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
359 			       struct net_device *dev)
360 {
361 	struct neighbour *n;
362 	int key_len = tbl->key_len;
363 	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
364 
365 	NEIGH_CACHE_STAT_INC(tbl, lookups);
366 
367 	read_lock_bh(&tbl->lock);
368 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
369 		if (dev == n->dev &&
370 		    memcmp(n->primary_key, pkey, key_len) == 0) {
371 			neigh_hold(n);
372 			NEIGH_CACHE_STAT_INC(tbl, hits);
373 			break;
374 		}
375 	}
376 	read_unlock_bh(&tbl->lock);
377 	return n;
378 }
379 
380 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
381 {
382 	struct neighbour *n;
383 	int key_len = tbl->key_len;
384 	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
385 
386 	NEIGH_CACHE_STAT_INC(tbl, lookups);
387 
388 	read_lock_bh(&tbl->lock);
389 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
390 		if (!memcmp(n->primary_key, pkey, key_len)) {
391 			neigh_hold(n);
392 			NEIGH_CACHE_STAT_INC(tbl, hits);
393 			break;
394 		}
395 	}
396 	read_unlock_bh(&tbl->lock);
397 	return n;
398 }
399 
400 struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey,
401 				struct net_device *dev)
402 {
403 	struct neighbour *n, *n1;
404 	u32 hash_val;
405 	int key_len = tbl->key_len;
406 	int error;
407 
408 	n = neigh_alloc(tbl);
409 	if (n == NULL)
410 		return ERR_PTR(-ENOBUFS);
411 
412 	memcpy(n->primary_key, pkey, key_len);
413 	n->dev = dev;
414 	dev_hold(dev);
415 
416 	/* Protocol specific setup. */
417 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
418 		neigh_release(n);
419 		return ERR_PTR(error);
420 	}
421 
422 	/* Device specific setup. */
423 	if (n->parms->neigh_setup &&
424 	    (error = n->parms->neigh_setup(n)) < 0) {
425 		neigh_release(n);
426 		return ERR_PTR(error);
427 	}
428 
429 	n->confirmed = jiffies - (n->parms->base_reachable_time<<1);
430 
431 	write_lock_bh(&tbl->lock);
432 	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
433 		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
434 
435 	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
436 
437 	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
438 		if (dev == n1->dev &&
439 		    memcmp(n1->primary_key, pkey, key_len) == 0) {
440 			neigh_hold(n1);
441 			write_unlock_bh(&tbl->lock);
442 			neigh_release(n);
443 			return n1;
444 		}
445 	}
446 
447 	n->next = tbl->hash_buckets[hash_val];
448 	tbl->hash_buckets[hash_val] = n;
449 	n->dead = 0;
450 	neigh_hold(n);
451 	write_unlock_bh(&tbl->lock);
452 	NEIGH_PRINTK2("neigh %p is created.\n", n);
453 	return n;
454 }
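
/*
 * Illustrative sketch (not compiled): the lookup-or-create pattern a
 * protocol is expected to use around neigh_create(), including its
 * ERR_PTR() return convention.  This is essentially what the
 * __neigh_lookup() helpers do; "resolve_key" is a hypothetical caller.
 */
#if 0
static struct neighbour *resolve_key(struct neigh_table *tbl,
				     const void *key,
				     struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, key, dev);

	if (!n) {
		n = neigh_create(tbl, key, dev);
		if (IS_ERR(n))
			return NULL;	/* allocation or constructor failed */
	}
	return n;	/* reference held; caller must neigh_release() */
}
#endif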
455 
456 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
457 				    struct net_device *dev, int creat)
458 {
459 	struct pneigh_entry *n;
460 	u32 hash_val;
461 	int key_len = tbl->key_len;
462 
463 	hash_val = *(u32*)(pkey + key_len - 4);
464 	hash_val ^= (hash_val>>16);
465 	hash_val ^= hash_val>>8;
466 	hash_val ^= hash_val>>4;
467 	hash_val &= PNEIGH_HASHMASK;
468 
469 	read_lock_bh(&tbl->lock);
470 
471 	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
472 		if (memcmp(n->key, pkey, key_len) == 0 &&
473 		    (n->dev == dev || !n->dev)) {
474 			read_unlock_bh(&tbl->lock);
475 			return n;
476 		}
477 	}
478 	read_unlock_bh(&tbl->lock);
479 	if (!creat)
480 		return NULL;
481 
482 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
483 	if (n == NULL)
484 		return NULL;
485 
486 	memcpy(n->key, pkey, key_len);
487 	n->dev = dev;
488 
489 	if (tbl->pconstructor && tbl->pconstructor(n)) {
490 		kfree(n);
491 		return NULL;
492 	}
493 
494 	write_lock_bh(&tbl->lock);
495 	n->next = tbl->phash_buckets[hash_val];
496 	tbl->phash_buckets[hash_val] = n;
497 	write_unlock_bh(&tbl->lock);
498 	return n;
499 }
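
/*
 * Worked example (illustrative): the fold above compresses the last 32 bits
 * of the key into 4 bits.  Each xor-shift mixes higher bits into the low
 * nibble, and the final mask with PNEIGH_HASHMASK (0xF) selects one of the
 * 16 proxy hash chains.
 */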
500 
501 
502 int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct net_device *dev)
503 {
504 	struct pneigh_entry *n, **np;
505 	u32 hash_val;
506 	int key_len = tbl->key_len;
507 
508 	hash_val = *(u32*)(pkey + key_len - 4);
509 	hash_val ^= (hash_val>>16);
510 	hash_val ^= hash_val>>8;
511 	hash_val ^= hash_val>>4;
512 	hash_val &= PNEIGH_HASHMASK;
513 
514 	write_lock_bh(&tbl->lock);
515 	for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) {
516 		if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) {
517 			*np = n->next;
518 			write_unlock_bh(&tbl->lock);
519 			if (tbl->pdestructor)
520 				tbl->pdestructor(n);
521 			kfree(n);
522 			return 0;
523 		}
524 	}
525 	write_unlock_bh(&tbl->lock);
526 	return -ENOENT;
527 }
528 
529 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
530 {
531 	struct pneigh_entry *n, **np;
532 	u32 h;
533 
534 	for (h=0; h<=PNEIGH_HASHMASK; h++) {
535 		np = &tbl->phash_buckets[h];
536 		while ((n=*np) != NULL) {
537 			if (n->dev == dev || dev == NULL) {
538 				*np = n->next;
539 				if (tbl->pdestructor)
540 					tbl->pdestructor(n);
541 				kfree(n);
542 				continue;
543 			}
544 			np = &n->next;
545 		}
546 	}
547 	return -ENOENT;
548 }
549 
550 
551 /*
552  *	The neighbour must already be unlinked
553  *	from the table.
554  */
555 void neigh_destroy(struct neighbour *neigh)
556 {
557 	struct hh_cache *hh;
558 
559 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
560 
561 	if (!neigh->dead) {
562 		printk("Destroying alive neighbour %p\n", neigh);
563 		dump_stack();
564 		return;
565 	}
566 
567 	if (neigh_del_timer(neigh))
568 		printk("Impossible event.\n");
569 
570 	while ((hh = neigh->hh) != NULL) {
571 		neigh->hh = hh->hh_next;
572 		hh->hh_next = NULL;
573 		write_lock_bh(&hh->hh_lock);
574 		hh->hh_output = neigh_blackhole;
575 		write_unlock_bh(&hh->hh_lock);
576 		if (atomic_dec_and_test(&hh->hh_refcnt))
577 			kfree(hh);
578 	}
579 
580 	if (neigh->ops && neigh->ops->destructor)
581 		(neigh->ops->destructor)(neigh);
582 
583 	skb_queue_purge(&neigh->arp_queue);
584 
585 	dev_put(neigh->dev);
586 
587 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
588 
589 	neigh_glbl_allocs--;
590 	atomic_dec(&neigh->tbl->entries);
591 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
592 }
593 
594 /* Neighbour state is suspicious;
595    disable fast path.
596 
597    Called with the neigh write-locked.
598  */
599 static void neigh_suspect(struct neighbour *neigh)
600 {
601 	struct hh_cache *hh;
602 
603 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
604 
605 	neigh->output = neigh->ops->output;
606 
607 	for (hh = neigh->hh; hh; hh = hh->hh_next)
608 		hh->hh_output = neigh->ops->output;
609 }
610 
611 /* Neighbour state is OK;
612    enable fast path.
613 
614    Called with the neigh write-locked.
615  */
616 static void neigh_connect(struct neighbour *neigh)
617 {
618 	struct hh_cache *hh;
619 
620 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
621 
622 	neigh->output = neigh->ops->connected_output;
623 
624 	for (hh = neigh->hh; hh; hh = hh->hh_next)
625 		hh->hh_output = neigh->ops->hh_output;
626 }
627 
628 /*
629    Transitions NUD_STALE <-> NUD_REACHABLE do not occur
630    while the fast path is in use: we have no timers associated with
631    these states, and no time to check the state when sending.
632    neigh_periodic_timer periodically checks the neigh->confirmed
633    time and moves NUD_REACHABLE -> NUD_STALE.
634 
635    If a routine wants to know the TRUE entry state, it calls
636    neigh_sync before checking the state.
637 
638    Called with the neigh write-locked.
639  */
640 
641 static void neigh_sync(struct neighbour *n)
642 {
643 	unsigned long now = jiffies;
644 	u8 state = n->nud_state;
645 
646 	if (state&(NUD_NOARP|NUD_PERMANENT))
647 		return;
648 	if (state&NUD_REACHABLE) {
649 		if (now - n->confirmed > n->parms->reachable_time) {
650 			n->nud_state = NUD_STALE;
651 			neigh_suspect(n);
652 		}
653 	} else if (state&NUD_VALID) {
654 		if (now - n->confirmed < n->parms->reachable_time) {
655 			neigh_del_timer(n);
656 			n->nud_state = NUD_REACHABLE;
657 			neigh_connect(n);
658 		}
659 	}
660 }
661 
662 static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
663 {
664 	struct neigh_table *tbl = (struct neigh_table*)arg;
665 	struct neighbour *n, **np;
666 	unsigned long expire, now = jiffies;
667 
668 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
669 
670 	write_lock(&tbl->lock);
671 
672 	/*
673 	 *	periodically recompute ReachableTime from random function
674 	 */
675 
676 	if (now - tbl->last_rand > 300*HZ) {
677 		struct neigh_parms *p;
678 		tbl->last_rand = now;
679 		for (p=&tbl->parms; p; p = p->next)
680 			p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
681 	}
682 
683 	np = &tbl->hash_buckets[tbl->hash_chain_gc];
684 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
685 
686 	while ((n = *np) != NULL) {
687 		unsigned int state;
688 
689 		write_lock(&n->lock);
690 
691 		state = n->nud_state;
692 		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
693 			write_unlock(&n->lock);
694 			goto next_elt;
695 		}
696 
697 		if (time_before(n->used, n->confirmed))
698 			n->used = n->confirmed;
699 
700 		if (atomic_read(&n->refcnt) == 1 &&
701 		    (state == NUD_FAILED ||
702 		     time_after(now, n->used + n->parms->gc_staletime))) {
703 			*np = n->next;
704 			n->dead = 1;
705 			write_unlock(&n->lock);
706 			neigh_release(n);
707 			continue;
708 		}
709 
710 		/* Mark it stale - To be reconfirmed later when used */
711 		if (n->nud_state & NUD_REACHABLE &&
712 		    now - n->confirmed > n->parms->reachable_time) {
713 			n->nud_state = NUD_STALE;
714 			neigh_suspect(n);
715 		}
716 
717 		write_unlock(&n->lock);
718 
719 next_elt:
720 		np = &n->next;
721 	}
722 
723  	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
724  	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
725  	 * base_reachable_time.
726 	 */
727 	expire = tbl->parms.base_reachable_time >> 1;
728 	expire /= (tbl->hash_mask + 1);
729 	if (!expire)
730 		expire = 1;
731 
732  	mod_timer(&tbl->gc_timer, now + expire);
733 
734 	write_unlock(&tbl->lock);
735 }
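
/*
 * Worked example (illustrative): with base_reachable_time = 30*HZ and a
 * 16-bucket hash, expire = (15*HZ)/16, so one bucket is scanned roughly
 * every second and the whole table is covered every base_reachable_time/2,
 * as the comment above requires.
 */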
736 
737 #ifdef CONFIG_SMP
738 static void neigh_periodic_timer(unsigned long arg)
739 {
740 	struct neigh_table *tbl = (struct neigh_table*)arg;
741 
742 	tasklet_schedule(&tbl->gc_task);
743 }
744 #endif
745 
746 static __inline__ int neigh_max_probes(struct neighbour *n)
747 {
748 	struct neigh_parms *p = n->parms;
749 	return p->ucast_probes + p->app_probes + p->mcast_probes;
750 }
751 
752 
753 /* Called when a timer expires for a neighbour entry. */
754 
755 static void neigh_timer_handler(unsigned long arg)
756 {
757 	unsigned long now = jiffies;
758 	struct neighbour *neigh = (struct neighbour*)arg;
759 	struct sk_buff *skb;
760 	unsigned state;
761 	int notify = 0;
762 
763 	write_lock(&neigh->lock);
764 
765 	state = neigh->nud_state;
766 
767 	if (!(state&NUD_IN_TIMER)) {
768 #ifndef CONFIG_SMP
769 		printk("neigh: timer & !nud_in_timer\n");
770 #endif
771 		goto out;
772 	}
773 
774 	if ((state&NUD_VALID) &&
775 	    now - neigh->confirmed < neigh->parms->reachable_time) {
776 		neigh->nud_state = NUD_REACHABLE;
777 		NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
778 		neigh_connect(neigh);
779 		goto out;
780 	}
781 	if (state == NUD_DELAY) {
782 		NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
783 		neigh->nud_state = NUD_PROBE;
784 		atomic_set(&neigh->probes, 0);
785 	}
786 
787 	if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
788 		struct sk_buff *skb;
789 
790 		neigh->nud_state = NUD_FAILED;
791 		notify = 1;
792 		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
793 		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
794 
795 		/* This is a very delicate spot. report_unreachable is a very
796 		   complicated routine. In particular, it can hit the same neighbour entry!
797 
798 		   So we try to be careful and avoid a dead loop. --ANK
799 		 */
800 		while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
801 			write_unlock(&neigh->lock);
802 			neigh->ops->error_report(neigh, skb);
803 			write_lock(&neigh->lock);
804 		}
805 		skb_queue_purge(&neigh->arp_queue);
806 		goto out;
807 	}
808 
809 	neigh->timer.expires = now + neigh->parms->retrans_time;
810 	add_timer(&neigh->timer);
811 
812 	/* keep skb alive even if arp_queue overflows */
813 	skb = skb_peek(&neigh->arp_queue);
814 	if (skb)
815 		skb_get(skb);
816 
817 	write_unlock(&neigh->lock);
818 
819 	neigh->ops->solicit(neigh, skb);
820 	atomic_inc(&neigh->probes);
821 
822 	if (skb)
823 		kfree_skb(skb);
824 
825 	return;
826 
827 out:
828 	write_unlock(&neigh->lock);
829 #ifdef CONFIG_ARPD
830 	if (notify && neigh->parms->app_probes)
831 		neigh_app_notify(neigh);
832 #endif
833 	neigh_release(neigh);
834 }
835 
836 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
837 {
838 	write_lock_bh(&neigh->lock);
839 	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
840 		if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
841 			if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
842 				atomic_set(&neigh->probes, neigh->parms->ucast_probes);
843 				neigh->nud_state = NUD_INCOMPLETE;
844 				neigh_hold(neigh);
845 				neigh->timer.expires = jiffies + neigh->parms->retrans_time;
846 				add_timer(&neigh->timer);
847 				write_unlock_bh(&neigh->lock);
848 				neigh->ops->solicit(neigh, skb);
849 				atomic_inc(&neigh->probes);
850 				write_lock_bh(&neigh->lock);
851 			} else {
852 				neigh->nud_state = NUD_FAILED;
853 				write_unlock_bh(&neigh->lock);
854 
855 				if (skb)
856 					kfree_skb(skb);
857 				return 1;
858 			}
859 		}
860 		if (neigh->nud_state == NUD_INCOMPLETE) {
861 			if (skb) {
862 				if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) {
863 					struct sk_buff *buff;
864 					buff = neigh->arp_queue.next;
865 					__skb_unlink(buff, &neigh->arp_queue);
866 					kfree_skb(buff);
867 				}
868 				__skb_queue_tail(&neigh->arp_queue, skb);
869 			}
870 			write_unlock_bh(&neigh->lock);
871 			return 1;
872 		}
873 		if (neigh->nud_state == NUD_STALE) {
874 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
875 			neigh_hold(neigh);
876 			neigh->nud_state = NUD_DELAY;
877 			neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
878 			add_timer(&neigh->timer);
879 		}
880 	}
881 	write_unlock_bh(&neigh->lock);
882 	return 0;
883 }
884 
885 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
886 {
887 	struct hh_cache *hh;
888 	void (*update)(struct hh_cache*, struct net_device*, unsigned char*) =
889 		neigh->dev->header_cache_update;
890 
891 	if (update) {
892 		for (hh=neigh->hh; hh; hh=hh->hh_next) {
893 			write_lock_bh(&hh->hh_lock);
894 			update(hh, neigh->dev, neigh->ha);
895 			write_unlock_bh(&hh->hh_lock);
896 		}
897 	}
898 }
899 
900 
901 
902 /* Generic update routine.
903    -- lladdr is the new lladdr, or NULL if it is not supplied.
904    -- new    is the new state.
905    -- override==1 allows overriding the existing lladdr if it differs.
906    -- arp==0 means that the change is administrative.
907 
908    The caller MUST hold a reference count on the entry.
909  */
910 
911 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp)
912 {
913 	u8 old;
914 	int err;
915 	int notify = 0;
916 	struct net_device *dev = neigh->dev;
917 
918 	write_lock_bh(&neigh->lock);
919 	old = neigh->nud_state;
920 
921 	err = -EPERM;
922 	if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
923 		goto out;
924 
925 	if (!(new&NUD_VALID)) {
926 		neigh_del_timer(neigh);
927 		if (old&NUD_CONNECTED)
928 			neigh_suspect(neigh);
929 		neigh->nud_state = new;
930 		err = 0;
931 		notify = old&NUD_VALID;
932 		goto out;
933 	}
934 
935 	/* Compare new lladdr with cached one */
936 	if (dev->addr_len == 0) {
937 		/* First case: device needs no address. */
938 		lladdr = neigh->ha;
939 	} else if (lladdr) {
940 		/* The second case: if something is already cached
941 		   and a new address is proposed:
942 		   - compare new & old
943 		   - if they are different, check override flag
944 		 */
945 		if (old&NUD_VALID) {
946 			if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
947 				lladdr = neigh->ha;
948 			else if (!override)
949 				goto out;
950 		}
951 	} else {
952 		/* No address is supplied; if we know something,
953 		   use it, otherwise discard the request.
954 		 */
955 		err = -EINVAL;
956 		if (!(old&NUD_VALID))
957 			goto out;
958 		lladdr = neigh->ha;
959 	}
960 
961 	neigh_sync(neigh);
962 	old = neigh->nud_state;
963 	if (new&NUD_CONNECTED)
964 		neigh->confirmed = jiffies;
965 	neigh->updated = jiffies;
966 
967 	/* If the entry was valid and the address has not changed,
968 	   do not change the entry state if the new one is STALE.
969 	 */
970 	err = 0;
971 	if (old&NUD_VALID) {
972 		if (lladdr == neigh->ha)
973 			if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
974 				goto out;
975 	}
976 	neigh_del_timer(neigh);
977 	neigh->nud_state = new;
978 	if (lladdr != neigh->ha) {
979 		memcpy(&neigh->ha, lladdr, dev->addr_len);
980 		neigh_update_hhs(neigh);
981 		if (!(new&NUD_CONNECTED))
982 			neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
983 #ifdef CONFIG_ARPD
984 		notify = 1;
985 #endif
986 	}
987 	if (new == old)
988 		goto out;
989 	if (new&NUD_CONNECTED)
990 		neigh_connect(neigh);
991 	else
992 		neigh_suspect(neigh);
993 	if (!(old&NUD_VALID)) {
994 		struct sk_buff *skb;
995 
996 		/* Again: avoid a dead loop if something went wrong */
997 
998 		while (neigh->nud_state&NUD_VALID &&
999 		       (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
1000 			struct neighbour *n1 = neigh;
1001 			write_unlock_bh(&neigh->lock);
1002 			/* On shaper/eql skb->dst->neighbour != neigh :( */
1003 			if (skb->dst && skb->dst->neighbour)
1004 				n1 = skb->dst->neighbour;
1005 			n1->output(skb);
1006 			write_lock_bh(&neigh->lock);
1007 		}
1008 		skb_queue_purge(&neigh->arp_queue);
1009 	}
1010 out:
1011 	write_unlock_bh(&neigh->lock);
1012 #ifdef CONFIG_ARPD
1013 	if (notify && neigh->parms->app_probes)
1014 		neigh_app_notify(neigh);
1015 #endif
1016 	return err;
1017 }
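
/*
 * Illustrative sketch (not compiled): how a protocol applies the rules
 * documented above when a resolution reply arrives.  The lookup takes the
 * reference that neigh_update() requires; override=0 refuses to replace a
 * different cached lladdr, and arp=1 marks this as a normal update learned
 * from the wire.  "example_recv_reply" and its arguments are hypothetical.
 */
#if 0
static void example_recv_reply(struct neigh_table *tbl, const void *saddr,
			       u8 *lladdr, struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, saddr, dev);

	if (n) {
		neigh_update(n, lladdr, NUD_REACHABLE, 0, 1);
		neigh_release(n);
	}
}
#endif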
1018 
1019 struct neighbour * neigh_event_ns(struct neigh_table *tbl,
1020 				  u8 *lladdr, void *saddr,
1021 				  struct net_device *dev)
1022 {
1023 	struct neighbour *neigh;
1024 
1025 	neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len);
1026 	if (neigh)
1027 		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
1028 	return neigh;
1029 }
1030 
1031 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol)
1032 {
1033 	struct hh_cache	*hh = NULL;
1034 	struct net_device *dev = dst->dev;
1035 
1036 	for (hh=n->hh; hh; hh = hh->hh_next)
1037 		if (hh->hh_type == protocol)
1038 			break;
1039 
1040 	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1041 		memset(hh, 0, sizeof(struct hh_cache));
1042 		hh->hh_lock = RW_LOCK_UNLOCKED;
1043 		hh->hh_type = protocol;
1044 		atomic_set(&hh->hh_refcnt, 0);
1045 		hh->hh_next = NULL;
1046 		if (dev->hard_header_cache(n, hh)) {
1047 			kfree(hh);
1048 			hh = NULL;
1049 		} else {
1050 			atomic_inc(&hh->hh_refcnt);
1051 			hh->hh_next = n->hh;
1052 			n->hh = hh;
1053 			if (n->nud_state&NUD_CONNECTED)
1054 				hh->hh_output = n->ops->hh_output;
1055 			else
1056 				hh->hh_output = n->ops->output;
1057 		}
1058 	}
1059 	if (hh)	{
1060 		atomic_inc(&hh->hh_refcnt);
1061 		dst->hh = hh;
1062 	}
1063 }
1064 
1065 /* This function can be used in contexts where only the old dev_queue_xmit
1066    worked, e.g. if you want to override the normal output path (eql, shaper)
1067    but resolution has not been made yet.
1068  */
1069 
1070 int neigh_compat_output(struct sk_buff *skb)
1071 {
1072 	struct net_device *dev = skb->dev;
1073 
1074 	__skb_pull(skb, skb->nh.raw - skb->data);
1075 
1076 	if (dev->hard_header &&
1077 	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 &&
1078 	    dev->rebuild_header(skb))
1079 		return 0;
1080 
1081 	return dev_queue_xmit(skb);
1082 }
1083 
1084 /* Slow and careful. */
1085 
1086 int neigh_resolve_output(struct sk_buff *skb)
1087 {
1088 	struct dst_entry *dst = skb->dst;
1089 	struct neighbour *neigh;
1090 
1091 	if (!dst || !(neigh = dst->neighbour))
1092 		goto discard;
1093 
1094 	__skb_pull(skb, skb->nh.raw - skb->data);
1095 
1096 	if (neigh_event_send(neigh, skb) == 0) {
1097 		int err;
1098 		struct net_device *dev = neigh->dev;
1099 		if (dev->hard_header_cache && dst->hh == NULL) {
1100 			write_lock_bh(&neigh->lock);
1101 			if (dst->hh == NULL)
1102 				neigh_hh_init(neigh, dst, dst->ops->protocol);
1103 			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
1104 			write_unlock_bh(&neigh->lock);
1105 		} else {
1106 			read_lock_bh(&neigh->lock);
1107 			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
1108 			read_unlock_bh(&neigh->lock);
1109 		}
1110 		if (err >= 0)
1111 			return neigh->ops->queue_xmit(skb);
1112 		kfree_skb(skb);
1113 		return -EINVAL;
1114 	}
1115 	return 0;
1116 
1117 discard:
1118 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL);
1119 	kfree_skb(skb);
1120 	return -EINVAL;
1121 }
1122 
1123 /* As fast as possible without hh cache */
1124 
1125 int neigh_connected_output(struct sk_buff *skb)
1126 {
1127 	int err;
1128 	struct dst_entry *dst = skb->dst;
1129 	struct neighbour *neigh = dst->neighbour;
1130 	struct net_device *dev = neigh->dev;
1131 
1132 	__skb_pull(skb, skb->nh.raw - skb->data);
1133 
1134 	read_lock_bh(&neigh->lock);
1135 	err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
1136 	read_unlock_bh(&neigh->lock);
1137 	if (err >= 0)
1138 		return neigh->ops->queue_xmit(skb);
1139 	kfree_skb(skb);
1140 	return -EINVAL;
1141 }
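
/*
 * Note (illustrative summary): a neighbour thus has three transmit paths of
 * decreasing cost -- neigh_resolve_output() while resolution may still be
 * pending, neigh_connected_output() once the entry is NUD_CONNECTED, and
 * hh->hh_output() when a cached hardware header exists.  neigh_connect()
 * and neigh_suspect() above switch the entry between them.
 */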
1142 
1143 static void neigh_proxy_process(unsigned long arg)
1144 {
1145 	struct neigh_table *tbl = (struct neigh_table *)arg;
1146 	long sched_next = 0;
1147 	unsigned long now = jiffies;
1148 	struct sk_buff *skb;
1149 
1150 	spin_lock(&tbl->proxy_queue.lock);
1151 
1152 	skb = tbl->proxy_queue.next;
1153 
1154 	while (skb != (struct sk_buff*)&tbl->proxy_queue) {
1155 		struct sk_buff *back = skb;
1156 		long tdif = back->stamp.tv_usec - now;
1157 
1158 		skb = skb->next;
1159 		if (tdif <= 0) {
1160 			struct net_device *dev = back->dev;
1161 			__skb_unlink(back, &tbl->proxy_queue);
1162 			if (tbl->proxy_redo && netif_running(dev))
1163 				tbl->proxy_redo(back);
1164 			else
1165 				kfree_skb(back);
1166 
1167 			dev_put(dev);
1168 		} else if (!sched_next || tdif < sched_next)
1169 			sched_next = tdif;
1170 	}
1171 	del_timer(&tbl->proxy_timer);
1172 	if (sched_next)
1173 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1174 	spin_unlock(&tbl->proxy_queue.lock);
1175 }
1176 
1177 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1178 		    struct sk_buff *skb)
1179 {
1180 	unsigned long now = jiffies;
1181 	long sched_next = net_random()%p->proxy_delay;
1182 
1183 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1184 		kfree_skb(skb);
1185 		return;
1186 	}
1187 	skb->stamp.tv_sec = 0;
1188 	skb->stamp.tv_usec = now + sched_next;
1189 
1190 	spin_lock(&tbl->proxy_queue.lock);
1191 	if (del_timer(&tbl->proxy_timer)) {
1192 		long tval = tbl->proxy_timer.expires - now;
1193 		if (tval < sched_next)
1194 			sched_next = tval;
1195 	}
1196 	dst_release(skb->dst);
1197 	skb->dst = NULL;
1198 	dev_hold(skb->dev);
1199 	__skb_queue_tail(&tbl->proxy_queue, skb);
1200 	mod_timer(&tbl->proxy_timer, now + sched_next);
1201 	spin_unlock(&tbl->proxy_queue.lock);
1202 }
1203 
1204 
1205 struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl)
1206 {
1207 	struct neigh_parms *p;
1208 	p = kmalloc(sizeof(*p), GFP_KERNEL);
1209 	if (p) {
1210 		memcpy(p, &tbl->parms, sizeof(*p));
1211 		p->tbl = tbl;
1212 		p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
1213 		if (dev && dev->neigh_setup) {
1214 			if (dev->neigh_setup(dev, p)) {
1215 				kfree(p);
1216 				return NULL;
1217 			}
1218 		}
1219 		p->sysctl_table = NULL;
1220 		write_lock_bh(&tbl->lock);
1221 		p->next = tbl->parms.next;
1222 		tbl->parms.next = p;
1223 		write_unlock_bh(&tbl->lock);
1224 	}
1225 	return p;
1226 }
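
/*
 * Illustrative sketch (not compiled): typical per-device use.  A protocol
 * clones the table defaults for a device and must pair the allocation with
 * neigh_parms_release() on teardown.  "example_dev_attach" is hypothetical;
 * "tbl" stands for a protocol table such as IPv4's arp_tbl.
 */
#if 0
static int example_dev_attach(struct neigh_table *tbl, struct net_device *dev)
{
	struct neigh_parms *parms = neigh_parms_alloc(dev, tbl);

	if (!parms)
		return -ENOBUFS;
	/* ... use parms for this device's neighbour entries ... */
	neigh_parms_release(tbl, parms);
	return 0;
}
#endif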
1227 
1228 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1229 {
1230 	struct neigh_parms **p;
1231 
1232 	if (parms == NULL || parms == &tbl->parms)
1233 		return;
1234 	write_lock_bh(&tbl->lock);
1235 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1236 		if (*p == parms) {
1237 			*p = parms->next;
1238 			write_unlock_bh(&tbl->lock);
1239 #ifdef CONFIG_SYSCTL
1240 			neigh_sysctl_unregister(parms);
1241 #endif
1242 			kfree(parms);
1243 			return;
1244 		}
1245 	}
1246 	write_unlock_bh(&tbl->lock);
1247 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1248 }
1249 
1250 
1251 void neigh_table_init(struct neigh_table *tbl)
1252 {
1253 	unsigned long now = jiffies;
1254 	unsigned long phsize;
1255 
1256 	tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time);
1257 
1258 	if (tbl->kmem_cachep == NULL)
1259 		tbl->kmem_cachep = kmem_cache_create(tbl->id,
1260 						     (tbl->entry_size+15)&~15,
1261 						     0, SLAB_HWCACHE_ALIGN,
1262 						     NULL, NULL);
1263 
1264 	if (!tbl->kmem_cachep)
1265 		panic("cannot create neighbour cache");
1266 
1267 #ifdef CONFIG_PROC_FS
1268 	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
1269 	if (!tbl->pde)
1270 		panic("cannot create neighbour proc dir entry");
1271 	tbl->pde->proc_fops = &neigh_stat_seq_fops;
1272 	tbl->pde->data = tbl;
1273 #endif
1274 
1275 	tbl->hash_mask = 1;
1276 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1277 
1278 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1279 	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
1280 
1281 	if (!tbl->hash_buckets || !tbl->phash_buckets)
1282 		panic("cannot allocate neighbour cache hashes");
1283 
1284 	memset(tbl->phash_buckets, 0, phsize);
1285 
1286 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1287 
1288 #ifdef CONFIG_SMP
1289 	tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), (unsigned long)tbl);
1290 #endif
1291 	init_timer(&tbl->gc_timer);
1292 	tbl->lock = RW_LOCK_UNLOCKED;
1293 	tbl->gc_timer.data = (unsigned long)tbl;
1294 	tbl->gc_timer.function = neigh_periodic_timer;
1295 	tbl->gc_timer.expires = now + 1;
1296 	add_timer(&tbl->gc_timer);
1297 
1298 	init_timer(&tbl->proxy_timer);
1299 	tbl->proxy_timer.data = (unsigned long)tbl;
1300 	tbl->proxy_timer.function = neigh_proxy_process;
1301 	skb_queue_head_init(&tbl->proxy_queue);
1302 
1303 	tbl->last_flush = now;
1304 	tbl->last_rand = now + tbl->parms.reachable_time*20;
1305 	write_lock(&neigh_tbl_lock);
1306 	tbl->next = neigh_tables;
1307 	neigh_tables = tbl;
1308 	write_unlock(&neigh_tbl_lock);
1309 }
1310 
1311 int neigh_table_clear(struct neigh_table *tbl)
1312 {
1313 	struct neigh_table **tp;
1314 
1315 	/* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1316 	del_timer_sync(&tbl->gc_timer);
1317 	tasklet_kill(&tbl->gc_task);
1318 	del_timer_sync(&tbl->proxy_timer);
1319 	pneigh_queue_purge(&tbl->proxy_queue);
1320 	neigh_ifdown(tbl, NULL);
1321 	if (atomic_read(&tbl->entries))
1322 		printk(KERN_CRIT "neighbour leakage\n");
1323 	write_lock(&neigh_tbl_lock);
1324 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1325 		if (*tp == tbl) {
1326 			*tp = tbl->next;
1327 			break;
1328 		}
1329 	}
1330 	write_unlock(&neigh_tbl_lock);
1331 
1332 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1333 	tbl->hash_buckets = NULL;
1334 
1335 	kfree(tbl->phash_buckets);
1336 	tbl->phash_buckets = NULL;
1337 
1338 #ifdef CONFIG_SYSCTL
1339 	neigh_sysctl_unregister(&tbl->parms);
1340 #endif
1341 	return 0;
1342 }
1343 
1344 int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1345 {
1346 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1347 	struct rtattr **nda = arg;
1348 	struct neigh_table *tbl;
1349 	struct net_device *dev = NULL;
1350 	int err = 0;
1351 
1352 	if (ndm->ndm_ifindex) {
1353 		if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1354 			return -ENODEV;
1355 	}
1356 
1357 	read_lock(&neigh_tbl_lock);
1358 	for (tbl=neigh_tables; tbl; tbl = tbl->next) {
1359 		struct neighbour *n;
1360 
1361 		if (tbl->family != ndm->ndm_family)
1362 			continue;
1363 		read_unlock(&neigh_tbl_lock);
1364 
1365 		err = -EINVAL;
1366 		if (nda[NDA_DST-1] == NULL ||
1367 		    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
1368 			goto out;
1369 
1370 		if (ndm->ndm_flags&NTF_PROXY) {
1371 			err = pneigh_delete(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1372 			goto out;
1373 		}
1374 
1375 		if (dev == NULL)
1376 			return -EINVAL;
1377 
1378 		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1379 		if (n) {
1380 			err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
1381 			neigh_release(n);
1382 		}
1383 out:
1384 		if (dev)
1385 			dev_put(dev);
1386 		return err;
1387 	}
1388 	read_unlock(&neigh_tbl_lock);
1389 
1390 	if (dev)
1391 		dev_put(dev);
1392 
1393 	return -EADDRNOTAVAIL;
1394 }
1395 
1396 int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1397 {
1398 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1399 	struct rtattr **nda = arg;
1400 	struct neigh_table *tbl;
1401 	struct net_device *dev = NULL;
1402 
1403 	if (ndm->ndm_ifindex) {
1404 		if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1405 			return -ENODEV;
1406 	}
1407 
1408 	read_lock(&neigh_tbl_lock);
1409 	for (tbl=neigh_tables; tbl; tbl = tbl->next) {
1410 		int err = 0;
1411 		int override = 1;
1412 		struct neighbour *n;
1413 
1414 		if (tbl->family != ndm->ndm_family)
1415 			continue;
1416 		read_unlock(&neigh_tbl_lock);
1417 
1418 		err = -EINVAL;
1419 		if (nda[NDA_DST-1] == NULL ||
1420 		    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
1421 			goto out;
1422 		if (ndm->ndm_flags&NTF_PROXY) {
1423 			err = -ENOBUFS;
1424 			if (pneigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 1))
1425 				err = 0;
1426 			goto out;
1427 		}
1428 		if (dev == NULL)
1429 			return -EINVAL;
1430 		err = -EINVAL;
1431 		if (nda[NDA_LLADDR-1] != NULL &&
1432 		    nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
1433 			goto out;
1434 		err = 0;
1435 		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1436 		if (n) {
1437 			if (nlh->nlmsg_flags&NLM_F_EXCL)
1438 				err = -EEXIST;
1439 			override = nlh->nlmsg_flags&NLM_F_REPLACE;
1440 		} else if (!(nlh->nlmsg_flags&NLM_F_CREATE))
1441 			err = -ENOENT;
1442 		else {
1443 			n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1444 			if (IS_ERR(n)) {
1445 				err = PTR_ERR(n);
1446 				n = NULL;
1447 			}
1448 		}
1449 		if (err == 0) {
1450 			err = neigh_update(n, nda[NDA_LLADDR-1] ? RTA_DATA(nda[NDA_LLADDR-1]) : NULL,
1451 					   ndm->ndm_state,
1452 					   override, 0);
1453 		}
1454 		if (n)
1455 			neigh_release(n);
1456 out:
1457 		if (dev)
1458 			dev_put(dev);
1459 		return err;
1460 	}
1461 	read_unlock(&neigh_tbl_lock);
1462 
1463 	if (dev)
1464 		dev_put(dev);
1465 	return -EADDRNOTAVAIL;
1466 }
1467 
1468 
1469 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1470 			   u32 pid, u32 seq, int event)
1471 {
1472 	unsigned long now = jiffies;
1473 	struct ndmsg *ndm;
1474 	struct nlmsghdr  *nlh;
1475 	unsigned char	 *b = skb->tail;
1476 	struct nda_cacheinfo ci;
1477 	int locked = 0;
1478 
1479 	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ndm));
1480 	ndm = NLMSG_DATA(nlh);
1481 	nlh->nlmsg_flags = pid ? NLM_F_MULTI : 0;
1482 	ndm->ndm_family = n->ops->family;
1483 	ndm->ndm_pad1 = 0;
1484 	ndm->ndm_pad2 = 0;
1485 	ndm->ndm_flags = n->flags;
1486 	ndm->ndm_type = n->type;
1487 	ndm->ndm_ifindex = n->dev->ifindex;
1488 	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
1489 	read_lock_bh(&n->lock);
1490 	locked=1;
1491 	ndm->ndm_state = n->nud_state;
1492 	if (n->nud_state&NUD_VALID)
1493 		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
1494 	ci.ndm_used = now - n->used;
1495 	ci.ndm_confirmed = now - n->confirmed;
1496 	ci.ndm_updated = now - n->updated;
1497 	ci.ndm_refcnt = atomic_read(&n->refcnt) - 1;
1498 	read_unlock_bh(&n->lock);
1499 	locked=0;
1500 	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1501 	nlh->nlmsg_len = skb->tail - b;
1502 	return skb->len;
1503 
1504 nlmsg_failure:
1505 rtattr_failure:
1506 	if (locked)
1507 		read_unlock_bh(&n->lock);
1508 	skb_trim(skb, b - skb->data);
1509 	return -1;
1510 }
1511 
1512 
1513 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb)
1514 {
1515 	struct neighbour *n;
1516 	int h, s_h;
1517 	int idx, s_idx;
1518 
1519 	s_h = cb->args[1];
1520 	s_idx = idx = cb->args[2];
1521 	for (h=0; h <= tbl->hash_mask; h++) {
1522 		if (h < s_h) continue;
1523 		if (h > s_h)
1524 			s_idx = 0;
1525 		read_lock_bh(&tbl->lock);
1526 		for (n = tbl->hash_buckets[h], idx = 0; n;
1527 		     n = n->next, idx++) {
1528 			if (idx < s_idx)
1529 				continue;
1530 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
1531 					    cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
1532 				read_unlock_bh(&tbl->lock);
1533 				cb->args[1] = h;
1534 				cb->args[2] = idx;
1535 				return -1;
1536 			}
1537 		}
1538 		read_unlock_bh(&tbl->lock);
1539 	}
1540 
1541 	cb->args[1] = h;
1542 	cb->args[2] = idx;
1543 	return skb->len;
1544 }
1545 
1546 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1547 {
1548 	int t;
1549 	int s_t;
1550 	struct neigh_table *tbl;
1551 	int family = ((struct rtgenmsg*)NLMSG_DATA(cb->nlh))->rtgen_family;
1552 
1553 	s_t = cb->args[0];
1554 
1555 	read_lock(&neigh_tbl_lock);
1556 	for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) {
1557 		if (t < s_t) continue;
1558 		if (family && tbl->family != family)
1559 			continue;
1560 		if (t > s_t)
1561 			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1562 		if (neigh_dump_table(tbl, skb, cb) < 0)
1563 			break;
1564 	}
1565 	read_unlock(&neigh_tbl_lock);
1566 
1567 	cb->args[0] = t;
1568 
1569 	return skb->len;
1570 }
1571 
1572 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1573 {
1574 	int chain;
1575 
1576 	read_lock_bh(&tbl->lock);
1577 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1578 		struct neighbour *n;
1579 
1580 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
1581 			cb(n, cookie);
1582 	}
1583 	read_unlock_bh(&tbl->lock);
1584 }
1585 EXPORT_SYMBOL(neigh_for_each);
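
/*
 * Illustrative sketch (not compiled): counting live entries with
 * neigh_for_each().  The callback runs with tbl->lock read-held, so it must
 * obey the "nothing clever" rule from the top of this file.  "count_entry"
 * is hypothetical.
 */
#if 0
static void count_entry(struct neighbour *n, void *cookie)
{
	(*(int *)cookie)++;
}

/* caller:  int count = 0;  neigh_for_each(tbl, count_entry, &count);  */
#endif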
1586 
1587 /* The tbl->lock must be held as a writer and BH disabled. */
1588 void __neigh_for_each_release(struct neigh_table *tbl,
1589 			      int (*cb)(struct neighbour *))
1590 {
1591 	int chain;
1592 
1593 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1594 		struct neighbour *n, **np;
1595 
1596 		np = &tbl->hash_buckets[chain];
1597 		while ((n = *np) != NULL) {
1598 			int release;
1599 
1600 			write_lock(&n->lock);
1601 			release = cb(n);
1602 			if (release) {
1603 				*np = n->next;
1604 				n->dead = 1;
1605 			} else
1606 				np = &n->next;
1607 			write_unlock(&n->lock);
1608 			if (release)
1609 				neigh_release(n);
1610 		}
1611 	}
1612 }
1613 EXPORT_SYMBOL(__neigh_for_each_release);
1614 
1615 #ifdef CONFIG_PROC_FS
1616 
1617 static struct neighbour *neigh_get_first(struct seq_file *seq)
1618 {
1619 	struct neigh_seq_state *state = seq->private;
1620 	struct neigh_table *tbl = state->tbl;
1621 	struct neighbour *n = NULL;
1622 	int bucket = state->bucket;
1623 
1624 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
1625 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
1626 		n = tbl->hash_buckets[bucket];
1627 
1628 		while (n) {
1629 			if (state->neigh_sub_iter) {
1630 				loff_t fakep = 0;
1631 				void *v;
1632 
1633 				v = state->neigh_sub_iter(state, n, &fakep);
1634 				if (!v)
1635 					goto next;
1636 			}
1637 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
1638 				break;
1639 			if (n->nud_state & ~NUD_NOARP)
1640 				break;
1641 		next:
1642 			n = n->next;
1643 		}
1644 
1645 		if (n)
1646 			break;
1647 	}
1648 	state->bucket = bucket;
1649 
1650 	return n;
1651 }
1652 
1653 static struct neighbour *neigh_get_next(struct seq_file *seq,
1654 					struct neighbour *n,
1655 					loff_t *pos)
1656 {
1657 	struct neigh_seq_state *state = seq->private;
1658 	struct neigh_table *tbl = state->tbl;
1659 
1660 	if (state->neigh_sub_iter) {
1661 		void *v = state->neigh_sub_iter(state, n, pos);
1662 		if (v)
1663 			return n;
1664 	}
1665 	n = n->next;
1666 
1667 	while (1) {
1668 		while (n) {
1669 			if (state->neigh_sub_iter) {
1670 				void *v = state->neigh_sub_iter(state, n, pos);
1671 				if (v)
1672 					return n;
1673 				goto next;
1674 			}
1675 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
1676 				break;
1677 
1678 			if (n->nud_state & ~NUD_NOARP)
1679 				break;
1680 		next:
1681 			n = n->next;
1682 		}
1683 
1684 		if (n)
1685 			break;
1686 
1687 		if (++state->bucket > tbl->hash_mask)
1688 			break;
1689 
1690 		n = tbl->hash_buckets[state->bucket];
1691 	}
1692 
1693 	if (n && pos)
1694 		--(*pos);
1695 	return n;
1696 }
1697 
1698 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
1699 {
1700 	struct neighbour *n = neigh_get_first(seq);
1701 
1702 	if (n) {
1703 		while (*pos) {
1704 			n = neigh_get_next(seq, n, pos);
1705 			if (!n)
1706 				break;
1707 		}
1708 	}
1709 	return *pos ? NULL : n;
1710 }
1711 
1712 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
1713 {
1714 	struct neigh_seq_state *state = seq->private;
1715 	struct neigh_table *tbl = state->tbl;
1716 	struct pneigh_entry *pn = NULL;
1717 	int bucket = state->bucket;
1718 
1719 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
1720 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
1721 		pn = tbl->phash_buckets[bucket];
1722 		if (pn)
1723 			break;
1724 	}
1725 	state->bucket = bucket;
1726 
1727 	return pn;
1728 }
1729 
1730 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
1731 					    struct pneigh_entry *pn,
1732 					    loff_t *pos)
1733 {
1734 	struct neigh_seq_state *state = seq->private;
1735 	struct neigh_table *tbl = state->tbl;
1736 
1737 	pn = pn->next;
1738 	while (!pn) {
1739 		if (++state->bucket > PNEIGH_HASHMASK)
1740 			break;
1741 		pn = tbl->phash_buckets[state->bucket];
1742 		if (pn)
1743 			break;
1744 	}
1745 
1746 	if (pn && pos)
1747 		--(*pos);
1748 
1749 	return pn;
1750 }
1751 
1752 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
1753 {
1754 	struct pneigh_entry *pn = pneigh_get_first(seq);
1755 
1756 	if (pn) {
1757 		while (*pos) {
1758 			pn = pneigh_get_next(seq, pn, pos);
1759 			if (!pn)
1760 				break;
1761 		}
1762 	}
1763 	return *pos ? NULL : pn;
1764 }
1765 
1766 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
1767 {
1768 	struct neigh_seq_state *state = seq->private;
1769 	void *rc;
1770 
1771 	rc = neigh_get_idx(seq, pos);
1772 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
1773 		rc = pneigh_get_idx(seq, pos);
1774 
1775 	return rc;
1776 }
1777 
1778 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
1779 {
1780 	struct neigh_seq_state *state = seq->private;
1781 	loff_t pos_minus_one;
1782 
1783 	state->tbl = tbl;
1784 	state->bucket = 0;
1785 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
1786 
1787 	read_lock_bh(&tbl->lock);
1788 
1789 	pos_minus_one = *pos - 1;
1790 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
1791 }
1792 
1793 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1794 {
1795 	struct neigh_seq_state *state;
1796 	void *rc;
1797 
1798 	if (v == SEQ_START_TOKEN) {
1799 		rc = neigh_get_idx(seq, pos);
1800 		goto out;
1801 	}
1802 
1803 	state = seq->private;
1804 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
1805 		rc = neigh_get_next(seq, v, NULL);
1806 		if (rc)
1807 			goto out;
1808 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
1809 			rc = pneigh_get_first(seq);
1810 	} else {
1811 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
1812 		rc = pneigh_get_next(seq, v, NULL);
1813 	}
1814 out:
1815 	++(*pos);
1816 	return rc;
1817 }
1818 
1819 void neigh_seq_stop(struct seq_file *seq, void *v)
1820 {
1821 	struct neigh_seq_state *state = seq->private;
1822 	struct neigh_table *tbl = state->tbl;
1823 
1824 	read_unlock_bh(&tbl->lock);
1825 }
1826 
1827 /* statistics via seq_file */
1828 
1829 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
1830 {
1831 	struct proc_dir_entry *pde = seq->private;
1832 	struct neigh_table *tbl = pde->data;
1833 	int lcpu;
1834 
1835 	if (*pos == 0)
1836 		return SEQ_START_TOKEN;
1837 
1838 	for (lcpu = *pos-1; lcpu < smp_num_cpus; ++lcpu) {
1839 		int i = cpu_logical_map(lcpu);
1840 		*pos = lcpu+1;
1841 		return &tbl->stats[i];
1842 	}
1843 	return NULL;
1844 }
1845 
1846 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1847 {
1848 	struct proc_dir_entry *pde = seq->private;
1849 	struct neigh_table *tbl = pde->data;
1850 	int lcpu;
1851 
1852 	for (lcpu = *pos; lcpu < smp_num_cpus; ++lcpu) {
1853 		int i = cpu_logical_map(lcpu);
1854 		*pos = lcpu+1;
1855 		return &tbl->stats[i];
1856 	}
1857 	return NULL;
1858 }
1859 
1860 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
1861 {
1862 
1863 }
1864 
1865 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
1866 {
1867 	struct proc_dir_entry *pde = seq->private;
1868 	struct neigh_table *tbl = pde->data;
1869 	struct neigh_statistics *st = v;
1870 
1871 	if (v == SEQ_START_TOKEN) {
1872 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
1873 		return 0;
1874 	}
1875 
1876 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
1877 			"%08lx %08lx  %08lx %08lx\n",
1878 		   atomic_read(&tbl->entries),
1879 
1880 		   st->allocs,
1881 		   st->destroys,
1882 		   st->hash_grows,
1883 
1884 		   st->lookups,
1885 		   st->hits,
1886 
1887 		   st->res_failed,
1888 
1889 		   st->rcv_probes_mcast,
1890 		   st->rcv_probes_ucast,
1891 
1892 		   st->periodic_gc_runs,
1893 		   st->forced_gc_runs
1894 		   );
1895 
1896 	return 0;
1897 }
1898 
1899 static struct seq_operations neigh_stat_seq_ops = {
1900 	.start	= neigh_stat_seq_start,
1901 	.next	= neigh_stat_seq_next,
1902 	.stop	= neigh_stat_seq_stop,
1903 	.show	= neigh_stat_seq_show,
1904 };
1905 
1906 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
1907 {
1908 	int ret = seq_open(file, &neigh_stat_seq_ops);
1909 
1910 	if (!ret) {
1911 		struct seq_file *sf = file->private_data;
1912 		sf->private = PDE(inode);
1913 	}
1914 	return ret;
1915 }
1916 
1917 static struct file_operations neigh_stat_seq_fops = {
1918 	.owner	 = THIS_MODULE,
1919 	.open 	 = neigh_stat_seq_open,
1920 	.read	 = seq_read,
1921 	.llseek	 = seq_lseek,
1922 	.release = seq_release,
1923 };
1924 
1925 #endif /* CONFIG_PROC_FS */
1926 
1927 #ifdef CONFIG_ARPD
1928 void neigh_app_ns(struct neighbour *n)
1929 {
1930 	struct sk_buff *skb;
1931 	struct nlmsghdr  *nlh;
1932 	int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);
1933 
1934 	skb = alloc_skb(size, GFP_ATOMIC);
1935 	if (!skb)
1936 		return;
1937 
1938 	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
1939 		kfree_skb(skb);
1940 		return;
1941 	}
1942 	nlh = (struct nlmsghdr*)skb->data;
1943 	nlh->nlmsg_flags = NLM_F_REQUEST;
1944 	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
1945 	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
1946 }
1947 
1948 static void neigh_app_notify(struct neighbour *n)
1949 {
1950 	struct sk_buff *skb;
1951 	struct nlmsghdr  *nlh;
1952 	int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);
1953 
1954 	skb = alloc_skb(size, GFP_ATOMIC);
1955 	if (!skb)
1956 		return;
1957 
1958 	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
1959 		kfree_skb(skb);
1960 		return;
1961 	}
1962 	nlh = (struct nlmsghdr*)skb->data;
1963 	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
1964 	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
1965 }
1966 
1967 #endif /* CONFIG_ARPD */
1968 
1969 #ifdef CONFIG_SYSCTL
1970 
1971 struct neigh_sysctl_table
1972 {
1973 	struct ctl_table_header *sysctl_header;
1974 	ctl_table neigh_vars[17];
1975 	ctl_table neigh_dev[2];
1976 	ctl_table neigh_neigh_dir[2];
1977 	ctl_table neigh_proto_dir[2];
1978 	ctl_table neigh_root_dir[2];
1979 } neigh_sysctl_template = {
1980 	NULL,
1981         {{NET_NEIGH_MCAST_SOLICIT, "mcast_solicit",
1982          NULL, sizeof(int), 0644, NULL,
1983          &proc_dointvec},
1984 	{NET_NEIGH_UCAST_SOLICIT, "ucast_solicit",
1985          NULL, sizeof(int), 0644, NULL,
1986          &proc_dointvec},
1987 	{NET_NEIGH_APP_SOLICIT, "app_solicit",
1988          NULL, sizeof(int), 0644, NULL,
1989          &proc_dointvec},
1990 	{NET_NEIGH_RETRANS_TIME, "retrans_time",
1991          NULL, sizeof(int), 0644, NULL,
1992          &proc_dointvec},
1993 	{NET_NEIGH_REACHABLE_TIME, "base_reachable_time",
1994          NULL, sizeof(int), 0644, NULL,
1995          &proc_dointvec_jiffies},
1996 	{NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time",
1997          NULL, sizeof(int), 0644, NULL,
1998          &proc_dointvec_jiffies},
1999 	{NET_NEIGH_GC_STALE_TIME, "gc_stale_time",
2000          NULL, sizeof(int), 0644, NULL,
2001          &proc_dointvec_jiffies},
2002 	{NET_NEIGH_UNRES_QLEN, "unres_qlen",
2003          NULL, sizeof(int), 0644, NULL,
2004          &proc_dointvec},
2005 	{NET_NEIGH_PROXY_QLEN, "proxy_qlen",
2006          NULL, sizeof(int), 0644, NULL,
2007          &proc_dointvec},
2008 	{NET_NEIGH_ANYCAST_DELAY, "anycast_delay",
2009          NULL, sizeof(int), 0644, NULL,
2010          &proc_dointvec},
2011 	{NET_NEIGH_PROXY_DELAY, "proxy_delay",
2012          NULL, sizeof(int), 0644, NULL,
2013          &proc_dointvec},
2014 	{NET_NEIGH_LOCKTIME, "locktime",
2015          NULL, sizeof(int), 0644, NULL,
2016          &proc_dointvec},
2017 	{NET_NEIGH_GC_INTERVAL, "gc_interval",
2018          NULL, sizeof(int), 0644, NULL,
2019          &proc_dointvec_jiffies},
2020 	{NET_NEIGH_GC_THRESH1, "gc_thresh1",
2021          NULL, sizeof(int), 0644, NULL,
2022          &proc_dointvec},
2023 	{NET_NEIGH_GC_THRESH2, "gc_thresh2",
2024          NULL, sizeof(int), 0644, NULL,
2025          &proc_dointvec},
2026 	{NET_NEIGH_GC_THRESH3, "gc_thresh3",
2027          NULL, sizeof(int), 0644, NULL,
2028          &proc_dointvec},
2029 	 {0}},
2030 
2031 	{{NET_PROTO_CONF_DEFAULT, "default", NULL, 0, 0555, NULL},{0}},
2032 	{{0, "neigh", NULL, 0, 0555, NULL},{0}},
2033 	{{0, NULL, NULL, 0, 0555, NULL},{0}},
2034 	{{CTL_NET, "net", NULL, 0, 0555, NULL},{0}}
2035 };
2036 
2037 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2038 			  int p_id, int pdev_id, char *p_name)
2039 {
2040 	struct neigh_sysctl_table *t;
2041 
2042 	t = kmalloc(sizeof(*t), GFP_KERNEL);
2043 	if (t == NULL)
2044 		return -ENOBUFS;
2045 	memcpy(t, &neigh_sysctl_template, sizeof(*t));
2046 	t->neigh_vars[0].data = &p->mcast_probes;
2047 	t->neigh_vars[1].data = &p->ucast_probes;
2048 	t->neigh_vars[2].data = &p->app_probes;
2049 	t->neigh_vars[3].data = &p->retrans_time;
2050 	t->neigh_vars[4].data = &p->base_reachable_time;
2051 	t->neigh_vars[5].data = &p->delay_probe_time;
2052 	t->neigh_vars[6].data = &p->gc_staletime;
2053 	t->neigh_vars[7].data = &p->queue_len;
2054 	t->neigh_vars[8].data = &p->proxy_qlen;
2055 	t->neigh_vars[9].data = &p->anycast_delay;
2056 	t->neigh_vars[10].data = &p->proxy_delay;
2057 	t->neigh_vars[11].data = &p->locktime;
2058 	if (dev) {
2059 		t->neigh_dev[0].procname = dev->name;
2060 		t->neigh_dev[0].ctl_name = dev->ifindex;
2061 		memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
2062 	} else {
2063 		t->neigh_vars[12].data = (int*)(p+1);
2064 		t->neigh_vars[13].data = (int*)(p+1) + 1;
2065 		t->neigh_vars[14].data = (int*)(p+1) + 2;
2066 		t->neigh_vars[15].data = (int*)(p+1) + 3;
2067 	}
2068 	t->neigh_neigh_dir[0].ctl_name = pdev_id;
2069 
2070 	t->neigh_proto_dir[0].procname = p_name;
2071 	t->neigh_proto_dir[0].ctl_name = p_id;
2072 
2073 	t->neigh_dev[0].child = t->neigh_vars;
2074 	t->neigh_neigh_dir[0].child = t->neigh_dev;
2075 	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
2076 	t->neigh_root_dir[0].child = t->neigh_proto_dir;
2077 
2078 	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2079 	if (t->sysctl_header == NULL) {
2080 		kfree(t);
2081 		return -ENOBUFS;
2082 	}
2083 	p->sysctl_table = t;
2084 	return 0;
2085 }
2086 
2087 void neigh_sysctl_unregister(struct neigh_parms *p)
2088 {
2089 	if (p->sysctl_table) {
2090 		struct neigh_sysctl_table *t = p->sysctl_table;
2091 		p->sysctl_table = NULL;
2092 		unregister_sysctl_table(t->sysctl_header);
2093 		kfree(t);
2094 	}
2095 }
2096 
2097 #endif	/* CONFIG_SYSCTL */
2098