/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing, the identifier of IP packets is generated
 *  unpredictably by this code only for packets subjected (actually or
 *  potentially) to defragmentation.  I.e. DF packets smaller than the PMTU,
 *  with local fragmentation disabled, use a constant ID and do not use this
 *  code (see ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via a lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when the reference counter goes to 0.
 *  Once that has happened, the node may be removed after a sufficient amount
 *  of time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		daddr: unchangeable
 *		ip_id_count: atomic value (no lock needed)
 */
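
/* For illustration only: a consumer of the per-peer ID counter looks roughly
 * like the sketch below (a paraphrase of inet_getid() in
 * include/net/inetpeer.h, not a definition used in this file).  It reserves
 * "more + 1" consecutive IDs and returns the first of them:
 *
 *	static inline __u16 example_getid(struct inet_peer *p, int more)
 *	{
 *		more++;
 *		// atomically reserve 'more' IDs; the previous counter value
 *		// is the first ID of the reserved range
 *		return atomic_add_return(more, &p->ip_id_count) - more;
 *	}
 */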

static struct kmem_cache *peer_cachep __read_mostly;

static LIST_HEAD(gc_list);
static const int gc_delay = 60 * HZ;
static struct delayed_work gc_work;
static DEFINE_SPINLOCK(gc_lock);

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

struct inet_peer_base {
	struct inet_peer __rcu *root;
	seqlock_t	lock;
	int		total;
};

static struct inet_peer_base v4_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
	.total		= 0,
};

static struct inet_peer_base v6_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
	.total		= 0,
};

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

static void inetpeer_gc_worker(struct work_struct *work)
{
	struct inet_peer *p, *n;
	LIST_HEAD(list);

	spin_lock_bh(&gc_lock);
	list_replace_init(&gc_list, &list);
	spin_unlock_bh(&gc_lock);

	if (list_empty(&list))
		return;

	list_for_each_entry_safe(p, n, &list, gc_list) {

		if (need_resched())
			cond_resched();

		if (p->avl_left != peer_avl_empty) {
			list_add_tail(&p->avl_left->gc_list, &list);
			p->avl_left = peer_avl_empty;
		}

		if (p->avl_right != peer_avl_empty) {
			list_add_tail(&p->avl_right->gc_list, &list);
			p->avl_right = peer_avl_empty;
		}

		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);

		if (!atomic_read(&p->refcnt)) {
			list_del(&p->gc_list);
			kmem_cache_free(peer_cachep, p);
		}
	}

	if (list_empty(&list))
		return;

	spin_lock_bh(&gc_lock);
	list_splice(&list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
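	/* The checks above are cumulative: a machine with at most 32MB of
	 * RAM halves the threshold once (65664 -> 32832), at most 16MB
	 * halves it again (-> 16416), and at most 8MB shifts it twice more
	 * (-> 4104 entries).
	 */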

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
}

static int addr_compare(const struct inetpeer_addr *a,
			const struct inetpeer_addr *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->addr.a6[i] == b->addr.a6[i])
			continue;
		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
			return -1;
		return 1;
	}

	return 0;
}
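
/* addr_compare() treats the address union as up to four 32-bit words; for
 * AF_INET only the first word (aliased by the a4 member) is examined.  An
 * illustrative sketch of how a caller might build a lookup key, using the
 * field names as they appear in this file:
 *
 *	struct inetpeer_addr key;
 *
 *	key.addr.a4 = ip_hdr(skb)->daddr;	// __be32 IPv4 destination
 *	key.family  = AF_INET;
 *	peer = inet_getpeer(&key, 1);
 */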

#define rcu_deref_locked(X, BASE)				\
	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_deref_locked(_base->root, _base);		\
	     u != peer_avl_empty; ) {				\
		int cmp = addr_compare(_daddr, &u->daddr);	\
		if (cmp == 0)					\
			break;					\
		if (cmp == -1)					\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, _base);		\
	}							\
	u;							\
})
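
/* Note that lookup() is not hygienic: it assigns through a variable named
 * stackptr that must already exist in the caller.  The callers below use it
 * roughly like this (see unlink_from_pool() and inet_getpeer()):
 *
 *	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
 *	struct inet_peer *p;
 *
 *	write_seqlock_bh(&base->lock);		// pool lock, BHs disabled
 *	p = lookup(&daddr, stack, base);	// fills stack[] with the path
 *	...
 *	write_sequnlock_bh(&base->lock);
 */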

/*
 * Called with rcu_read_lock()
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links followed exceeds
 * PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
				    struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		int cmp = addr_compare(daddr, &u->daddr);
		if (cmp == 0) {
			/* Before taking a reference, check if this entry was
			 * deleted (refcnt=-1)
			 */
			if (!atomic_add_unless(&u->refcnt, 1, -1))
				u = NULL;
			return u;
		}
		if (cmp == -1)
			u = rcu_dereference(u->avl_left);
		else
			u = rcu_dereference(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_deref_locked(*v, base);			\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, base);			\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_deref_locked(*nodep, base);
		l = rcu_deref_locked(node->avl_left, base);
		r = rcu_deref_locked(node->avl_right, base);
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_deref_locked(l->avl_left, base);
			lr = rcu_deref_locked(l->avl_right, base);
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_deref_locked(lr->avl_left, base);	/* lrl: RH or RH-1 */
				lrr = rcu_deref_locked(lr->avl_right, base);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_deref_locked(r->avl_right, base);
			rl = rcu_deref_locked(r->avl_left, base);
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_deref_locked(rl->avl_right, base);	/* rlr: LH or LH-1 */
				rll = rcu_deref_locked(rl->avl_left, base);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)
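
/* Like lookup(), link_to_pool() relies on stack/stackptr set up by a prior
 * lookup() call that returned peer_avl_empty: stackptr[-1] is then the empty
 * slot where the new node belongs, and the recorded path is what
 * peer_avl_rebalance() walks back up.  Illustrative caller pattern (this is
 * what inet_getpeer() does below):
 *
 *	p = lookup(daddr, stack, base);
 *	if (p == peer_avl_empty) {
 *		n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
 *		// ... initialise n ...
 *		link_to_pool(n, base);
 *		base->total++;
 *	}
 */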

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
	struct inet_peer __rcu ***stackptr, ***delp;

	if (lookup(&p->daddr, stack, base) != p)
		BUG();
	delp = stackptr - 1; /* *delp[0] == p */
	if (p->avl_left == peer_avl_empty_rcu) {
		*delp[0] = p->avl_right;
		--stackptr;
	} else {
		/* look for a node to insert instead of p */
		struct inet_peer *t;
		t = lookup_rightempty(p, base);
		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
		**--stackptr = t->avl_left;
		/* t is removed, t->daddr > x->daddr for any
		 * x in p->avl_left subtree.
		 * Put t in the old place of p. */
		RCU_INIT_POINTER(*delp[0], t);
		t->avl_left = p->avl_left;
		t->avl_right = p->avl_right;
		t->avl_height = p->avl_height;
		BUG_ON(delp[1] != &p->avl_left);
		delp[1] = &t->avl_left; /* was &p->avl_left */
	}
	peer_avl_rebalance(stack, stackptr, base);
	base->total--;
	call_rcu(&p->rcu, inetpeer_free_rcu);
}

static struct inet_peer_base *family_to_base(int family)
{
	return family == AF_INET ? &v4_peers : &v6_peers;
}

/* perform garbage collection on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
			struct inet_peer __rcu ***stackptr)
{
	struct inet_peer *p, *gchead = NULL;
	__u32 delta, ttl;
	int cnt = 0;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
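	/* i.e. ttl shrinks linearly from inet_peer_maxttl for an empty pool
	 * towards inet_peer_minttl as base->total approaches the threshold
	 * (the intermediate /HZ and *HZ keep the product within 32 bits).
	 * For example, with the default 10 min / 120 sec settings, a pool
	 * that is half full gets a ttl of about 6 minutes.
	 */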
	stackptr--; /* last stack slot is peer_avl_empty */
	while (stackptr > stack) {
		stackptr--;
		p = rcu_deref_locked(**stackptr, base);
		if (atomic_read(&p->refcnt) == 0) {
			smp_rmb();
			delta = (__u32)jiffies - p->dtime;
			if (delta >= ttl &&
			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
				p->gc_next = gchead;
				gchead = p;
			}
		}
	}
	while ((p = gchead) != NULL) {
		gchead = p->gc_next;
		cnt++;
		unlink_from_pool(p, base, stack);
	}
	return cnt;
}

struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer_base *base = family_to_base(daddr->family);
	struct inet_peer *p;
	unsigned int sequence;
	int invalidated, gccnt = 0;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	sequence = read_seqbegin(&base->lock);
	p = lookup_rcu(daddr, base);
	invalidated = read_seqretry(&base->lock, sequence);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, this time taking the lock.
	 * At least the nodes should be hot in our cache.
	 */
	write_seqlock_bh(&base->lock);
relookup:
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		write_sequnlock_bh(&base->lock);
		return p;
	}
	if (!gccnt) {
		gccnt = inet_peer_gc(base, stack, stackptr);
		if (gccnt && create)
			goto relookup;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count,
				(daddr->family == AF_INET) ?
					secure_ip_id(daddr->addr.a4) :
					secure_ipv6_id(daddr->addr.a6));
		p->tcp_ts_stamp = 0;
		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
		p->rate_tokens = 0;
		p->rate_last = 0;
		p->pmtu_expires = 0;
		p->pmtu_orig = 0;
		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
		INIT_LIST_HEAD(&p->gc_list);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
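
/* A typical caller (e.g. the route cache binding a peer) pairs this with
 * inet_putpeer().  Sketch of the pattern; the convenience wrappers
 * inet_getpeer_v4()/inet_getpeer_v6() in include/net/inetpeer.h build the
 * key the same way (here iph is the IPv4 header of the packet in hand):
 *
 *	struct inetpeer_addr daddr;
 *	struct inet_peer *peer;
 *
 *	daddr.addr.a4 = iph->daddr;
 *	daddr.family  = AF_INET;
 *	peer = inet_getpeer(&daddr, 1);		// create == 1: allocate if missing
 *	if (peer) {
 *		// ... use peer->ip_id_count, peer->metrics, etc. ...
 *		inet_putpeer(peer);
 *	}
 */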

void inet_putpeer(struct inet_peer *p)
{
	p->dtime = (__u32)jiffies;
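	/* Make sure the dtime store above is visible before the refcnt drop:
	 * this pairs with the smp_rmb() in inet_peer_gc(), which reads dtime
	 * only after observing refcnt == 0.
	 */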
	smp_mb__before_atomic_dec();
	atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations. This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 * 	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
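
/* The ICMP paths are the main users: they pass the per-netns rate limit
 * (in jiffies) as the timeout, so one token is spent per emitted error.
 * Hedged sketch of the call as made from net/ipv4/icmp.c, with the exact
 * surrounding checks omitted:
 *
 *	peer = inet_getpeer_v4(daddr, 1);	// address being rate limited
 *	allow = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
 *	if (peer)
 *		inet_putpeer(peer);
 */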

static void inetpeer_inval_rcu(struct rcu_head *head)
{
	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);

	spin_lock_bh(&gc_lock);
	list_add_tail(&p->gc_list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

void inetpeer_invalidate_tree(int family)
{
	struct inet_peer *old, *new, *prev;
	struct inet_peer_base *base = family_to_base(family);

	write_seqlock_bh(&base->lock);

	old = base->root;
	if (old == peer_avl_empty_rcu)
		goto out;

	new = peer_avl_empty_rcu;

	prev = cmpxchg(&base->root, old, new);
	if (prev == old) {
		base->total = 0;
		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
	}

out:
	write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);
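
/* Callers simply pass the address family whose pool should be emptied; the
 * detached tree is handed to inetpeer_inval_rcu() and torn down
 * asynchronously by inetpeer_gc_worker().  For example, flushing all cached
 * IPv4 peers:
 *
 *	inetpeer_invalidate_tree(AF_INET);
 */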