/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers) in a local
 * pool so we don't need to go to the memory "store" during an id
 * allocation, and so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */
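
/*
 * Example: the basic store/lookup flow described above.  A minimal
 * sketch; my_idr, my_lock and struct my_obj are hypothetical caller-side
 * names, not part of this file (-EAGAIN handling is omitted here; see
 * the example after idr_get_new_above() for the full retry protocol).
 *
 *	static DEFINE_IDR(my_idr);
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	int store(struct my_obj *obj)		// returns id or -errno
 *	{
 *		int id, err;
 *
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, obj, &id);
 *		spin_unlock(&my_lock);
 *		return err ? err : id;
 *	}
 *
 *	struct my_obj *lookup(int id)
 *	{
 *		return idr_find(&my_idr, id);	// NULL if id not in use
 *	}
 */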

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);

	return (1 << bits) - 1;
}

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible, which of course requires that
 * no spinlocks be held while doing so.  See the example after
 * idr_get_new_above() for the full preload-and-retry protocol.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL + 1];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
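
/*
 * Example: the full preload-and-retry protocol described above.  A
 * minimal sketch; my_idr and my_lock are hypothetical caller-side names.
 *
 *	int alloc_id(void *ptr)
 *	{
 *		int id, err;
 *	again:
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);	// whatever lock the caller requires
 *		err = idr_get_new_above(&my_idr, ptr, 1, &id);
 *		spin_unlock(&my_lock);
 *		if (err == -EAGAIN)
 *			goto again;	// refill the free list and retry
 *		return err ? err : id;	// -ENOSPC if the idr is full
 *	}
 */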

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	/*
	 * Walk down to the leaf, remembering the address of the slot
	 * taken at each level and clearing the "full" bits on the way.
	 */
	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		/*
		 * Unlink layers whose count drops to zero, bottom up.
		 * Freeing each layer is deferred until the slot that
		 * pointed to it has been cleared.
		 */
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.  See the example below.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
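
/*
 * Example: the typical clean-up sequence described above.  A minimal
 * sketch; my_idr and free_fn are hypothetical caller-side names, and
 * the entries are assumed to have been kmalloc()ed.
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	void my_teardown(void)
 *	{
 *		idr_for_each(&my_idr, free_fn, NULL);	// free the objects
 *		idr_remove_all(&my_idr);	// drop all id mappings
 *		idr_destroy(&my_idr);		// free the cached idr_layers
 *	}
 */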

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * lifetimes of the leaf pointers are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
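
/*
 * Example: an RCU-protected lookup, as described above.  A minimal
 * sketch; my_idr and use_object() are hypothetical, and it assumes the
 * stored objects are freed only after an RCU grace period, so the
 * returned pointer stays valid for the duration of the read section.
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use_object(obj);	// must not block inside the section
 *	rcu_read_unlock();
 */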

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - lookup next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated
 * to the id that was found, so the caller can advance it for the next
 * iteration.
 *
 * This function can be called under rcu_read_lock(), provided that the
 * lifetimes of the leaf pointers are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
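
/*
 * Example: open-coded iteration with idr_get_next(), for callers that
 * would rather not use an idr_for_each() callback.  A minimal sketch;
 * my_idr and process() are hypothetical, and the same serialization
 * rules as for idr_for_each() apply.
 *
 *	int id = 0;
 *	void *p;
 *
 *	while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *		process(id, p);		// id holds the entry's actual id
 *		id++;			// advance past the entry just seen
 *	}
 */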


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
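
/*
 * Example: swapping in a new pointer for an existing id.  A minimal
 * sketch; my_idr and new_obj are hypothetical, and it assumes the old
 * entry was kmalloc()ed and has no other users left.
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -EINVAL or -ENOENT, see above
 *	kfree(old);
 */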

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * ida_get_new*() functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/*
	 * Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
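
/*
 * Example: the same preload-and-retry protocol, applied to an ida.  A
 * minimal sketch; my_ida and my_lock are hypothetical caller-side names
 * and the starting id of 100 is arbitrary.
 *
 *	int alloc_ida_id(void)
 *	{
 *		int id, err;
 *	again:
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new_above(&my_ida, 100, &id);
 *		spin_unlock(&my_lock);
 *		if (err == -EAGAIN)
 *			goto again;
 *		return err ? err : id;
 *	}
 */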

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:		ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < %0x80000000)
 * @end: the maximum id (exclusive, < %0x80000000 or %0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.  If @end is %0, the
 * range is unbounded above.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
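
/*
 * Example: the self-contained ida_simple_*() interface, which handles
 * preallocation, retry and locking internally.  A minimal sketch;
 * my_ida is a hypothetical caller-side name.
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);	// end == 0: unbounded
 *	if (id < 0)
 *		return id;	// -ENOMEM or -ENOSPC
 *	...
 *	ida_simple_remove(&my_ida, id);
 */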

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);