/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool a lock still has to
 * be taken.  So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
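
/*
 * Typical usage, as an illustrative sketch only.  The backing region,
 * addresses and sizes below are hypothetical:
 *
 *	pool = gen_pool_create(8, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *	rc = gen_pool_add_virt(pool, (unsigned long)vaddr, paddr, 65536, -1);
 *	if (rc)
 *		return rc;
 *	addr = gen_pool_alloc(pool, 1024);
 *	if (addr)
 *		gen_pool_free(pool, addr, 1024);
 *	gen_pool_destroy(pool);
 *
 * gen_pool_create(8, -1) makes each bitmap bit cover 2^8 = 256 bytes;
 * gen_pool_alloc() returns 0 on failure, and gen_pool_destroy() expects
 * all allocations to have been freed.
 */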

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>

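/*
 * set_bits_ll - atomically set @mask_to_set in *@addr
 *
 * Loops on cmpxchg until the update takes effect.  Returns -EBUSY as
 * soon as any bit in @mask_to_set is found already set, so concurrent
 * users racing for the same bits see the conflict and can back off.
 */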
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

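/*
 * clear_bits_ll - atomically clear @mask_to_clear in *@addr
 *
 * Loops on cmpxchg until the update takes effect.  Returns -EBUSY if
 * any bit in @mask_to_clear is not currently set, which indicates a
 * double free or a conflicting concurrent update.
 */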
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to set the same bit, the loser returns the number of bits
 * it failed to set, otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to clear the same bit, the loser returns the number of bits
 * it failed to clear, otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;
	atomic_set(&chunk->avail, size);

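	/*
	 * Chunk insertion is the one operation that still takes the pool
	 * lock; lockless readers walk the chunk list under RCU.
	 */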
	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool containing the memory
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

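		/*
		 * Any bit still set in the chunk bitmap is an outstanding
		 * allocation; destroying the pool now would be a bug.
		 */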
		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.  Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

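	/* round the request up to a whole number of min_alloc_order units */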
	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
						       start_bit, nbits, 0);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
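		/*
		 * A non-zero return means another user set some of our bits
		 * first; bitmap_set_ll() reports how many bits were left
		 * unset, so clear the ones we did set and retry the search.
		 */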
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without
 * an NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:	the generic memory pool
 * @func:	func to call
 * @data:	additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool.  @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk->end_addr - chunk->start_addr;
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);