/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory,
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/genalloc.h>


/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		rwlock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
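
/*
 * Usage sketch (illustration only, not part of this file): a driver
 * managing a small on-chip SRAM region might create a pool with
 * 32-byte granularity (order 5, since 2^5 = 32). "sram_pool" is a
 * hypothetical name used throughout these sketches.
 *
 *	struct gen_pool *sram_pool;
 *
 *	sram_pool = gen_pool_create(5, -1);
 *	if (sram_pool == NULL)
 *		return -ENOMEM;
 */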

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
		 int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
		(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	spin_lock_init(&chunk->lock);
	chunk->start_addr = addr;
	chunk->end_addr = addr + size;

	write_lock(&pool->lock);
	list_add(&chunk->next_chunk, &pool->chunks);
	write_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add);
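
/*
 * Continuing the sketch above (hypothetical names): once the pool
 * exists, the caller hands it the backing region with gen_pool_add().
 * SRAM_BASE and SRAM_SIZE stand in for whatever address and length
 * the caller actually owns.
 *
 *	if (gen_pool_add(sram_pool, SRAM_BASE, SRAM_SIZE, -1) != 0) {
 *		gen_pool_destroy(sram_pool);
 *		return -ENOMEM;
 *	}
 */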

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		/* The bitmap must be empty; any set bit is a leaked allocation. */
		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
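
/*
 * Teardown sketch (illustration only, continuing the hypothetical
 * sram_pool example): every outstanding allocation must be returned
 * with gen_pool_free() first, or the BUG_ON() above fires.
 *
 *	gen_pool_destroy(sram_pool);
 */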

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long addr, flags;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit;

	if (size == 0)
		return 0;

	/* Round the request up to a whole number of minimum-order granules. */
	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;

		spin_lock_irqsave(&chunk->lock, flags);
		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
						nbits, 0);
		if (start_bit >= end_bit) {
			spin_unlock_irqrestore(&chunk->lock, flags);
			continue;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);

		bitmap_set(chunk->bits, start_bit, nbits);
		spin_unlock_irqrestore(&chunk->lock, flags);
		read_unlock(&pool->lock);
		return addr;
	}
	read_unlock(&pool->lock);
	return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
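
/*
 * Allocation sketch (illustration only, hypothetical names): failure
 * is reported as 0, so the result must be checked before use.
 *
 *	unsigned long buf;
 *
 *	buf = gen_pool_alloc(sram_pool, 256);
 *	if (buf == 0)
 *		return -ENOMEM;
 */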

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long flags;
	int order = pool->min_alloc_order;
	int bit, nbits;

	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			spin_lock_irqsave(&chunk->lock, flags);
			bit = (addr - chunk->start_addr) >> order;
			while (nbits--)
				__clear_bit(bit++, chunk->bits);
			spin_unlock_irqrestore(&chunk->lock, flags);
			break;
		}
	}
	/* nbits only goes negative in the loop above if addr matched a chunk. */
	BUG_ON(nbits > 0);
	read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
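
/*
 * Freeing sketch (illustration only): pass back exactly the address
 * and size that were allocated; an address outside every chunk trips
 * the BUG_ON() above.
 *
 *	gen_pool_free(sram_pool, buf, 256);
 */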