/*
  NOTE:

  This code was lifted straight out of drivers/pci/pci.c; when
  compiling for the Intel StrongARM SA-1110/SA-1111, the usb-ohci.c
  driver needs these routines even when the architecture has no
  PCI bus...
*/

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include <asm/page.h>

/*
 * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct pci_pool {	/* the pool */
	struct list_head	page_list;
	spinlock_t		lock;
	size_t			blocks_per_page;
	size_t			size;
	struct pci_dev		*dev;
	size_t			allocation;
	char			name [32];
	wait_queue_head_t	waitq;
};

struct pci_page {	/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;
	void			*vaddr;
	dma_addr_t		dma;
	unsigned long		bitmap [0];
};
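
/*
 * Layout example (illustrative numbers only): with a block 'size' of 64
 * bytes, an 'allocation' of PAGE_SIZE = 4096 and BITS_PER_LONG = 32, each
 * pci_page carries 4096/64 = 64 blocks tracked by 64 bits, i.e. two
 * unsigned longs in 'bitmap' (a set bit means the block is free).  Block n
 * starts at vaddr + n * size and is handed to the device as dma + n * size.
 */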

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
#define	POOL_POISON_BYTE	0xa7

// #define CONFIG_PCIPOOL_DEBUG

static inline const char *slot_name(const struct pci_pool *pool)
{
	const struct pci_dev *pdev = pool->dev;

	if (pdev == 0)
		return "[0]";
	else if (dev_is_sa1111(pdev))
		return "[SA-1111]";
	else
		return pdev->slot_name;
}


/**
 * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @pdev: pci device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * @mem_flags: SLAB_* flags.
 *
 * Returns a pci allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, pci_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from pci_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
	size_t size, size_t align, size_t allocation, int mem_flags)
{
	struct pci_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return 0;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align - 1;	/* round up to a multiple of align */
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return 0;

	if (!(retval = kmalloc (sizeof *retval, mem_flags)))
		return retval;

	strncpy (retval->name, name, sizeof retval->name);
	retval->name [sizeof retval->name - 1] = 0;

	retval->dev = pdev;
	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

#ifdef CONFIG_PCIPOOL_DEBUG
	printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
		slot_name(retval), retval->name, size,
		retval->blocks_per_page, allocation);
#endif

	return retval;
}
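
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * normally creates one pool per object type when it binds to the device,
 * for example a pool of OHCI transfer descriptors:
 *
 *	struct pci_pool *td_pool;
 *
 *	td_pool = pci_pool_create ("usb-ohci-td", pdev,
 *			sizeof (struct td),	// block size
 *			16,			// hardware alignment
 *			0,			// no boundary restriction
 *			SLAB_KERNEL);
 *	if (td_pool == 0)
 *		return -ENOMEM;
 */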


static struct pci_page *
pool_alloc_page (struct pci_pool *pool, int mem_flags)
{
	struct pci_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return 0;
	page->vaddr = pci_alloc_consistent (pool->dev,
					    pool->allocation,
					    &page->dma);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
	} else {
		kfree (page);
		page = 0;
	}
	return page;
}


static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void
pool_free_page (struct pci_pool *pool, struct pci_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
#endif
	pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}


/**
 * pci_pool_destroy - destroys a pool of pci memory blocks.
 * @pool: pci pool that will be destroyed
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
pci_pool_destroy (struct pci_pool *pool)
{
	unsigned long		flags;

#ifdef CONFIG_PCIPOOL_DEBUG
	printk (KERN_DEBUG "pcipool destroy %s/%s\n",
		slot_name(pool), pool->name);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	while (!list_empty (&pool->page_list)) {
		struct pci_page		*page;
		page = list_entry (pool->page_list.next,
				struct pci_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
				slot_name(pool), pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}
	spin_unlock_irqrestore (&pool->lock, flags);
	kfree (pool);
}
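
/*
 * Teardown sketch (hypothetical driver code): every block must already
 * have been returned with pci_pool_free() before the pool goes away,
 * typically in the driver's disconnect/cleanup path:
 *
 *	pci_pool_free (td_pool, td, td_dma);
 *	...
 *	pci_pool_destroy (td_pool);
 *	td_pool = 0;
 */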


/**
 * pci_pool_alloc - get a block of consistent memory
 * @pool: pci pool that will produce the block
 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct list_head	*entry;
	struct pci_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		int		i;
		page = list_entry (entry, struct pci_page, page_list);
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, mem_flags))) {
		if (mem_flags == SLAB_KERNEL) {
			DECLARE_WAITQUEUE (wait, current);

			current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			current->state = TASK_RUNNING;
			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = 0;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
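
/*
 * Allocation sketch (hypothetical caller): SLAB_KERNEL may sleep on the
 * pool's wait queue until a block frees up, so it is only valid in process
 * context; interrupt handlers must pass SLAB_ATOMIC and handle a null
 * return.  The dma handle is what the controller gets, the returned
 * pointer is what the CPU uses:
 *
 *	struct td	*td;
 *	dma_addr_t	td_dma;
 *
 *	td = pci_pool_alloc (td_pool, SLAB_ATOMIC, &td_dma);
 *	if (td == 0)
 *		return -ENOMEM;
 *	// hand td_dma to the controller; the CPU writes through td
 */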


static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct list_head	*entry;
	struct pci_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		page = list_entry (entry, struct pci_page, page_list);
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = 0;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}


/**
 * pci_pool_free - put block back into pci pool
 * @pool: the pci pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct pci_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page (pool, dma)) == 0) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p/%lx (bad dma)\n",
			slot_name(pool), pool->name, vaddr, (unsigned long) dma);
		return;
	}

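	/*
	 * Recover the block index from the dma offset.  Worked example
	 * (illustrative values only): with pool->size = 64 and
	 * BITS_PER_LONG = 32, an offset of dma - page->dma = 4160 is
	 * block 65 overall, i.e. map = 2, block = 1 within that word.
	 */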
	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%lx\n",
			slot_name(pool), pool->name, vaddr, (unsigned long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		printk (KERN_ERR "pci_pool_free %s/%s, dma %lx already free\n",
			slot_name(pool), pool->name, (unsigned long) dma);
		return;
	}
	memset (vaddr, POOL_POISON_BYTE, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * it is not interrupt safe. Better have empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}


EXPORT_SYMBOL (pci_pool_create);
EXPORT_SYMBOL (pci_pool_destroy);
EXPORT_SYMBOL (pci_pool_alloc);
EXPORT_SYMBOL (pci_pool_free);

/* **************************************** */

static int __init pcipool_init(void)
{
	MOD_INC_USE_COUNT;	/* never unload */

	return 0;
}
module_init(pcipool_init);

MODULE_LICENSE("GPL");