Lines Matching refs:pool
(all matches below are from the kernel's mm/dmapool.c; each line shows the file line number, the matched source, and the containing function)

71 struct dma_pool *pool; in pools_show() local
81 list_for_each_entry(pool, &dev->dma_pools, pools) { in pools_show()
85 spin_lock_irq(&pool->lock); in pools_show()
86 list_for_each_entry(page, &pool->page_list, page_list) { in pools_show()
90 spin_unlock_irq(&pool->lock); in pools_show()
94 pool->name, blocks, in pools_show()
95 pages * (pool->allocation / pool->size), in pools_show()
96 pool->size, pages); in pools_show()
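
The pools_show() matches above are the sysfs "pools" attribute: it walks every pool on dev->dma_pools, counts pages and in-use blocks under pool->lock, and emits one summary line per pool. The block total printed at line 95 is pages * (allocation / size). A minimal sketch of that arithmetic with hypothetical numbers (size and allocation are fixed at pool creation):

    /* Hypothetical pool: 64-byte blocks carved out of 4096-byte
     * coherent chunks, three chunks currently allocated.
     */
    size_t size = 64, allocation = 4096, pages = 3;
    size_t blocks_per_page = allocation / size;      /* 64  */
    size_t blocks_total = pages * blocks_per_page;   /* 192 */
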
203 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) in pool_initialise_page() argument
206 unsigned int next_boundary = pool->boundary; in pool_initialise_page()
209 unsigned int next = offset + pool->size; in pool_initialise_page()
210 if (unlikely((next + pool->size) >= next_boundary)) { in pool_initialise_page()
212 next_boundary += pool->boundary; in pool_initialise_page()
216 } while (offset < pool->allocation); in pool_initialise_page()
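
pool_initialise_page() threads an intrusive free list through a fresh chunk: the first word of each free block stores the offset of the next free block, and next_boundary keeps a block from straddling a multiple of pool->boundary. The matched lines reconstruct to the loop below; this is a sketch, with the lines the search did not return filled in from context:

    unsigned int offset = 0;
    unsigned int next_boundary = pool->boundary;

    do {
        unsigned int next = offset + pool->size;
        /* The following block would cross the boundary: skip to it. */
        if (unlikely((next + pool->size) >= next_boundary)) {
            next = next_boundary;
            next_boundary += pool->boundary;
        }
        *(int *)(page->vaddr + offset) = next;  /* link to next free block */
        offset = next;
    } while (offset < pool->allocation);
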
219 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) in pool_alloc_page() argument
226 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
230 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); in pool_alloc_page()
232 pool_initialise_page(pool, page); in pool_alloc_page()
247 static void pool_free_page(struct dma_pool *pool, struct dma_page *page) in pool_free_page() argument
252 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); in pool_free_page()
254 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); in pool_free_page()
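
pool_alloc_page() and pool_free_page() manage the coherent backing store: each struct dma_page tracks one chunk of pool->allocation bytes from dma_alloc_coherent(), poisoned with POOL_POISON_FREED when DMAPOOL_DEBUG is set, and returned through dma_free_coherent(). A minimal sketch of that pairing, where dev, bytes and flags stand in for pool->dev, pool->allocation and the caller's gfp mask:

    dma_addr_t handle;
    void *vaddr = dma_alloc_coherent(dev, bytes, &handle, flags);

    if (vaddr) {
        /* ... carve the chunk into pool->size blocks, use them ... */
        dma_free_coherent(dev, bytes, vaddr, handle);
    }
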
267 void dma_pool_destroy(struct dma_pool *pool) in dma_pool_destroy() argument
272 if (unlikely(!pool)) in dma_pool_destroy()
277 list_del(&pool->pools); in dma_pool_destroy()
278 if (pool->dev && list_empty(&pool->dev->dma_pools)) in dma_pool_destroy()
282 device_remove_file(pool->dev, &dev_attr_pools); in dma_pool_destroy()
285 list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) { in dma_pool_destroy()
287 if (pool->dev) in dma_pool_destroy()
288 dev_err(pool->dev, "%s %s, %p busy\n", __func__, in dma_pool_destroy()
289 pool->name, page->vaddr); in dma_pool_destroy()
292 pool->name, page->vaddr); in dma_pool_destroy()
297 pool_free_page(pool, page); in dma_pool_destroy()
300 kfree(pool); in dma_pool_destroy()
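
dma_pool_destroy() unlinks the pool from its device, removing the sysfs file when the device's last pool goes away (lines 277-282), then releases each page, printing an error instead of freeing the chunk when a page still has live blocks. A usage sketch of the lifecycle, assuming a driver context with a valid struct device *dev (the name and sizes are hypothetical):

    struct dma_pool *pool;

    pool = dma_pool_create("mypool", dev, 64 /* size */,
                           8 /* align */, 0 /* boundary */);
    if (!pool)
        return -ENOMEM;
    /* ... dma_pool_alloc() / dma_pool_free() ... */
    dma_pool_destroy(pool);  /* every block must already be freed */
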
314 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, in dma_pool_alloc() argument
324 spin_lock_irqsave(&pool->lock, flags); in dma_pool_alloc()
325 list_for_each_entry(page, &pool->page_list, page_list) { in dma_pool_alloc()
326 if (page->offset < pool->allocation) in dma_pool_alloc()
331 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_alloc()
333 page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); in dma_pool_alloc()
337 spin_lock_irqsave(&pool->lock, flags); in dma_pool_alloc()
339 list_add(&page->page_list, &pool->page_list); in dma_pool_alloc()
351 for (i = sizeof(page->offset); i < pool->size; i++) { in dma_pool_alloc()
354 if (pool->dev) in dma_pool_alloc()
355 dev_err(pool->dev, "%s %s, %p (corrupted)\n", in dma_pool_alloc()
356 __func__, pool->name, retval); in dma_pool_alloc()
359 __func__, pool->name, retval); in dma_pool_alloc()
366 data, pool->size, 1); in dma_pool_alloc()
371 memset(retval, POOL_POISON_ALLOCATED, pool->size); in dma_pool_alloc()
373 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_alloc()
376 memset(retval, 0, pool->size); in dma_pool_alloc()
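
dma_pool_alloc() scans the page list under pool->lock for a page with room (page->offset < pool->allocation, line 326). Failing that, it drops the lock and grows the pool with pool_alloc_page(), masking out __GFP_ZERO so the chunk is not zeroed twice, then relinks the page, pops the free-list head, verifies the POOL_POISON_FREED pattern under DMAPOOL_DEBUG, and zeroes the block itself when the caller asked for it (line 376). A usage sketch, assuming the pool created in the previous sketch:

    dma_addr_t dma;
    void *buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);

    if (!buf)
        return -ENOMEM;
    /* program "dma" into the device; access the data through "buf" */
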
382 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) in pool_find_page() argument
386 list_for_each_entry(page, &pool->page_list, page_list) { in pool_find_page()
389 if ((dma - page->dma) < pool->allocation) in pool_find_page()
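
pool_find_page() maps a bus address back to its backing page. Because dma_addr_t is unsigned, the single comparison at line 389 accepts dma only when it falls inside [page->dma, page->dma + pool->allocation); a handle below page->dma wraps around and fails the same test. Reconstructed as a sketch:

    list_for_each_entry(page, &pool->page_list, page_list) {
        /* Unsigned subtraction also rejects dma < page->dma. */
        if ((dma - page->dma) < pool->allocation)
            return page;
    }
    return NULL;
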
404 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) in dma_pool_free() argument
410 spin_lock_irqsave(&pool->lock, flags); in dma_pool_free()
411 page = pool_find_page(pool, dma); in dma_pool_free()
413 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
414 if (pool->dev) in dma_pool_free()
415 dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n", in dma_pool_free()
416 __func__, pool->name, vaddr, &dma); in dma_pool_free()
419 __func__, pool->name, vaddr, &dma); in dma_pool_free()
425 memset(vaddr, 0, pool->size); in dma_pool_free()
428 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
429 if (pool->dev) in dma_pool_free()
430 dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n", in dma_pool_free()
431 __func__, pool->name, vaddr, &dma); in dma_pool_free()
434 __func__, pool->name, vaddr, &dma); in dma_pool_free()
439 while (chain < pool->allocation) { in dma_pool_free()
444 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
445 if (pool->dev) in dma_pool_free()
446 dev_err(pool->dev, "%s %s, dma %pad already free\n", in dma_pool_free()
447 __func__, pool->name, &dma); in dma_pool_free()
450 __func__, pool->name, &dma); in dma_pool_free()
454 memset(vaddr, POOL_POISON_FREED, pool->size); in dma_pool_free()
465 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
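
dma_pool_free() validates its arguments before touching the free list: a handle no page covers is "bad dma" (lines 414-416), a vaddr that disagrees with the handle's offset is "bad vaddr" (lines 429-431), and under DMAPOOL_DEBUG the chain walk at line 439 catches a block that is already free (lines 446-447) before the block is re-poisoned and pushed back. The caller simply hands back the exact pair the pool gave out:

    /* Usage sketch: vaddr and dma must be the pair that
     * dma_pool_alloc() returned for this block.
     */
    dma_pool_free(pool, buf, dma);
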
474 struct dma_pool *pool = *(struct dma_pool **)res; in dmam_pool_release() local
476 dma_pool_destroy(pool); in dmam_pool_release()
501 struct dma_pool **ptr, *pool; in dmam_pool_create() local
507 pool = *ptr = dma_pool_create(name, dev, size, align, allocation); in dmam_pool_create()
508 if (pool) in dmam_pool_create()
513 return pool; in dmam_pool_create()
523 void dmam_pool_destroy(struct dma_pool *pool) in dmam_pool_destroy() argument
525 struct device *dev = pool->dev; in dmam_pool_destroy()
527 WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool)); in dmam_pool_destroy()
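
The dmam_* matches are the managed (devres) wrappers: dmam_pool_create() stores the pool pointer in a devres record whose release hook, dmam_pool_release(), runs dma_pool_destroy() when the device detaches, and dmam_pool_destroy() triggers that release early through devres_release(). A usage sketch for a probe() path (names are hypothetical):

    struct dma_pool *pool;

    pool = dmam_pool_create("mypool", dev, 64, 8, 0);
    if (!pool)
        return -ENOMEM;
    /* no explicit destroy: the pool is torn down on driver detach */
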