1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/arch/arm/mm/dma-mapping.c
4 *
5 * Copyright (C) 2000-2004 Russell King
6 *
7 * DMA uncached mapping support.
8 */
9 #include <linux/module.h>
10 #include <linux/mm.h>
11 #include <linux/genalloc.h>
12 #include <linux/gfp.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/device.h>
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
19 #include <linux/highmem.h>
20 #include <linux/memblock.h>
21 #include <linux/slab.h>
22 #include <linux/iommu.h>
23 #include <linux/io.h>
24 #include <linux/vmalloc.h>
25 #include <linux/sizes.h>
26 #include <linux/cma.h>
27
28 #include <asm/memory.h>
29 #include <asm/highmem.h>
30 #include <asm/cacheflush.h>
31 #include <asm/tlbflush.h>
32 #include <asm/mach/arch.h>
33 #include <asm/dma-iommu.h>
34 #include <asm/mach/map.h>
35 #include <asm/system_info.h>
36 #include <asm/xen/xen-ops.h>
37
38 #include "dma.h"
39 #include "mm.h"
40
41 struct arm_dma_alloc_args {
42 struct device *dev;
43 size_t size;
44 gfp_t gfp;
45 pgprot_t prot;
46 const void *caller;
47 bool want_vaddr;
48 int coherent_flag;
49 };
50
51 struct arm_dma_free_args {
52 struct device *dev;
53 size_t size;
54 void *cpu_addr;
55 struct page *page;
56 bool want_vaddr;
57 };
58
59 #define NORMAL 0
60 #define COHERENT 1
61
62 struct arm_dma_allocator {
63 void *(*alloc)(struct arm_dma_alloc_args *args,
64 struct page **ret_page);
65 void (*free)(struct arm_dma_free_args *args);
66 };
67
68 struct arm_dma_buffer {
69 struct list_head list;
70 void *virt;
71 struct arm_dma_allocator *allocator;
72 };
73
74 static LIST_HEAD(arm_dma_bufs);
75 static DEFINE_SPINLOCK(arm_dma_bufs_lock);
76
77 static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
78 {
79 struct arm_dma_buffer *buf, *found = NULL;
80 unsigned long flags;
81
82 spin_lock_irqsave(&arm_dma_bufs_lock, flags);
83 list_for_each_entry(buf, &arm_dma_bufs, list) {
84 if (buf->virt == virt) {
85 list_del(&buf->list);
86 found = buf;
87 break;
88 }
89 }
90 spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
91 return found;
92 }
93
94 /*
95 * The DMA API is built upon the notion of "buffer ownership". A buffer
96 * is either exclusively owned by the CPU (and therefore may be accessed
97 * by it) or exclusively owned by the DMA device. These helper functions
98 * represent the transitions between these two ownership states.
99 *
100 * Note, however, that on later ARMs, this notion does not work due to
101 * speculative prefetches. We model our approach on the assumption that
102 * the CPU does do speculative prefetches, which means we clean caches
103 * before transfers and delay cache invalidation until transfer completion.
104 *
105 */
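
/*
 * Illustrative sketch (not part of this file): the ownership hand-off as a
 * driver typically sees it through the generic DMA API.  The names dev, buf
 * and len are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... device performs DMA; the CPU must not touch buf here ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU owns the buffer again and sees the device's writes ...
 *
 * On this implementation the map step ends up in __dma_page_cpu_to_dev()
 * (clean before the transfer) and the unmap step in __dma_page_dev_to_cpu()
 * (invalidate after the transfer), matching the note above.
 */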
106 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
107 size_t, enum dma_data_direction);
108 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
109 size_t, enum dma_data_direction);
110
111 /**
112 * arm_dma_map_page - map a portion of a page for streaming DMA
113 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
114 * @page: page that buffer resides in
115 * @offset: offset into page for start of buffer
116 * @size: size of buffer to map
117 * @dir: DMA transfer direction
118 *
119 * Ensure that any data held in the cache is appropriately discarded
120 * or written back.
121 *
122 * The device owns this memory once this call has completed. The CPU
123 * can regain ownership by calling dma_unmap_page().
124 */
125 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
126 unsigned long offset, size_t size, enum dma_data_direction dir,
127 unsigned long attrs)
128 {
129 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
130 __dma_page_cpu_to_dev(page, offset, size, dir);
131 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
132 }
133
134 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
135 unsigned long offset, size_t size, enum dma_data_direction dir,
136 unsigned long attrs)
137 {
138 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
139 }
140
141 /**
142 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
143 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
144 * @handle: DMA address of buffer
145 * @size: size of buffer (same as passed to dma_map_page)
146 * @dir: DMA transfer direction (same as passed to dma_map_page)
147 *
148 * Unmap a page streaming mode DMA translation. The handle and size
149 * must match what was provided in the previous dma_map_page() call.
150 * All other usages are undefined.
151 *
152 * After this call, reads by the CPU to the buffer are guaranteed to see
153 * whatever the device wrote there.
154 */
155 static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
156 size_t size, enum dma_data_direction dir, unsigned long attrs)
157 {
158 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
159 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
160 handle & ~PAGE_MASK, size, dir);
161 }
162
163 static void arm_dma_sync_single_for_cpu(struct device *dev,
164 dma_addr_t handle, size_t size, enum dma_data_direction dir)
165 {
166 unsigned int offset = handle & (PAGE_SIZE - 1);
167 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
168 __dma_page_dev_to_cpu(page, offset, size, dir);
169 }
170
171 static void arm_dma_sync_single_for_device(struct device *dev,
172 dma_addr_t handle, size_t size, enum dma_data_direction dir)
173 {
174 unsigned int offset = handle & (PAGE_SIZE - 1);
175 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
176 __dma_page_cpu_to_dev(page, offset, size, dir);
177 }
178
179 /*
180 * Return whether the given device DMA address mask can be supported
181 * properly. For example, if your device can only drive the low 24-bits
182 * during bus mastering, then you would pass 0x00ffffff as the mask
183 * to this function.
184 */
185 static int arm_dma_supported(struct device *dev, u64 mask)
186 {
187 unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
188
189 /*
190 * Translate the device's DMA mask to a PFN limit. This
191 * PFN number includes the page which we can DMA to.
192 */
193 return dma_to_pfn(dev, mask) >= max_dma_pfn;
194 }
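
/*
 * Illustrative sketch (not part of this file): a driver for a device that can
 * only address the low 24 bits would negotiate its mask like this before any
 * dma_map_*() call; arm_dma_supported() above is what ends up validating the
 * request.  "pdev" is hypothetical.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 */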
195
196 const struct dma_map_ops arm_dma_ops = {
197 .alloc = arm_dma_alloc,
198 .free = arm_dma_free,
199 .alloc_pages = dma_direct_alloc_pages,
200 .free_pages = dma_direct_free_pages,
201 .mmap = arm_dma_mmap,
202 .get_sgtable = arm_dma_get_sgtable,
203 .map_page = arm_dma_map_page,
204 .unmap_page = arm_dma_unmap_page,
205 .map_sg = arm_dma_map_sg,
206 .unmap_sg = arm_dma_unmap_sg,
207 .map_resource = dma_direct_map_resource,
208 .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
209 .sync_single_for_device = arm_dma_sync_single_for_device,
210 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
211 .sync_sg_for_device = arm_dma_sync_sg_for_device,
212 .dma_supported = arm_dma_supported,
213 .get_required_mask = dma_direct_get_required_mask,
214 };
215 EXPORT_SYMBOL(arm_dma_ops);
216
217 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
218 dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
219 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
220 dma_addr_t handle, unsigned long attrs);
221 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
222 void *cpu_addr, dma_addr_t dma_addr, size_t size,
223 unsigned long attrs);
224
225 const struct dma_map_ops arm_coherent_dma_ops = {
226 .alloc = arm_coherent_dma_alloc,
227 .free = arm_coherent_dma_free,
228 .alloc_pages = dma_direct_alloc_pages,
229 .free_pages = dma_direct_free_pages,
230 .mmap = arm_coherent_dma_mmap,
231 .get_sgtable = arm_dma_get_sgtable,
232 .map_page = arm_coherent_dma_map_page,
233 .map_sg = arm_dma_map_sg,
234 .map_resource = dma_direct_map_resource,
235 .dma_supported = arm_dma_supported,
236 .get_required_mask = dma_direct_get_required_mask,
237 };
238 EXPORT_SYMBOL(arm_coherent_dma_ops);
239
240 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
241 {
242 /*
243 * Ensure that the allocated pages are zeroed, and that any data
244 * lurking in the kernel direct-mapped region is invalidated.
245 */
246 if (PageHighMem(page)) {
247 phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
248 phys_addr_t end = base + size;
249 while (size > 0) {
250 void *ptr = kmap_atomic(page);
251 memset(ptr, 0, PAGE_SIZE);
252 if (coherent_flag != COHERENT)
253 dmac_flush_range(ptr, ptr + PAGE_SIZE);
254 kunmap_atomic(ptr);
255 page++;
256 size -= PAGE_SIZE;
257 }
258 if (coherent_flag != COHERENT)
259 outer_flush_range(base, end);
260 } else {
261 void *ptr = page_address(page);
262 memset(ptr, 0, size);
263 if (coherent_flag != COHERENT) {
264 dmac_flush_range(ptr, ptr + size);
265 outer_flush_range(__pa(ptr), __pa(ptr) + size);
266 }
267 }
268 }
269
270 /*
271 * Allocate a DMA buffer for 'dev' of size 'size' using the
272 * specified gfp mask. Note that 'size' must be page aligned.
273 */
274 static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
275 gfp_t gfp, int coherent_flag)
276 {
277 unsigned long order = get_order(size);
278 struct page *page, *p, *e;
279
280 page = alloc_pages(gfp, order);
281 if (!page)
282 return NULL;
283
284 /*
285 * Now split the huge page and free the excess pages
286 */
287 split_page(page, order);
288 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
289 __free_page(p);
290
291 __dma_clear_buffer(page, size, coherent_flag);
292
293 return page;
294 }
295
296 /*
297 * Free a DMA buffer. 'size' must be page aligned.
298 */
299 static void __dma_free_buffer(struct page *page, size_t size)
300 {
301 struct page *e = page + (size >> PAGE_SHIFT);
302
303 while (page < e) {
304 __free_page(page);
305 page++;
306 }
307 }
308
309 static void *__alloc_from_contiguous(struct device *dev, size_t size,
310 pgprot_t prot, struct page **ret_page,
311 const void *caller, bool want_vaddr,
312 int coherent_flag, gfp_t gfp);
313
314 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
315 pgprot_t prot, struct page **ret_page,
316 const void *caller, bool want_vaddr);
317
318 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
319 static struct gen_pool *atomic_pool __ro_after_init;
320
321 static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
322
323 static int __init early_coherent_pool(char *p)
324 {
325 atomic_pool_size = memparse(p, &p);
326 return 0;
327 }
328 early_param("coherent_pool", early_coherent_pool);
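
/*
 * The pool size can be overridden on the kernel command line, e.g.
 * "coherent_pool=4M" to reserve 4 MiB instead of the default 256 KiB.
 */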
329
330 /*
331 * Initialise the coherent pool for atomic allocations.
332 */
333 static int __init atomic_pool_init(void)
334 {
335 pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
336 gfp_t gfp = GFP_KERNEL | GFP_DMA;
337 struct page *page;
338 void *ptr;
339
340 atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
341 if (!atomic_pool)
342 goto out;
343 /*
344 * The atomic pool is only used for non-coherent allocations
345 * so we must pass NORMAL for coherent_flag.
346 */
347 if (dev_get_cma_area(NULL))
348 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
349 &page, atomic_pool_init, true, NORMAL,
350 GFP_KERNEL);
351 else
352 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
353 &page, atomic_pool_init, true);
354 if (ptr) {
355 int ret;
356
357 ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
358 page_to_phys(page),
359 atomic_pool_size, -1);
360 if (ret)
361 goto destroy_genpool;
362
363 gen_pool_set_algo(atomic_pool,
364 gen_pool_first_fit_order_align,
365 NULL);
366 pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
367 atomic_pool_size / 1024);
368 return 0;
369 }
370
371 destroy_genpool:
372 gen_pool_destroy(atomic_pool);
373 atomic_pool = NULL;
374 out:
375 pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
376 atomic_pool_size / 1024);
377 return -ENOMEM;
378 }
379 /*
380 * CMA is activated by core_initcall, so we must be called after it.
381 */
382 postcore_initcall(atomic_pool_init);
383
384 #ifdef CONFIG_CMA_AREAS
385 struct dma_contig_early_reserve {
386 phys_addr_t base;
387 unsigned long size;
388 };
389
390 static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
391
392 static int dma_mmu_remap_num __initdata;
393
394 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
395 {
396 dma_mmu_remap[dma_mmu_remap_num].base = base;
397 dma_mmu_remap[dma_mmu_remap_num].size = size;
398 dma_mmu_remap_num++;
399 }
400
401 void __init dma_contiguous_remap(void)
402 {
403 int i;
404 for (i = 0; i < dma_mmu_remap_num; i++) {
405 phys_addr_t start = dma_mmu_remap[i].base;
406 phys_addr_t end = start + dma_mmu_remap[i].size;
407 struct map_desc map;
408 unsigned long addr;
409
410 if (end > arm_lowmem_limit)
411 end = arm_lowmem_limit;
412 if (start >= end)
413 continue;
414
415 map.pfn = __phys_to_pfn(start);
416 map.virtual = __phys_to_virt(start);
417 map.length = end - start;
418 map.type = MT_MEMORY_DMA_READY;
419
420 /*
421 * Clear previous low-memory mapping to ensure that the
422 * TLB does not see any conflicting entries, then flush
423 * the TLB of the old entries before creating new mappings.
424 *
425 * This ensures that any speculatively loaded TLB entries
426 * (even though they may be rare) can not cause any problems,
427 * and ensures that this code is architecturally compliant.
428 */
429 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
430 addr += PMD_SIZE)
431 pmd_clear(pmd_off_k(addr));
432
433 flush_tlb_kernel_range(__phys_to_virt(start),
434 __phys_to_virt(end));
435
436 iotable_init(&map, 1);
437 }
438 }
439 #endif
440
441 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
442 {
443 struct page *page = virt_to_page(addr);
444 pgprot_t prot = *(pgprot_t *)data;
445
446 set_pte_ext(pte, mk_pte(page, prot), 0);
447 return 0;
448 }
449
450 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
451 {
452 unsigned long start = (unsigned long) page_address(page);
453 unsigned end = start + size;
454
455 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
456 flush_tlb_kernel_range(start, end);
457 }
458
459 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
460 pgprot_t prot, struct page **ret_page,
461 const void *caller, bool want_vaddr)
462 {
463 struct page *page;
464 void *ptr = NULL;
465 /*
466 * __alloc_remap_buffer is only called when the device is
467 * non-coherent
468 */
469 page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
470 if (!page)
471 return NULL;
472 if (!want_vaddr)
473 goto out;
474
475 ptr = dma_common_contiguous_remap(page, size, prot, caller);
476 if (!ptr) {
477 __dma_free_buffer(page, size);
478 return NULL;
479 }
480
481 out:
482 *ret_page = page;
483 return ptr;
484 }
485
486 static void *__alloc_from_pool(size_t size, struct page **ret_page)
487 {
488 unsigned long val;
489 void *ptr = NULL;
490
491 if (!atomic_pool) {
492 WARN(1, "coherent pool not initialised!\n");
493 return NULL;
494 }
495
496 val = gen_pool_alloc(atomic_pool, size);
497 if (val) {
498 phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
499
500 *ret_page = phys_to_page(phys);
501 ptr = (void *)val;
502 }
503
504 return ptr;
505 }
506
507 static bool __in_atomic_pool(void *start, size_t size)
508 {
509 return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
510 }
511
512 static int __free_from_pool(void *start, size_t size)
513 {
514 if (!__in_atomic_pool(start, size))
515 return 0;
516
517 gen_pool_free(atomic_pool, (unsigned long)start, size);
518
519 return 1;
520 }
521
522 static void *__alloc_from_contiguous(struct device *dev, size_t size,
523 pgprot_t prot, struct page **ret_page,
524 const void *caller, bool want_vaddr,
525 int coherent_flag, gfp_t gfp)
526 {
527 unsigned long order = get_order(size);
528 size_t count = size >> PAGE_SHIFT;
529 struct page *page;
530 void *ptr = NULL;
531
532 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
533 if (!page)
534 return NULL;
535
536 __dma_clear_buffer(page, size, coherent_flag);
537
538 if (!want_vaddr)
539 goto out;
540
541 if (PageHighMem(page)) {
542 ptr = dma_common_contiguous_remap(page, size, prot, caller);
543 if (!ptr) {
544 dma_release_from_contiguous(dev, page, count);
545 return NULL;
546 }
547 } else {
548 __dma_remap(page, size, prot);
549 ptr = page_address(page);
550 }
551
552 out:
553 *ret_page = page;
554 return ptr;
555 }
556
557 static void __free_from_contiguous(struct device *dev, struct page *page,
558 void *cpu_addr, size_t size, bool want_vaddr)
559 {
560 if (want_vaddr) {
561 if (PageHighMem(page))
562 dma_common_free_remap(cpu_addr, size);
563 else
564 __dma_remap(page, size, PAGE_KERNEL);
565 }
566 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
567 }
568
569 static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
570 {
571 prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
572 pgprot_writecombine(prot) :
573 pgprot_dmacoherent(prot);
574 return prot;
575 }
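
/*
 * Illustrative sketch (not part of this file): a driver that wants a
 * write-combining rather than a strongly uncached CPU mapping passes
 * DMA_ATTR_WRITE_COMBINE, which __get_dma_pgprot() above translates into
 * pgprot_writecombine().  "dev" and "size" are hypothetical.
 *
 *	dma_addr_t dma;
 *	void *vaddr = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *				      DMA_ATTR_WRITE_COMBINE);
 *	...
 *	dma_free_attrs(dev, size, vaddr, dma, DMA_ATTR_WRITE_COMBINE);
 */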
576
577 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
578 struct page **ret_page)
579 {
580 struct page *page;
581 /* __alloc_simple_buffer is only called when the device is coherent */
582 page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
583 if (!page)
584 return NULL;
585
586 *ret_page = page;
587 return page_address(page);
588 }
589
590 static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
591 struct page **ret_page)
592 {
593 return __alloc_simple_buffer(args->dev, args->size, args->gfp,
594 ret_page);
595 }
596
597 static void simple_allocator_free(struct arm_dma_free_args *args)
598 {
599 __dma_free_buffer(args->page, args->size);
600 }
601
602 static struct arm_dma_allocator simple_allocator = {
603 .alloc = simple_allocator_alloc,
604 .free = simple_allocator_free,
605 };
606
607 static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
608 struct page **ret_page)
609 {
610 return __alloc_from_contiguous(args->dev, args->size, args->prot,
611 ret_page, args->caller,
612 args->want_vaddr, args->coherent_flag,
613 args->gfp);
614 }
615
616 static void cma_allocator_free(struct arm_dma_free_args *args)
617 {
618 __free_from_contiguous(args->dev, args->page, args->cpu_addr,
619 args->size, args->want_vaddr);
620 }
621
622 static struct arm_dma_allocator cma_allocator = {
623 .alloc = cma_allocator_alloc,
624 .free = cma_allocator_free,
625 };
626
627 static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
628 struct page **ret_page)
629 {
630 return __alloc_from_pool(args->size, ret_page);
631 }
632
633 static void pool_allocator_free(struct arm_dma_free_args *args)
634 {
635 __free_from_pool(args->cpu_addr, args->size);
636 }
637
638 static struct arm_dma_allocator pool_allocator = {
639 .alloc = pool_allocator_alloc,
640 .free = pool_allocator_free,
641 };
642
643 static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
644 struct page **ret_page)
645 {
646 return __alloc_remap_buffer(args->dev, args->size, args->gfp,
647 args->prot, ret_page, args->caller,
648 args->want_vaddr);
649 }
650
651 static void remap_allocator_free(struct arm_dma_free_args *args)
652 {
653 if (args->want_vaddr)
654 dma_common_free_remap(args->cpu_addr, args->size);
655
656 __dma_free_buffer(args->page, args->size);
657 }
658
659 static struct arm_dma_allocator remap_allocator = {
660 .alloc = remap_allocator_alloc,
661 .free = remap_allocator_free,
662 };
663
664 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
665 gfp_t gfp, pgprot_t prot, bool is_coherent,
666 unsigned long attrs, const void *caller)
667 {
668 u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
669 struct page *page = NULL;
670 void *addr;
671 bool allowblock, cma;
672 struct arm_dma_buffer *buf;
673 struct arm_dma_alloc_args args = {
674 .dev = dev,
675 .size = PAGE_ALIGN(size),
676 .gfp = gfp,
677 .prot = prot,
678 .caller = caller,
679 .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
680 .coherent_flag = is_coherent ? COHERENT : NORMAL,
681 };
682
683 #ifdef CONFIG_DMA_API_DEBUG
684 u64 limit = (mask + 1) & ~mask;
685 if (limit && size >= limit) {
686 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
687 size, mask);
688 return NULL;
689 }
690 #endif
691
692 buf = kzalloc(sizeof(*buf),
693 gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
694 if (!buf)
695 return NULL;
696
697 if (mask < 0xffffffffULL)
698 gfp |= GFP_DMA;
699
700 /*
701 * Following is a work-around (a.k.a. hack) to prevent pages
702 * with __GFP_COMP being passed to split_page() which cannot
703 * handle them. The real problem is that this flag probably
704 * should be 0 on ARM as it is not supported on this
705 * platform; see CONFIG_HUGETLBFS.
706 */
707 gfp &= ~(__GFP_COMP);
708 args.gfp = gfp;
709
710 *handle = DMA_MAPPING_ERROR;
711 allowblock = gfpflags_allow_blocking(gfp);
712 cma = allowblock ? dev_get_cma_area(dev) : false;
713
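/*
 * Pick the backing allocator: CMA when we may block and a CMA area
 * exists, a plain page allocation for coherent devices, a remapped
 * (uncached) lowmem buffer when blocking is allowed, and otherwise
 * the pre-reserved atomic pool for non-blocking contexts where
 * remapping is not possible.
 */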
714 if (cma)
715 buf->allocator = &cma_allocator;
716 else if (is_coherent)
717 buf->allocator = &simple_allocator;
718 else if (allowblock)
719 buf->allocator = &remap_allocator;
720 else
721 buf->allocator = &pool_allocator;
722
723 addr = buf->allocator->alloc(&args, &page);
724
725 if (page) {
726 unsigned long flags;
727
728 *handle = pfn_to_dma(dev, page_to_pfn(page));
729 buf->virt = args.want_vaddr ? addr : page;
730
731 spin_lock_irqsave(&arm_dma_bufs_lock, flags);
732 list_add(&buf->list, &arm_dma_bufs);
733 spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
734 } else {
735 kfree(buf);
736 }
737
738 return args.want_vaddr ? addr : page;
739 }
740
741 /*
742 * Allocate DMA-coherent memory space and return both the kernel remapped
743 * virtual and bus address for that space.
744 */
745 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
746 gfp_t gfp, unsigned long attrs)
747 {
748 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
749
750 return __dma_alloc(dev, size, handle, gfp, prot, false,
751 attrs, __builtin_return_address(0));
752 }
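
/*
 * Illustrative sketch (not part of this file): the usual driver-facing entry
 * point is dma_alloc_coherent(), which reaches arm_dma_alloc() through the
 * dma_map_ops above on non-LPAE systems.  "dev" and "ring" are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */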
753
754 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
755 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
756 {
757 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
758 attrs, __builtin_return_address(0));
759 }
760
761 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
762 void *cpu_addr, dma_addr_t dma_addr, size_t size,
763 unsigned long attrs)
764 {
765 int ret = -ENXIO;
766 unsigned long nr_vma_pages = vma_pages(vma);
767 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
768 unsigned long pfn = dma_to_pfn(dev, dma_addr);
769 unsigned long off = vma->vm_pgoff;
770
771 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
772 return ret;
773
774 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
775 ret = remap_pfn_range(vma, vma->vm_start,
776 pfn + off,
777 vma->vm_end - vma->vm_start,
778 vma->vm_page_prot);
779 }
780
781 return ret;
782 }
783
784 /*
785 * Create userspace mapping for the DMA-coherent memory.
786 */
787 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
788 void *cpu_addr, dma_addr_t dma_addr, size_t size,
789 unsigned long attrs)
790 {
791 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
792 }
793
794 int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
795 void *cpu_addr, dma_addr_t dma_addr, size_t size,
796 unsigned long attrs)
797 {
798 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
799 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
800 }
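
/*
 * Illustrative sketch (not part of this file): a driver usually reaches
 * arm_dma_mmap() from its own mmap file operation via dma_mmap_coherent().
 * The foo_mmap()/foo_priv names and fields are hypothetical; the buffer is
 * assumed to come from dma_alloc_coherent().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->vaddr,
 *					 priv->dma_handle, priv->size);
 *	}
 */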
801
802 /*
803 * Free a buffer as defined by the above mapping.
804 */
805 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
806 dma_addr_t handle, unsigned long attrs,
807 bool is_coherent)
808 {
809 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
810 struct arm_dma_buffer *buf;
811 struct arm_dma_free_args args = {
812 .dev = dev,
813 .size = PAGE_ALIGN(size),
814 .cpu_addr = cpu_addr,
815 .page = page,
816 .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
817 };
818
819 buf = arm_dma_buffer_find(cpu_addr);
820 if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
821 return;
822
823 buf->allocator->free(&args);
824 kfree(buf);
825 }
826
827 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
828 dma_addr_t handle, unsigned long attrs)
829 {
830 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
831 }
832
833 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
834 dma_addr_t handle, unsigned long attrs)
835 {
836 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
837 }
838
839 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
840 void *cpu_addr, dma_addr_t handle, size_t size,
841 unsigned long attrs)
842 {
843 unsigned long pfn = dma_to_pfn(dev, handle);
844 struct page *page;
845 int ret;
846
847 /* If the PFN is not valid, we do not have a struct page */
848 if (!pfn_valid(pfn))
849 return -ENXIO;
850
851 page = pfn_to_page(pfn);
852
853 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
854 if (unlikely(ret))
855 return ret;
856
857 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
858 return 0;
859 }
860
861 static void dma_cache_maint_page(struct page *page, unsigned long offset,
862 size_t size, enum dma_data_direction dir,
863 void (*op)(const void *, size_t, int))
864 {
865 unsigned long pfn;
866 size_t left = size;
867
868 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
869 offset %= PAGE_SIZE;
870
871 /*
872 * A single sg entry may refer to multiple physically contiguous
873 * pages. But we still need to process highmem pages individually.
874 * If highmem is not configured then the bulk of this loop gets
875 * optimized out.
876 */
877 do {
878 size_t len = left;
879 void *vaddr;
880
881 page = pfn_to_page(pfn);
882
883 if (PageHighMem(page)) {
884 if (len + offset > PAGE_SIZE)
885 len = PAGE_SIZE - offset;
886
887 if (cache_is_vipt_nonaliasing()) {
888 vaddr = kmap_atomic(page);
889 op(vaddr + offset, len, dir);
890 kunmap_atomic(vaddr);
891 } else {
892 vaddr = kmap_high_get(page);
893 if (vaddr) {
894 op(vaddr + offset, len, dir);
895 kunmap_high(page);
896 }
897 }
898 } else {
899 vaddr = page_address(page) + offset;
900 op(vaddr, len, dir);
901 }
902 offset = 0;
903 pfn++;
904 left -= len;
905 } while (left);
906 }
907
908 /*
909 * Make an area consistent for devices.
910 * Note: Drivers should NOT use this function directly, as it will break
911 * platforms with CONFIG_DMABOUNCE.
912 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
913 */
914 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
915 size_t size, enum dma_data_direction dir)
916 {
917 phys_addr_t paddr;
918
919 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
920
921 paddr = page_to_phys(page) + off;
922 if (dir == DMA_FROM_DEVICE) {
923 outer_inv_range(paddr, paddr + size);
924 } else {
925 outer_clean_range(paddr, paddr + size);
926 }
927 /* FIXME: non-speculating: flush on bidirectional mappings? */
928 }
929
930 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
931 size_t size, enum dma_data_direction dir)
932 {
933 phys_addr_t paddr = page_to_phys(page) + off;
934
935 /* FIXME: non-speculating: not required */
936 /* in any case, don't bother invalidating if DMA to device */
937 if (dir != DMA_TO_DEVICE) {
938 outer_inv_range(paddr, paddr + size);
939
940 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
941 }
942
943 /*
944 * Mark the D-cache clean for these pages to avoid extra flushing.
945 */
946 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
947 unsigned long pfn;
948 size_t left = size;
949
950 pfn = page_to_pfn(page) + off / PAGE_SIZE;
951 off %= PAGE_SIZE;
952 if (off) {
953 pfn++;
954 left -= PAGE_SIZE - off;
955 }
956 while (left >= PAGE_SIZE) {
957 page = pfn_to_page(pfn++);
958 set_bit(PG_dcache_clean, &page->flags);
959 left -= PAGE_SIZE;
960 }
961 }
962 }
963
964 /**
965 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
966 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
967 * @sg: list of buffers
968 * @nents: number of buffers to map
969 * @dir: DMA transfer direction
970 *
971 * Map a set of buffers described by scatterlist in streaming mode for DMA.
972 * This is the scatter-gather version of the dma_map_single interface.
973 * Here the scatter gather list elements are each tagged with the
974 * appropriate dma address and length. They are obtained via
975 * sg_dma_{address,length}.
976 *
977 * Device ownership issues as mentioned for dma_map_single are the same
978 * here.
979 */
980 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
981 enum dma_data_direction dir, unsigned long attrs)
982 {
983 const struct dma_map_ops *ops = get_dma_ops(dev);
984 struct scatterlist *s;
985 int i, j, ret;
986
987 for_each_sg(sg, s, nents, i) {
988 #ifdef CONFIG_NEED_SG_DMA_LENGTH
989 s->dma_length = s->length;
990 #endif
991 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
992 s->length, dir, attrs);
993 if (dma_mapping_error(dev, s->dma_address)) {
994 ret = -EIO;
995 goto bad_mapping;
996 }
997 }
998 return nents;
999
1000 bad_mapping:
1001 for_each_sg(sg, s, i, j)
1002 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1003 return ret;
1004 }
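
/*
 * Illustrative sketch (not part of this file): typical driver-side use of the
 * scatter-gather mapping implemented above.  "dev", "sgl", "nents" and
 * program_hw_descriptor() are hypothetical; note that the hardware must be
 * programmed with the returned count, while dma_unmap_sg() takes the
 * original nents.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count <= 0)
 *		return -EIO;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */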
1005
1006 /**
1007 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1008 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1009 * @sg: list of buffers
1010 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1011 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1012 *
1013 * Unmap a set of streaming mode DMA translations. Again, CPU access
1014 * rules concerning calls here are the same as for dma_unmap_single().
1015 */
1016 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1017 enum dma_data_direction dir, unsigned long attrs)
1018 {
1019 const struct dma_map_ops *ops = get_dma_ops(dev);
1020 struct scatterlist *s;
1021
1022 int i;
1023
1024 for_each_sg(sg, s, nents, i)
1025 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1026 }
1027
1028 /**
1029 * arm_dma_sync_sg_for_cpu
1030 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1031 * @sg: list of buffers
1032 * @nents: number of buffers to map (returned from dma_map_sg)
1033 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1034 */
1035 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1036 int nents, enum dma_data_direction dir)
1037 {
1038 const struct dma_map_ops *ops = get_dma_ops(dev);
1039 struct scatterlist *s;
1040 int i;
1041
1042 for_each_sg(sg, s, nents, i)
1043 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
1044 dir);
1045 }
1046
1047 /**
1048 * arm_dma_sync_sg_for_device
1049 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1050 * @sg: list of buffers
1051 * @nents: number of buffers to map (returned from dma_map_sg)
1052 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1053 */
1054 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1055 int nents, enum dma_data_direction dir)
1056 {
1057 const struct dma_map_ops *ops = get_dma_ops(dev);
1058 struct scatterlist *s;
1059 int i;
1060
1061 for_each_sg(sg, s, nents, i)
1062 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
1063 dir);
1064 }
1065
1066 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
1067 {
1068 /*
1069 * When CONFIG_ARM_LPAE is set, physical address can extend above
1070 * 32-bits, which then can't be addressed by devices that only support
1071 * 32-bit DMA.
1072 * Use the generic dma-direct / swiotlb ops code in that case, as that
1073 * handles bounce buffering for us.
1074 */
1075 if (IS_ENABLED(CONFIG_ARM_LPAE))
1076 return NULL;
1077 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
1078 }
1079
1080 #ifdef CONFIG_ARM_DMA_USE_IOMMU
1081
1082 static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
1083 {
1084 int prot = 0;
1085
1086 if (attrs & DMA_ATTR_PRIVILEGED)
1087 prot |= IOMMU_PRIV;
1088
1089 switch (dir) {
1090 case DMA_BIDIRECTIONAL:
1091 return prot | IOMMU_READ | IOMMU_WRITE;
1092 case DMA_TO_DEVICE:
1093 return prot | IOMMU_READ;
1094 case DMA_FROM_DEVICE:
1095 return prot | IOMMU_WRITE;
1096 default:
1097 return prot;
1098 }
1099 }
1100
1101 /* IOMMU */
1102
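/*
 * Illustrative sketch (not part of this file): the IOMMU path is only used
 * once a bus or driver has created and attached a per-device mapping,
 * typically along these lines (error handling omitted; "dev" and the base
 * and size values are hypothetical):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_256M);
 *	if (!IS_ERR(mapping))
 *		arm_iommu_attach_device(dev, mapping);
 *
 * After attaching, the arm_iommu_* dma_map_ops below allocate IO virtual
 * addresses from the mapping's bitmap via __alloc_iova().
 */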
1103 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
1104
1105 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
1106 size_t size)
1107 {
1108 unsigned int order = get_order(size);
1109 unsigned int align = 0;
1110 unsigned int count, start;
1111 size_t mapping_size = mapping->bits << PAGE_SHIFT;
1112 unsigned long flags;
1113 dma_addr_t iova;
1114 int i;
1115
1116 if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
1117 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
1118
1119 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1120 align = (1 << order) - 1;
1121
1122 spin_lock_irqsave(&mapping->lock, flags);
1123 for (i = 0; i < mapping->nr_bitmaps; i++) {
1124 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1125 mapping->bits, 0, count, align);
1126
1127 if (start > mapping->bits)
1128 continue;
1129
1130 bitmap_set(mapping->bitmaps[i], start, count);
1131 break;
1132 }
1133
1134 /*
1135 * No unused range found. Try to extend the existing mapping
1136 * and perform a second attempt to reserve an IO virtual
1137 * address range of size bytes.
1138 */
1139 if (i == mapping->nr_bitmaps) {
1140 if (extend_iommu_mapping(mapping)) {
1141 spin_unlock_irqrestore(&mapping->lock, flags);
1142 return DMA_MAPPING_ERROR;
1143 }
1144
1145 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1146 mapping->bits, 0, count, align);
1147
1148 if (start > mapping->bits) {
1149 spin_unlock_irqrestore(&mapping->lock, flags);
1150 return DMA_MAPPING_ERROR;
1151 }
1152
1153 bitmap_set(mapping->bitmaps[i], start, count);
1154 }
1155 spin_unlock_irqrestore(&mapping->lock, flags);
1156
1157 iova = mapping->base + (mapping_size * i);
1158 iova += start << PAGE_SHIFT;
1159
1160 return iova;
1161 }
1162
1163 static inline void __free_iova(struct dma_iommu_mapping *mapping,
1164 dma_addr_t addr, size_t size)
1165 {
1166 unsigned int start, count;
1167 size_t mapping_size = mapping->bits << PAGE_SHIFT;
1168 unsigned long flags;
1169 dma_addr_t bitmap_base;
1170 u32 bitmap_index;
1171
1172 if (!size)
1173 return;
1174
1175 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
1176 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
1177
1178 bitmap_base = mapping->base + mapping_size * bitmap_index;
1179
1180 start = (addr - bitmap_base) >> PAGE_SHIFT;
1181
1182 if (addr + size > bitmap_base + mapping_size) {
1183 /*
1184 * The address range to be freed reaches into the iova
1185 * range of the next bitmap. This should not happen as
1186 * we don't allow this in __alloc_iova (at the
1187 * moment).
1188 */
1189 BUG();
1190 } else
1191 count = size >> PAGE_SHIFT;
1192
1193 spin_lock_irqsave(&mapping->lock, flags);
1194 bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
1195 spin_unlock_irqrestore(&mapping->lock, flags);
1196 }
1197
1198 /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
1199 static const int iommu_order_array[] = { 9, 8, 4, 0 };
1200
1201 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1202 gfp_t gfp, unsigned long attrs,
1203 int coherent_flag)
1204 {
1205 struct page **pages;
1206 int count = size >> PAGE_SHIFT;
1207 int array_size = count * sizeof(struct page *);
1208 int i = 0;
1209 int order_idx = 0;
1210
1211 if (array_size <= PAGE_SIZE)
1212 pages = kzalloc(array_size, GFP_KERNEL);
1213 else
1214 pages = vzalloc(array_size);
1215 if (!pages)
1216 return NULL;
1217
1218 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
1219 {
1220 unsigned long order = get_order(size);
1221 struct page *page;
1222
1223 page = dma_alloc_from_contiguous(dev, count, order,
1224 gfp & __GFP_NOWARN);
1225 if (!page)
1226 goto error;
1227
1228 __dma_clear_buffer(page, size, coherent_flag);
1229
1230 for (i = 0; i < count; i++)
1231 pages[i] = page + i;
1232
1233 return pages;
1234 }
1235
1236 /* Go straight to 4K chunks if caller says it's OK. */
1237 if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
1238 order_idx = ARRAY_SIZE(iommu_order_array) - 1;
1239
1240 /*
1241 * IOMMU can map any pages, so highmem can also be used here
1242 */
1243 gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1244
1245 while (count) {
1246 int j, order;
1247
1248 order = iommu_order_array[order_idx];
1249
1250 /* Drop down when we get small */
1251 if (__fls(count) < order) {
1252 order_idx++;
1253 continue;
1254 }
1255
1256 if (order) {
1257 /* See if it's easy to allocate a high-order chunk */
1258 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
1259
1260 /* Go down a notch at first sign of pressure */
1261 if (!pages[i]) {
1262 order_idx++;
1263 continue;
1264 }
1265 } else {
1266 pages[i] = alloc_pages(gfp, 0);
1267 if (!pages[i])
1268 goto error;
1269 }
1270
1271 if (order) {
1272 split_page(pages[i], order);
1273 j = 1 << order;
1274 while (--j)
1275 pages[i + j] = pages[i] + j;
1276 }
1277
1278 __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
1279 i += 1 << order;
1280 count -= 1 << order;
1281 }
1282
1283 return pages;
1284 error:
1285 while (i--)
1286 if (pages[i])
1287 __free_pages(pages[i], 0);
1288 kvfree(pages);
1289 return NULL;
1290 }
1291
1292 static int __iommu_free_buffer(struct device *dev, struct page **pages,
1293 size_t size, unsigned long attrs)
1294 {
1295 int count = size >> PAGE_SHIFT;
1296 int i;
1297
1298 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
1299 dma_release_from_contiguous(dev, pages[0], count);
1300 } else {
1301 for (i = 0; i < count; i++)
1302 if (pages[i])
1303 __free_pages(pages[i], 0);
1304 }
1305
1306 kvfree(pages);
1307 return 0;
1308 }
1309
1310 /*
1311 * Create a mapping in device IO address space for specified pages
1312 */
1313 static dma_addr_t
1314 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
1315 unsigned long attrs)
1316 {
1317 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1318 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1319 dma_addr_t dma_addr, iova;
1320 int i;
1321
1322 dma_addr = __alloc_iova(mapping, size);
1323 if (dma_addr == DMA_MAPPING_ERROR)
1324 return dma_addr;
1325
1326 iova = dma_addr;
1327 for (i = 0; i < count; ) {
1328 int ret;
1329
1330 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1331 phys_addr_t phys = page_to_phys(pages[i]);
1332 unsigned int len, j;
1333
1334 for (j = i + 1; j < count; j++, next_pfn++)
1335 if (page_to_pfn(pages[j]) != next_pfn)
1336 break;
1337
1338 len = (j - i) << PAGE_SHIFT;
1339 ret = iommu_map(mapping->domain, iova, phys, len,
1340 __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
1341 if (ret < 0)
1342 goto fail;
1343 iova += len;
1344 i = j;
1345 }
1346 return dma_addr;
1347 fail:
1348 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1349 __free_iova(mapping, dma_addr, size);
1350 return DMA_MAPPING_ERROR;
1351 }
1352
1353 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1354 {
1355 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1356
1357 /*
1358 * add optional in-page offset from iova to size and align
1359 * result to page size
1360 */
1361 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1362 iova &= PAGE_MASK;
1363
1364 iommu_unmap(mapping->domain, iova, size);
1365 __free_iova(mapping, iova, size);
1366 return 0;
1367 }
1368
1369 static struct page **__atomic_get_pages(void *addr)
1370 {
1371 struct page *page;
1372 phys_addr_t phys;
1373
1374 phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
1375 page = phys_to_page(phys);
1376
1377 return (struct page **)page;
1378 }
1379
1380 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1381 {
1382 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1383 return __atomic_get_pages(cpu_addr);
1384
1385 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1386 return cpu_addr;
1387
1388 return dma_common_find_pages(cpu_addr);
1389 }
1390
1391 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
1392 dma_addr_t *handle, int coherent_flag,
1393 unsigned long attrs)
1394 {
1395 struct page *page;
1396 void *addr;
1397
1398 if (coherent_flag == COHERENT)
1399 addr = __alloc_simple_buffer(dev, size, gfp, &page);
1400 else
1401 addr = __alloc_from_pool(size, &page);
1402 if (!addr)
1403 return NULL;
1404
1405 *handle = __iommu_create_mapping(dev, &page, size, attrs);
1406 if (*handle == DMA_MAPPING_ERROR)
1407 goto err_mapping;
1408
1409 return addr;
1410
1411 err_mapping:
1412 __free_from_pool(addr, size);
1413 return NULL;
1414 }
1415
1416 static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
1417 dma_addr_t handle, size_t size, int coherent_flag)
1418 {
1419 __iommu_remove_mapping(dev, handle, size);
1420 if (coherent_flag == COHERENT)
1421 __dma_free_buffer(virt_to_page(cpu_addr), size);
1422 else
1423 __free_from_pool(cpu_addr, size);
1424 }
1425
1426 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
1427 dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
1428 int coherent_flag)
1429 {
1430 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
1431 struct page **pages;
1432 void *addr = NULL;
1433
1434 *handle = DMA_MAPPING_ERROR;
1435 size = PAGE_ALIGN(size);
1436
1437 if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
1438 return __iommu_alloc_simple(dev, size, gfp, handle,
1439 coherent_flag, attrs);
1440
1441 /*
1442 * Following is a work-around (a.k.a. hack) to prevent pages
1443 * with __GFP_COMP being passed to split_page() which cannot
1444 * handle them. The real problem is that this flag probably
1445 * should be 0 on ARM as it is not supported on this
1446 * platform; see CONFIG_HUGETLBFS.
1447 */
1448 gfp &= ~(__GFP_COMP);
1449
1450 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
1451 if (!pages)
1452 return NULL;
1453
1454 *handle = __iommu_create_mapping(dev, pages, size, attrs);
1455 if (*handle == DMA_MAPPING_ERROR)
1456 goto err_buffer;
1457
1458 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1459 return pages;
1460
1461 addr = dma_common_pages_remap(pages, size, prot,
1462 __builtin_return_address(0));
1463 if (!addr)
1464 goto err_mapping;
1465
1466 return addr;
1467
1468 err_mapping:
1469 __iommu_remove_mapping(dev, *handle, size);
1470 err_buffer:
1471 __iommu_free_buffer(dev, pages, size, attrs);
1472 return NULL;
1473 }
1474
1475 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1476 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1477 {
1478 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
1479 }
1480
1481 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
1482 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1483 {
1484 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
1485 }
1486
1487 static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1488 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1489 unsigned long attrs)
1490 {
1491 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1492 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1493 int err;
1494
1495 if (!pages)
1496 return -ENXIO;
1497
1498 if (vma->vm_pgoff >= nr_pages)
1499 return -ENXIO;
1500
1501 err = vm_map_pages(vma, pages, nr_pages);
1502 if (err)
1503 pr_err("Remapping memory failed: %d\n", err);
1504
1505 return err;
1506 }
1507 static int arm_iommu_mmap_attrs(struct device *dev,
1508 struct vm_area_struct *vma, void *cpu_addr,
1509 dma_addr_t dma_addr, size_t size, unsigned long attrs)
1510 {
1511 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1512
1513 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1514 }
1515
1516 static int arm_coherent_iommu_mmap_attrs(struct device *dev,
1517 struct vm_area_struct *vma, void *cpu_addr,
1518 dma_addr_t dma_addr, size_t size, unsigned long attrs)
1519 {
1520 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
1521 }
1522
1523 /*
1524 * free a page as defined by the above mapping.
1525 * Must not be called with IRQs disabled.
1526 */
1527 static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1528 dma_addr_t handle, unsigned long attrs, int coherent_flag)
1529 {
1530 struct page **pages;
1531 size = PAGE_ALIGN(size);
1532
1533 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
1534 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
1535 return;
1536 }
1537
1538 pages = __iommu_get_pages(cpu_addr, attrs);
1539 if (!pages) {
1540 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1541 return;
1542 }
1543
1544 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
1545 dma_common_free_remap(cpu_addr, size);
1546
1547 __iommu_remove_mapping(dev, handle, size);
1548 __iommu_free_buffer(dev, pages, size, attrs);
1549 }
1550
1551 static void arm_iommu_free_attrs(struct device *dev, size_t size,
1552 void *cpu_addr, dma_addr_t handle,
1553 unsigned long attrs)
1554 {
1555 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
1556 }
1557
1558 static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
1559 void *cpu_addr, dma_addr_t handle, unsigned long attrs)
1560 {
1561 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
1562 }
1563
1564 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1565 void *cpu_addr, dma_addr_t dma_addr,
1566 size_t size, unsigned long attrs)
1567 {
1568 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1569 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1570
1571 if (!pages)
1572 return -ENXIO;
1573
1574 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1575 GFP_KERNEL);
1576 }
1577
1578 /*
1579 * Map a part of the scatter-gather list into contiguous io address space
1580 */
1581 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1582 size_t size, dma_addr_t *handle,
1583 enum dma_data_direction dir, unsigned long attrs,
1584 bool is_coherent)
1585 {
1586 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1587 dma_addr_t iova, iova_base;
1588 int ret = 0;
1589 unsigned int count;
1590 struct scatterlist *s;
1591 int prot;
1592
1593 size = PAGE_ALIGN(size);
1594 *handle = DMA_MAPPING_ERROR;
1595
1596 iova_base = iova = __alloc_iova(mapping, size);
1597 if (iova == DMA_MAPPING_ERROR)
1598 return -ENOMEM;
1599
1600 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1601 phys_addr_t phys = page_to_phys(sg_page(s));
1602 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1603
1604 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1605 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1606
1607 prot = __dma_info_to_prot(dir, attrs);
1608
1609 ret = iommu_map(mapping->domain, iova, phys, len, prot);
1610 if (ret < 0)
1611 goto fail;
1612 count += len >> PAGE_SHIFT;
1613 iova += len;
1614 }
1615 *handle = iova_base;
1616
1617 return 0;
1618 fail:
1619 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1620 __free_iova(mapping, iova_base, size);
1621 return ret;
1622 }
1623
1624 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1625 enum dma_data_direction dir, unsigned long attrs,
1626 bool is_coherent)
1627 {
1628 struct scatterlist *s = sg, *dma = sg, *start = sg;
1629 int i, count = 0, ret;
1630 unsigned int offset = s->offset;
1631 unsigned int size = s->offset + s->length;
1632 unsigned int max = dma_get_max_seg_size(dev);
1633
1634 for (i = 1; i < nents; i++) {
1635 s = sg_next(s);
1636
1637 s->dma_length = 0;
1638
1639 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1640 ret = __map_sg_chunk(dev, start, size,
1641 &dma->dma_address, dir, attrs,
1642 is_coherent);
1643 if (ret < 0)
1644 goto bad_mapping;
1645
1646 dma->dma_address += offset;
1647 dma->dma_length = size - offset;
1648
1649 size = offset = s->offset;
1650 start = s;
1651 dma = sg_next(dma);
1652 count += 1;
1653 }
1654 size += s->length;
1655 }
1656 ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1657 is_coherent);
1658 if (ret < 0)
1659 goto bad_mapping;
1660
1661 dma->dma_address += offset;
1662 dma->dma_length = size - offset;
1663
1664 return count+1;
1665
1666 bad_mapping:
1667 for_each_sg(sg, s, count, i)
1668 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1669 if (ret == -ENOMEM)
1670 return ret;
1671 return -EINVAL;
1672 }
1673
1674 /**
1675 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1676 * @dev: valid struct device pointer
1677 * @sg: list of buffers
1678 * @nents: number of buffers to map
1679 * @dir: DMA transfer direction
1680 *
1681 * Map a set of i/o coherent buffers described by scatterlist in streaming
1682 * mode for DMA. The scatter gather list elements are merged together (if
1683 * possible) and tagged with the appropriate dma address and length. They are
1684 * obtained via sg_dma_{address,length}.
1685 */
1686 static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1687 int nents, enum dma_data_direction dir, unsigned long attrs)
1688 {
1689 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1690 }
1691
1692 /**
1693 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1694 * @dev: valid struct device pointer
1695 * @sg: list of buffers
1696 * @nents: number of buffers to map
1697 * @dir: DMA transfer direction
1698 *
1699 * Map a set of buffers described by a scatterlist in streaming mode for DMA.
1700 * The scatter-gather list elements are merged together (if possible) and
1701 * tagged with the appropriate DMA address and length. These are obtained via
1702 * sg_dma_{address,length}.
1703 */
1704 static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1705 int nents, enum dma_data_direction dir, unsigned long attrs)
1706 {
1707 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1708 }
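/*
 * Usage sketch (illustrative only): a driver normally reaches these ops via
 * the generic DMA API once its device has been attached to an IOMMU mapping.
 * "sgt" is an assumed driver-owned struct sg_table and queue_hw_desc() is a
 * placeholder for device-specific descriptor setup:
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgt->sgl, s, count, i)
 *		queue_hw_desc(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */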
1709
1710 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1711 int nents, enum dma_data_direction dir,
1712 unsigned long attrs, bool is_coherent)
1713 {
1714 struct scatterlist *s;
1715 int i;
1716
1717 for_each_sg(sg, s, nents, i) {
1718 if (sg_dma_len(s))
1719 __iommu_remove_mapping(dev, sg_dma_address(s),
1720 sg_dma_len(s));
1721 if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1722 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1723 s->length, dir);
1724 }
1725 }
1726
1727 /**
1728 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1729 * @dev: valid struct device pointer
1730 * @sg: list of buffers
1731 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1732 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1733 *
1734 * Unmap a set of streaming mode DMA translations. Again, CPU access
1735 * rules concerning calls here are the same as for dma_unmap_single().
1736 */
1737 static void arm_coherent_iommu_unmap_sg(struct device *dev,
1738 struct scatterlist *sg, int nents, enum dma_data_direction dir,
1739 unsigned long attrs)
1740 {
1741 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1742 }
1743
1744 /**
1745 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1746 * @dev: valid struct device pointer
1747 * @sg: list of buffers
1748 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1749 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1750 *
1751 * Unmap a set of streaming mode DMA translations. Again, CPU access
1752 * rules concerning calls here are the same as for dma_unmap_single().
1753 */
1754 static void arm_iommu_unmap_sg(struct device *dev,
1755 struct scatterlist *sg, int nents,
1756 enum dma_data_direction dir,
1757 unsigned long attrs)
1758 {
1759 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1760 }
1761
1762 /**
1763 * arm_iommu_sync_sg_for_cpu - sync a set of SG buffers for CPU access
1764 * @dev: valid struct device pointer
1765 * @sg: list of buffers
1766 * @nents: number of buffers to sync (same as was passed to dma_map_sg)
1767 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1768 */
1769 static void arm_iommu_sync_sg_for_cpu(struct device *dev,
1770 struct scatterlist *sg,
1771 int nents, enum dma_data_direction dir)
1772 {
1773 struct scatterlist *s;
1774 int i;
1775
1776 for_each_sg(sg, s, nents, i)
1777 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1778
1779 }
1780
1781 /**
1782 * arm_iommu_sync_sg_for_device - hand a set of SG buffers back to the device
1783 * @dev: valid struct device pointer
1784 * @sg: list of buffers
1785 * @nents: number of buffers to sync (same as was passed to dma_map_sg)
1786 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1787 */
1788 static void arm_iommu_sync_sg_for_device(struct device *dev,
1789 struct scatterlist *sg,
1790 int nents, enum dma_data_direction dir)
1791 {
1792 struct scatterlist *s;
1793 int i;
1794
1795 for_each_sg(sg, s, nents, i)
1796 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1797 }
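/*
 * Usage sketch (illustrative only): if the CPU needs to inspect a mapped
 * scatterlist between device transfers, ownership has to be bounced back and
 * forth with the sync calls; note that the original nents is passed, not the
 * count returned by dma_map_sg():
 *
 *	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 *	... CPU reads the buffers ...
 *	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 */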
1798
1799
1800 /**
1801 * arm_coherent_iommu_map_page
1802 * @dev: valid struct device pointer
1803 * @page: page that buffer resides in
1804 * @offset: offset into page for start of buffer
1805 * @size: size of buffer to map
1806 * @dir: DMA transfer direction
1807 *
1808 * Coherent IOMMU aware version of arm_dma_map_page()
1809 */
1810 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
1811 unsigned long offset, size_t size, enum dma_data_direction dir,
1812 unsigned long attrs)
1813 {
1814 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1815 dma_addr_t dma_addr;
1816 int ret, prot, len = PAGE_ALIGN(size + offset);
1817
1818 dma_addr = __alloc_iova(mapping, len);
1819 if (dma_addr == DMA_MAPPING_ERROR)
1820 return dma_addr;
1821
1822 prot = __dma_info_to_prot(dir, attrs);
1823
1824 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
1825 if (ret < 0)
1826 goto fail;
1827
1828 return dma_addr + offset;
1829 fail:
1830 __free_iova(mapping, dma_addr, len);
1831 return DMA_MAPPING_ERROR;
1832 }
1833
1834 /**
1835 * arm_iommu_map_page
1836 * @dev: valid struct device pointer
1837 * @page: page that buffer resides in
1838 * @offset: offset into page for start of buffer
1839 * @size: size of buffer to map
1840 * @dir: DMA transfer direction
1841 *
1842 * IOMMU aware version of arm_dma_map_page()
1843 */
1844 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1845 unsigned long offset, size_t size, enum dma_data_direction dir,
1846 unsigned long attrs)
1847 {
1848 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1849 __dma_page_cpu_to_dev(page, offset, size, dir);
1850
1851 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1852 }
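/*
 * Usage sketch (illustrative only): with the IOMMU ops installed, this is
 * what backs a plain streaming single-page mapping in a driver:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... run the transfer ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 */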
1853
1854 /**
1855 * arm_coherent_iommu_unmap_page
1856 * @dev: valid struct device pointer
1857 * @handle: DMA address of buffer
1858 * @size: size of buffer (same as passed to dma_map_page)
1859 * @dir: DMA transfer direction (same as passed to dma_map_page)
1860 *
1861 * Coherent IOMMU aware version of arm_dma_unmap_page()
1862 */
1863 static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1864 size_t size, enum dma_data_direction dir, unsigned long attrs)
1865 {
1866 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1867 dma_addr_t iova = handle & PAGE_MASK;
1868 int offset = handle & ~PAGE_MASK;
1869 int len = PAGE_ALIGN(size + offset);
1870
1871 if (!iova)
1872 return;
1873
1874 iommu_unmap(mapping->domain, iova, len);
1875 __free_iova(mapping, iova, len);
1876 }
1877
1878 /**
1879 * arm_iommu_unmap_page
1880 * @dev: valid struct device pointer
1881 * @handle: DMA address of buffer
1882 * @size: size of buffer (same as passed to dma_map_page)
1883 * @dir: DMA transfer direction (same as passed to dma_map_page)
1884 *
1885 * IOMMU aware version of arm_dma_unmap_page()
1886 */
1887 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1888 size_t size, enum dma_data_direction dir, unsigned long attrs)
1889 {
1890 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1891 dma_addr_t iova = handle & PAGE_MASK;
1892 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1893 int offset = handle & ~PAGE_MASK;
1894 int len = PAGE_ALIGN(size + offset);
1895
1896 if (!iova)
1897 return;
1898
1899 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
1900 __dma_page_dev_to_cpu(page, offset, size, dir);
1901
1902 iommu_unmap(mapping->domain, iova, len);
1903 __free_iova(mapping, iova, len);
1904 }
1905
1906 /**
1907 * arm_iommu_map_resource - map a device resource for DMA
1908 * @dev: valid struct device pointer
1909 * @phys_addr: physical address of resource
1910 * @size: size of resource to map
1911 * @dir: DMA transfer direction
1912 */
1913 static dma_addr_t arm_iommu_map_resource(struct device *dev,
1914 phys_addr_t phys_addr, size_t size,
1915 enum dma_data_direction dir, unsigned long attrs)
1916 {
1917 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1918 dma_addr_t dma_addr;
1919 int ret, prot;
1920 phys_addr_t addr = phys_addr & PAGE_MASK;
1921 unsigned int offset = phys_addr & ~PAGE_MASK;
1922 size_t len = PAGE_ALIGN(size + offset);
1923
1924 dma_addr = __alloc_iova(mapping, len);
1925 if (dma_addr == DMA_MAPPING_ERROR)
1926 return dma_addr;
1927
1928 prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
1929
1930 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
1931 if (ret < 0)
1932 goto fail;
1933
1934 return dma_addr + offset;
1935 fail:
1936 __free_iova(mapping, dma_addr, len);
1937 return DMA_MAPPING_ERROR;
1938 }
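/*
 * Usage sketch (illustrative only): mapping a peer device's MMIO region (for
 * example a slave FIFO) so it can be targeted by DMA. "fifo_phys" is an
 * assumed physical address of that register window:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_resource(dev, fifo_phys, SZ_4K, DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
 */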
1939
1940 /**
1941 * arm_iommu_unmap_resource - unmap a device DMA resource
1942 * @dev: valid struct device pointer
1943 * @dma_handle: DMA address of the resource (returned from arm_iommu_map_resource)
1944 * @size: size of resource to unmap (same as was passed when mapping)
1945 * @dir: DMA transfer direction
1946 */
1947 static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
1948 size_t size, enum dma_data_direction dir,
1949 unsigned long attrs)
1950 {
1951 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1952 dma_addr_t iova = dma_handle & PAGE_MASK;
1953 unsigned int offset = dma_handle & ~PAGE_MASK;
1954 size_t len = PAGE_ALIGN(size + offset);
1955
1956 if (!iova)
1957 return;
1958
1959 iommu_unmap(mapping->domain, iova, len);
1960 __free_iova(mapping, iova, len);
1961 }
1962
1963 static void arm_iommu_sync_single_for_cpu(struct device *dev,
1964 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1965 {
1966 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1967 dma_addr_t iova = handle & PAGE_MASK;
1968 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1969 unsigned int offset = handle & ~PAGE_MASK;
1970
1971 if (!iova)
1972 return;
1973
1974 __dma_page_dev_to_cpu(page, offset, size, dir);
1975 }
1976
1977 static void arm_iommu_sync_single_for_device(struct device *dev,
1978 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1979 {
1980 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1981 dma_addr_t iova = handle & PAGE_MASK;
1982 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1983 unsigned int offset = handle & ~PAGE_MASK;
1984
1985 if (!iova)
1986 return;
1987
1988 __dma_page_cpu_to_dev(page, offset, size, dir);
1989 }
1990
1991 static const struct dma_map_ops iommu_ops = {
1992 .alloc = arm_iommu_alloc_attrs,
1993 .free = arm_iommu_free_attrs,
1994 .mmap = arm_iommu_mmap_attrs,
1995 .get_sgtable = arm_iommu_get_sgtable,
1996
1997 .map_page = arm_iommu_map_page,
1998 .unmap_page = arm_iommu_unmap_page,
1999 .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
2000 .sync_single_for_device = arm_iommu_sync_single_for_device,
2001
2002 .map_sg = arm_iommu_map_sg,
2003 .unmap_sg = arm_iommu_unmap_sg,
2004 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
2005 .sync_sg_for_device = arm_iommu_sync_sg_for_device,
2006
2007 .map_resource = arm_iommu_map_resource,
2008 .unmap_resource = arm_iommu_unmap_resource,
2009
2010 .dma_supported = arm_dma_supported,
2011 };
2012
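/*
 * For cache-coherent devices no CPU cache maintenance is needed, so the sync
 * callbacks are omitted and the coherent map/unmap variants (which skip
 * __dma_page_cpu_to_dev()/__dma_page_dev_to_cpu()) are used instead.
 */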
2013 static const struct dma_map_ops iommu_coherent_ops = {
2014 .alloc = arm_coherent_iommu_alloc_attrs,
2015 .free = arm_coherent_iommu_free_attrs,
2016 .mmap = arm_coherent_iommu_mmap_attrs,
2017 .get_sgtable = arm_iommu_get_sgtable,
2018
2019 .map_page = arm_coherent_iommu_map_page,
2020 .unmap_page = arm_coherent_iommu_unmap_page,
2021
2022 .map_sg = arm_coherent_iommu_map_sg,
2023 .unmap_sg = arm_coherent_iommu_unmap_sg,
2024
2025 .map_resource = arm_iommu_map_resource,
2026 .unmap_resource = arm_iommu_unmap_resource,
2027
2028 .dma_supported = arm_dma_supported,
2029 };
2030
2031 /**
2032 * arm_iommu_create_mapping
2033 * @bus: pointer to the bus holding the client device (for IOMMU calls)
2034 * @base: start address of the valid IO address space
2035 * @size: maximum size of the valid IO address space
2036 *
2037 * Creates a mapping structure which holds information about used/unused
2038 * IO address ranges; this is required to perform memory allocation and
2039 * mapping with IOMMU aware functions.
2040 *
2041 * The client device needs to be attached to the mapping with
2042 * arm_iommu_attach_device(); see the usage sketch after this function.
2043 */
2044 struct dma_iommu_mapping *
2045 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
2046 {
2047 unsigned int bits = size >> PAGE_SHIFT;
2048 unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
2049 struct dma_iommu_mapping *mapping;
2050 int extensions = 1;
2051 int err = -ENOMEM;
2052
2053 /* currently only 32-bit DMA address space is supported */
2054 if (size > DMA_BIT_MASK(32) + 1)
2055 return ERR_PTR(-ERANGE);
2056
2057 if (!bitmap_size)
2058 return ERR_PTR(-EINVAL);
2059
2060 if (bitmap_size > PAGE_SIZE) {
2061 extensions = bitmap_size / PAGE_SIZE;
2062 bitmap_size = PAGE_SIZE;
2063 }
2064
2065 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
2066 if (!mapping)
2067 goto err;
2068
2069 mapping->bitmap_size = bitmap_size;
2070 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
2071 GFP_KERNEL);
2072 if (!mapping->bitmaps)
2073 goto err2;
2074
2075 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
2076 if (!mapping->bitmaps[0])
2077 goto err3;
2078
2079 mapping->nr_bitmaps = 1;
2080 mapping->extensions = extensions;
2081 mapping->base = base;
2082 mapping->bits = BITS_PER_BYTE * bitmap_size;
2083
2084 spin_lock_init(&mapping->lock);
2085
2086 mapping->domain = iommu_domain_alloc(bus);
2087 if (!mapping->domain)
2088 goto err4;
2089
2090 kref_init(&mapping->kref);
2091 return mapping;
2092 err4:
2093 kfree(mapping->bitmaps[0]);
2094 err3:
2095 kfree(mapping->bitmaps);
2096 err2:
2097 kfree(mapping);
2098 err:
2099 return ERR_PTR(err);
2100 }
2101 EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
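/*
 * Usage sketch (illustrative only): a platform driver that manages its own
 * IOVA window might wire a client device up as follows. The base address and
 * the 128MB window size below are purely illustrative:
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *	... the device now uses the IOMMU-backed dma_map_ops ...
 *	arm_iommu_detach_device(dev);
 *	arm_iommu_release_mapping(mapping);
 */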
2102
2103 static void release_iommu_mapping(struct kref *kref)
2104 {
2105 int i;
2106 struct dma_iommu_mapping *mapping =
2107 container_of(kref, struct dma_iommu_mapping, kref);
2108
2109 iommu_domain_free(mapping->domain);
2110 for (i = 0; i < mapping->nr_bitmaps; i++)
2111 kfree(mapping->bitmaps[i]);
2112 kfree(mapping->bitmaps);
2113 kfree(mapping);
2114 }
2115
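/*
 * The IOVA space bitmap is grown lazily: when the existing bitmaps are
 * exhausted, another PAGE_SIZE-sized bitmap is added (GFP_ATOMIC, as the
 * allocator may call this with the mapping lock held), up to the number of
 * extensions sized when the mapping was created.
 */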
2116 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
2117 {
2118 int next_bitmap;
2119
2120 if (mapping->nr_bitmaps >= mapping->extensions)
2121 return -EINVAL;
2122
2123 next_bitmap = mapping->nr_bitmaps;
2124 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
2125 GFP_ATOMIC);
2126 if (!mapping->bitmaps[next_bitmap])
2127 return -ENOMEM;
2128
2129 mapping->nr_bitmaps++;
2130
2131 return 0;
2132 }
2133
2134 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
2135 {
2136 if (mapping)
2137 kref_put(&mapping->kref, release_iommu_mapping);
2138 }
2139 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
2140
2141 static int __arm_iommu_attach_device(struct device *dev,
2142 struct dma_iommu_mapping *mapping)
2143 {
2144 int err;
2145
2146 err = iommu_attach_device(mapping->domain, dev);
2147 if (err)
2148 return err;
2149
2150 kref_get(&mapping->kref);
2151 to_dma_iommu_mapping(dev) = mapping;
2152
2153 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
2154 return 0;
2155 }
2156
2157 /**
2158 * arm_iommu_attach_device
2159 * @dev: valid struct device pointer
2160 * @mapping: I/O address space mapping structure (returned from
2161 * arm_iommu_create_mapping)
2162 *
2163 * Attaches the specified I/O address space mapping to the provided device.
2164 * This replaces the dma operations (dma_map_ops pointer) with the
2165 * IOMMU aware version.
2166 *
2167 * More than one client may be attached to the same I/O address space
2168 * mapping.
2169 */
2170 int arm_iommu_attach_device(struct device *dev,
2171 struct dma_iommu_mapping *mapping)
2172 {
2173 int err;
2174
2175 err = __arm_iommu_attach_device(dev, mapping);
2176 if (err)
2177 return err;
2178
2179 set_dma_ops(dev, &iommu_ops);
2180 return 0;
2181 }
2182 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
2183
2184 /**
2185 * arm_iommu_detach_device
2186 * @dev: valid struct device pointer
2187 *
2188 * Detaches the provided device from a previously attached mapping.
2189 * This overwrites the dma_ops pointer with the appropriate non-IOMMU ops.
2190 */
2191 void arm_iommu_detach_device(struct device *dev)
2192 {
2193 struct dma_iommu_mapping *mapping;
2194
2195 mapping = to_dma_iommu_mapping(dev);
2196 if (!mapping) {
2197 dev_warn(dev, "Not attached\n");
2198 return;
2199 }
2200
2201 iommu_detach_device(mapping->domain, dev);
2202 kref_put(&mapping->kref, release_iommu_mapping);
2203 to_dma_iommu_mapping(dev) = NULL;
2204 set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
2205
2206 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
2207 }
2208 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
2209
2210 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
2211 {
2212 return coherent ? &iommu_coherent_ops : &iommu_ops;
2213 }
2214
2215 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2216 const struct iommu_ops *iommu)
2217 {
2218 struct dma_iommu_mapping *mapping;
2219
2220 if (!iommu)
2221 return false;
2222
2223 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
2224 if (IS_ERR(mapping)) {
2225 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
2226 size, dev_name(dev));
2227 return false;
2228 }
2229
2230 if (__arm_iommu_attach_device(dev, mapping)) {
2231 pr_warn("Failed to attach device %s to IOMMU mapping\n",
2232 dev_name(dev));
2233 arm_iommu_release_mapping(mapping);
2234 return false;
2235 }
2236
2237 return true;
2238 }
2239
2240 static void arm_teardown_iommu_dma_ops(struct device *dev)
2241 {
2242 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2243
2244 if (!mapping)
2245 return;
2246
2247 arm_iommu_detach_device(dev);
2248 arm_iommu_release_mapping(mapping);
2249 }
2250
2251 #else
2252
2253 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2254 const struct iommu_ops *iommu)
2255 {
2256 return false;
2257 }
2258
2259 static void arm_teardown_iommu_dma_ops(struct device *dev) { }
2260
2261 #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
2262
2263 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
2264
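/*
 * Called by the bus/firmware DMA configuration code (e.g. the OF path) while
 * a device is being probed; selects either the IOMMU-backed or the plain ARM
 * dma_map_ops for the device.
 */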
2265 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
2266 const struct iommu_ops *iommu, bool coherent)
2267 {
2268 const struct dma_map_ops *dma_ops;
2269
2270 dev->archdata.dma_coherent = coherent;
2271 #ifdef CONFIG_SWIOTLB
2272 dev->dma_coherent = coherent;
2273 #endif
2274
2275 /*
2276 * Don't override the dma_ops if they have already been set. Ideally
2277 * this should be the only location where dma_ops are set; remove this
2278 * check once all other callers of set_dma_ops() have disappeared.
2279 */
2280 if (dev->dma_ops)
2281 return;
2282
2283 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
2284 dma_ops = arm_get_iommu_dma_map_ops(coherent);
2285 else
2286 dma_ops = arm_get_dma_map_ops(coherent);
2287
2288 set_dma_ops(dev, dma_ops);
2289
2290 xen_setup_dma_ops(dev);
2291 dev->archdata.dma_ops_setup = true;
2292 }
2293
2294 void arch_teardown_dma_ops(struct device *dev)
2295 {
2296 if (!dev->archdata.dma_ops_setup)
2297 return;
2298
2299 arm_teardown_iommu_dma_ops(dev);
2300 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2301 set_dma_ops(dev, NULL);
2302 }
2303
2304 #ifdef CONFIG_SWIOTLB
2305 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
2306 enum dma_data_direction dir)
2307 {
2308 __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2309 size, dir);
2310 }
2311
2312 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
2313 enum dma_data_direction dir)
2314 {
2315 __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2316 size, dir);
2317 }
2318
2319 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
2320 gfp_t gfp, unsigned long attrs)
2321 {
2322 return __dma_alloc(dev, size, dma_handle, gfp,
2323 __get_dma_pgprot(attrs, PAGE_KERNEL), false,
2324 attrs, __builtin_return_address(0));
2325 }
2326
2327 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
2328 dma_addr_t dma_handle, unsigned long attrs)
2329 {
2330 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
2331 }
2332 #endif /* CONFIG_SWIOTLB */
2333