/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			  mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)		function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	  _kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	  _vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *
 *	'iova':	device iommu virtual address
 *	'da':	alias of 'iova'
 *	'pa':	physical address
 *	'va':	mpu virtual address
 *
 *	'c':	contiguous memory area
 *	'd':	discontiguous memory area
 *	'a':	anonymous memory allocation
 *	'()':	optional feature
 *
 *	'n':	a normal page (4KB) size is used.
 *	's':	multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
 *
 *	'*':	not yet, but feasible.
 */
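
/*
 * Example (a minimal, hypothetical sketch, not taken from any in-tree
 * driver): pattern 4 above corresponds to iommu_vmalloc()/iommu_vfree().
 * A client could obtain a device-visible buffer roughly as follows, where
 * "my_obj" is an iommu handle obtained elsewhere (e.g. via iommu_get())
 * and error handling is abbreviated:
 *
 *	u32 da;
 *
 *	da = iommu_vmalloc(my_obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	use 'da' as the device address, da_to_va(my_obj, da) as the mpu va
 *	...
 *	iommu_vfree(my_obj, da);
 */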

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

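/*
 * Return the largest iommu page size (16MB, 1MB, 64KB or 4KB) that @addr
 * is aligned to, or 0 if @addr is not aligned even to 4KB.
 */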
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
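
/*
 * Worked example for sgtable_nents() (illustrative values only): with
 * da = pa = 0x00100000 and bytes = SZ_1M + SZ_64K, the first iteration
 * picks a 1MB entry (the addresses are 1MB-aligned and at least 1MB is
 * left), the second picks a 64KB entry, so two sg elements are needed
 * instead of 272 individual 4KB pages.
 */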

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
							u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma which includes @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (free area) which fits the requested address and
 * length in the iommu's list of iovmas, and returns the newly allocated
 * iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns the mpu virtual address which corresponds to a given device
 * virtual address.
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for consistency
	 * and code readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
								size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		unsigned bytes;

		bytes = max_alignment(da | pa);
		bytes = min_t(unsigned, bytes, iopgsz_max(len));

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		da += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for consistency
	 * and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
		 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
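
/*
 * Usage sketch for iommu_vmap()/iommu_vunmap() (hypothetical, abbreviated
 * error handling).  "my_obj" is an iommu handle and "my_sgt" is an sg_table
 * the caller has already filled with io-page-size-aligned elements:
 *
 *	u32 da;
 *	struct sg_table *sgt;
 *
 *	da = iommu_vmap(my_obj, 0, my_sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	sgt = iommu_vunmap(my_obj, da);
 *
 * iommu_vunmap() hands 'my_sgt' back; the caller is expected to free it.
 */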

/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' was allocated by the caller before 'iommu_vmap()' was called.
	 * Just return 'sgt' to the caller to free.
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			  size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags, da, pa);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, da, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @pa:		contiguous physical memory
 * @bytes:	mapping size
 * @flags:	iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
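
/*
 * Usage sketch for iommu_kmap()/iommu_kunmap() (hypothetical, abbreviated
 * error handling).  "my_obj" is an iommu handle; "MY_REGION_PA" and
 * "MY_REGION_DA" stand for a physically contiguous region and the fixed
 * device address it should appear at:
 *
 *	u32 da;
 *
 *	da = iommu_kmap(my_obj, MY_REGION_DA, MY_REGION_PA, SZ_1M,
 *			IOVMF_DA_FIXED);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_kunmap(my_obj, da);
 */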

/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);


static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");