/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include "iopgtable.h"
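
/*
 * Iterate over the first 'n' TLB entries of 'obj': each iteration selects
 * entry '__i' as the victim, reads its CAM/RAM pair back into 'cr' via
 * __iotlb_read_cr(), and leaves '__i' as the loop counter.
 */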
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);

/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

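/*
 * The MMU's functional clock must be running while its registers are
 * accessed; the helpers below bracket the arch-specific enable/disable
 * hooks with clk_enable()/clk_disable().
 */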
static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 *	TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

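/*
 * The MMU_LOCK register holds two fields: 'base', the number of locked
 * (preserved) TLB entries at the bottom of the TLB, and 'vict', the victim
 * index used for the next TLB load.  These helpers mirror that register
 * into and out of a struct iotlb_lock.
 */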
static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

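/*
 * Load the CAM/RAM pair 'cr' into the TLB: the arch hook programs the
 * CAM/RAM registers, MMU_FLUSH_ENTRY invalidates any stale entry for the
 * same address, and MMU_LD_TLB latches the new entry at the current
 * victim index.
 */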
static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);
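
/*
 * A minimal usage sketch, assuming the MMU_CAM_* flags from <plat/iommu.h>
 * (the exact attribute bits depend on the arch-specific implementation):
 *
 *	struct iotlb_entry e = {
 *		.da	= da,
 *		.pa	= pa,
 *		.valid	= MMU_CAM_V,
 *		.pgsz	= MMU_CAM_PGSZ_4K,
 *		.prsvd	= MMU_CAM_P,	(for a locked, "preserved" entry)
 *	};
 *
 *	err = load_iotlb_entry(obj, &e);
 */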

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear the iommu tlb entry which contains the address 'da'.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);

/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @obj:	target iommu
 * @start:	iommu device virtual address (start)
 * @end:	iommu device virtual address (end)
 *
 * Clear any iommu tlb entry that maps an address within ['start', 'end').
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);

/**
 * iommu_set_twl - enable/disable table walking logic
 * @obj:	target iommu
 * @on:		enable/disable
 *
 * Function used to enable/disable TWL. If one wants to work
 * exclusively with locked TLB entries and receive notifications
 * for TLB miss then call this function to disable TWL.
 */
void iommu_set_twl(struct iommu *obj, bool on)
{
	clk_enable(obj->clk);
	arch_iommu->set_twl(obj, on);
	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
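/*
 * The page tables live in normal cacheable memory while the MMU's table
 * walker fetches descriptors from physical memory; the two helpers below
 * clean the D-cache lines covering updated first/second-level entries
 * ("mcr p15, 0, %0, c7, c10, 1" is a clean-by-MVA) so the walker sees the
 * new descriptors.
 */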
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

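/*
 * One helper per supported mapping size follows.  16MB supersections and
 * 64KB large pages are replicated across 16 consecutive first- or
 * second-level entries, as the ARM short-descriptor format requires.
 */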
static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
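
/*
 * Example (sketch) of interpreting the result:
 *
 *	u32 *pgd, *pte;
 *
 *	iopgtable_lookup_entry(obj, da, &pgd, &pte);
 *	if (pte)	*pte is the second-level (4KB/64KB) descriptor
 *	else if (*pgd)	*pgd is a section/supersection descriptor
 *	else		'da' is not mapped
 */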

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1));
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);

static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
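/*
 * Fault handling: report the faulting device address, give the client's
 * registered isr a chance to resolve it (e.g. by loading the missing
 * TLB/PTE entry dynamically), and otherwise disable the MMU and dump the
 * page-table state for the offending address.
 */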
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_set_da_range - Set a valid device address range
 * @obj:	target iommu
 * @start:	Start of valid range
 * @end:	End of valid range
 **/
int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
{
	if (!obj)
		return -EFAULT;

	if (end < start || !IS_ALIGNED(start | end, PAGE_SIZE))
		return -EINVAL;

	obj->da_start = start;
	obj->da_end = end;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_da_range);

/**
 * iommu_get - Get iommu handler
 * @name:	target iommu name
 **/
struct iommu *iommu_get(const char *name)
{
	int err = -ENOMEM;
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	mutex_lock(&obj->iommu_lock);

	if (obj->refcount++ == 0) {
		err = iommu_enable(obj);
		if (err)
			goto err_enable;
		flush_iotlb_all(obj);
	}

	if (!try_module_get(obj->owner))
		goto err_module;

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	mutex_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iommu_get);

/**
 * iommu_put - Put back iommu handler
 * @obj:	target iommu
 **/
void iommu_put(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	mutex_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
EXPORT_SYMBOL_GPL(iommu_put);
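
/*
 * A minimal usage sketch; the name is board-specific and comes from the
 * platform data of the target MMU:
 *
 *	struct iommu *obj = iommu_get(name);
 *
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	iommu_put(obj);
 */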

int iommu_set_isr(const char *name,
		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
			     void *priv),
		  void *isr_priv)
{
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return -ENODEV;

	obj = to_iommu(dev);
	mutex_lock(&obj->iommu_lock);
	if (obj->refcount != 0) {
		mutex_unlock(&obj->iommu_lock);
		return -EBUSY;
	}
	obj->isr = isr;
	obj->isr_priv = isr_priv;
	mutex_unlock(&obj->iommu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_isr);

/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
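	/*
	 * __get_free_pages() returns a block naturally aligned to 2^order
	 * pages, so the first-level table allocated above is already aligned
	 * to its own size; the BUG_ON above is a sanity check only.
	 */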

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);
	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

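/*
 * kmem_cache constructor for second-level page tables: clean the D-cache
 * lines backing a freshly constructed table so the MMU's table walker
 * never sees stale data for it.
 */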
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");