// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in a system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to
 * the proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
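
/*
 * Illustration (editor's note, not in the original source): with the v5
 * shift of 4, a 36-bit physical address fits in a 32-bit page table entry
 * as PA >> 4. E.g. PA 0x1_2345_6000 is stored as 0x1234_5600 and recovered
 * by sect_to_phys() as ((phys_addr_t)0x12345600) << 4 == 0x1_2345_6000.
 * With the v1.x - v3.x shift of 0, the entry holds the 32-bit PA unchanged.
 */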

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};
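
/*
 * Editor's note: the tables above are indexed directly by the
 * IOMMU_READ/IOMMU_WRITE bits of 'prot' (masked with
 * SYSMMU_SUPPORTED_PROT_BITS below), so prot == IOMMU_READ | IOMMU_WRITE
 * selects entry 3. Pre-v5 hardware has no write-only encoding, which is
 * why entries 2 and 3 of SYSMMU_LV1_PROT/SYSMMU_LV2_PROT are identical.
 */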

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
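
/*
 * Worked example (editor's note): for iova 0x12345678,
 * lv1ent_offset() = 0x12345678 >> 20 = 0x123 (index into the 4096-entry
 * first-level table) and lv2ent_offset() = (0x12345678 >> 12) & 0xFF =
 * 0x45 (index into the 256-entry second-level table); the low 12 bits,
 * 0x678, are the offset within the 4KiB page.
 */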

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
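
/*
 * Worked example (editor's note, pre-v5 so PG_ENT_SHIFT == 0): mapping a
 * 4KiB page at PA 0x12345000 with prot == IOMMU_READ | IOMMU_WRITE gives
 * mk_lv2ent_spage(0x12345000, 3) == 0x12345000 | SYSMMU_LV2_PROT[3] | 2
 * == 0x12345000 | 0x010 | 2 == 0x12345012.
 */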

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
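
/*
 * Example (editor's note): version 3.3 is encoded as
 * MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183; MMU_RAW_VER() extracts
 * the same 11-bit major.minor field from bits [31:21] of the
 * REG_MMU_VERSION register value.
 */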

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN	0x00C
#define REG_V5_MMU_FLUSH_ALL	0x010
#define REG_V5_MMU_FLUSH_ENTRY	0x014
#define REG_V5_MMU_FLUSH_RANGE	0x018
#define REG_V5_MMU_FLUSH_START	0x020
#define REG_V5_MMU_FLUSH_END	0x024
#define REG_V5_INT_STATUS	0x060
#define REG_V5_INT_CLEAR	0x064
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
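
/*
 * Editor's note: instead of leaving unused first-level slots as raw fault
 * entries, every such slot is pointed at the shared, all-fault
 * zero_lv2_table via ZERO_LV2LINK. This keeps the v3.3 FLPD cache from
 * caching fault entries that may later become valid; see the workaround
 * comments in alloc_lv2entry() and exynos_iommu_domain_alloc() below.
 */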

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree which are bound to the given master device. It is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an exynos-specific generalization of struct
 * iommu_domain. It contains the list of SYSMMU controllers from all
 * master devices which have been attached to this domain, and the page
 * tables of the IO address space defined by it. It is usually referenced
 * by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This
 * includes hw resources like registers and clocks, pointers and list
 * nodes to connect it to all other structures, internal state and
 * parameters read from the device tree. It is usually referenced by the
 * 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
			       data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
			       data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}
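
/*
 * Worked example (editor's note): on v5, flushing 16 pages starting at
 * iova 0x10000000 writes FLUSH_START = 0x10000000 and FLUSH_END =
 * 0x10000000 + 15 * SPAGE_SIZE = 0x1000f000, then kicks the range flush;
 * pre-v5 hardware instead gets 16 individual REG_MMU_FLUSH_ENTRY writes,
 * one per 4KiB page.
 */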

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report a proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when the interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for a
	 * short time, while accessing the registers. For performing address
	 * translation during a DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 *   4KB page:  1 invalidation
		 *  64KB page: 16 invalidations
		 *   1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the sets and a 64KB
		 * page in one of 16 consecutive sets.
		 */
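		/*
		 * Worked example (editor's note, assuming the usual 4KiB
		 * PAGE_SIZE): unmapping a 64KiB large page on v2 gives
		 * num_inv = min(65536 / 4096, 64) = 16, i.e. 16 per-page
		 * flushes covering every set the page may occupy.
		 */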
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler for irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_iommu_register;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * Use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush).
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;

err_iommu_register:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If a pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table.
		 * This function replaces the zero_l2_table with a new L2
		 * page table to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid
		 * area instead of the new L2 page table that has the
		 * mapping information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve FLPD cache invalidation for
		 * System MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing the FLPD cache in System MMU v3.3, which may
		 * cache an FLPD entry by speculative prefetch of an SLPD
		 * that has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has since been updated to a valid entry.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the
 * following workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of
 * 128KiB at maximum to prevent misbehavior of System MMU 3.x (workaround
 * for h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned
 * with the following sizes for System MMU v3.1 and v3.2:
 *	System MMU v3.1: 128KiB
 *	System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size
 *   larger than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
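
/*
 * Worked example (editor's note): under the v3.3 rules above, if one
 * region ends at IOVA 0x10100000, the next region may start no lower than
 * 0x10120000 (leaving a 128KiB hole), and 0x10120000 is itself 128KiB
 * aligned, so it is the first permitted start address.
 */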
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= exynos_iommu_attach_device,
		.detach_dev	= exynos_iommu_detach_device,
		.map		= exynos_iommu_map,
		.unmap		= exynos_iommu_unmap,
		.iova_to_phys	= exynos_iommu_iova_to_phys,
		.free		= exynos_iommu_domain_free,
	}
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);