// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

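/*
 * Unmap an IOVA range from the context page tables, one 4K page at a time.
 * Both iova and size must be page aligned; unaligned requests are rejected
 * with an error message.
 */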
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

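/*
 * Map a physically contiguous range into the context page tables in 4K
 * steps.  On failure the partial mapping created so far is rolled back
 * via etnaviv_context_unmap().
 */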
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

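/*
 * Map a scatter-gather table at the given IOVA.  Each entry is mapped with
 * its start aligned down by the segment offset and the length extended
 * accordingly; on failure everything mapped so far is torn down again.
 */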
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	context->flush_seq++;

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

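/*
 * Tear down the mapping of a scatter-gather table starting at the given
 * IOVA, walking it with the same per-entry sizes that were used when
 * mapping.
 */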
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	context->flush_seq++;
}

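/*
 * Remove a GEM object mapping from the context: unmap its pages from the
 * MMU and release the drm_mm node backing its address range.  Caller must
 * hold the context lock.
 */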
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

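/*
 * Find a free IOVA range of the requested size.  If the address space is
 * full, run a drm_mm eviction scan to reap idle (unpinned) mappings and
 * retry the allocation in eviction mode.
 */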
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed; retry the allocation one more time.
		 */
	}

	return ret;
}

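/*
 * Insert a node at a userspace-requested address.  Idle mappings occupying
 * the range are reaped first; a pinned (in-use) mapping in the range makes
 * the request fail with -ENOSPC.
 */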
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node due to an existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context),
	 * in which case the mappings will not be cleaned up and we must reap
	 * them here to make space for the new mapping.
	 */

	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_remove_mapping(context, m);
		etnaviv_iommu_context_put(m->context);
		m->context = NULL;
		list_del_init(&m->mmu_node);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

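/*
 * Map a GEM object into the context address space, either at a
 * userspace-requested address or at the next free IOVA.  Single-entry
 * (contiguous) buffers on MMUv1 can skip the page tables entirely and use
 * their physical address relative to the linear window.
 */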
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

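/*
 * Drop a GEM object mapping from the context.  Nothing to do if the
 * mapping has already been reaped by the eviction path on another thread.
 */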
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}

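/*
 * Final kref release: unmap the cmdbuf suballocator from this context and
 * hand the context back to the version-specific free routine.
 */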
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

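/*
 * Allocate a new IOMMU context for the given global MMU state and map the
 * cmdbuf suballocator into it.  On MMUv1 the command buffer mapping must
 * fall inside the valid memory window, otherwise the context is torn down
 * again.
 */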
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

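/*
 * Map the cmdbuf suballocator region into the context and return its IOVA,
 * refcounted via mapping->use.  MMUv1 does not touch the page tables here:
 * the IOVA is simply the physical address relative to the linear window.
 */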
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

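/*
 * Drop a reference on the suballocator mapping; the last put unmaps it
 * from the page tables and releases its drm_mm node (a no-op on MMUv1,
 * which never mapped it).
 */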
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

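/*
 * Set up the per-GPU global MMU state shared by all contexts: detect the
 * MMU version from the feature bits, allocate the bad page used to catch
 * stray accesses and, for MMUv2, the PTA buffer, then select the matching
 * pagetable ops.
 */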
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

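/*
 * Drop a reference on the global MMU state and free the shared DMA
 * allocations once the last GPU stops using it.
 */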
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}