// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

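/*
 * Lazily allocate the backing pages (shmem or VRAM carveout) and the
 * sg_table on first use.  Must be called with the object lock held.
 */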
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

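/*
 * Release the backing pages and sg_table; callers hold the object lock.
 * For write-combined/uncached buffers the pages are synced back to the
 * CPU before being returned to shmem or the VRAM carveout.
 */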
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

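/*
 * Like get_pages(), but takes the object lock, checks the madvise state,
 * and bumps pin_count; pair with msm_gem_put_pages().
 */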
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);

	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
		return pgprot_writecombine(prot);
	return prot;
}

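/*
 * Page fault handler for userspace mmap: finds the backing page for the
 * faulting address and inserts its pfn into the vma.
 */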
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

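/*
 * Look up the vma for the given address space, creating and initializing
 * one (with an iova allocated from the requested range) if it doesn't
 * exist yet.
 */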
static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
				range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

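/*
 * Map the object into the vma's iommu, with permissions derived from the
 * object flags (read-only, privileged, cached-coherent), and pin it.
 */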
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_inactive(msm_obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it.  Doesn't need a put because iovas are
 * currently valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;

		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

573
574 /*
575 * Unpin a iova by updating the reference counts. The memory isn't actually
576 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
577 * to get rid of it
578 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

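/*
 * As an illustrative example (assuming align_pitch() pads the width to
 * 32 pixels): a 640x480 XRGB8888 dumb buffer gets pitch = 4 * 640 =
 * 2560 bytes, and size = PAGE_ALIGN(2560 * 480) = 1228800 bytes.
 */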
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else false. */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

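/*
 * Move an object that has just become inactive onto the inactive list
 * matching its madvise state: willneed (evictable), dontneed (purgeable),
 * or unpinned.  Called with the object lock held.
 */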
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
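/*
 * Print a one-line summary of the object (flags, name, size, madvise
 * state), plus its vmas and reservation object, for debugfs.
 */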
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {
		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;

			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;

		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

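/*
 * Allocate a new GEM object: backed by shmem pages when an MMU is
 * present, otherwise carved out of the VRAM carveout.
 */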
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_inactive() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid a NULL pointer dereference, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

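/*
 * Import a dma-buf: wrap the exporter's sg_table in a GEM object.  The
 * backing pages are owned by the exporter, not by us.
 */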
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

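/*
 * Convenience wrapper for kernel-internal buffers: allocates a BO, pins
 * it into the given address space, and maps it into the kernel.  A
 * hypothetical caller might do:
 *
 *	ptr = msm_gem_kernel_new(drm, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */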
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}