1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Framework for buffer objects that can be shared across devices/subsystems.
4 *
5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
6 * Author: Sumit Semwal <sumit.semwal@ti.com>
7 *
8 * Many thanks to the linaro-mm-sig list, and especially
9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11 * refining of this idea.
12 */
13
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/dma-fence-unwrap.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/export.h>
21 #include <linux/debugfs.h>
22 #include <linux/module.h>
23 #include <linux/seq_file.h>
24 #include <linux/sync_file.h>
25 #include <linux/poll.h>
26 #include <linux/dma-resv.h>
27 #include <linux/mm.h>
28 #include <linux/mount.h>
29 #include <linux/pseudo_fs.h>
30
31 #include <uapi/linux/dma-buf.h>
32 #include <uapi/linux/magic.h>
33
34 #include "dma-buf-sysfs-stats.h"
35
36 static inline int is_dma_buf_file(struct file *);
37
38 struct dma_buf_list {
39 struct list_head head;
40 struct mutex lock;
41 };
42
43 static struct dma_buf_list db_list;
44
45 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
46 {
47 struct dma_buf *dmabuf;
48 char name[DMA_BUF_NAME_LEN];
49 size_t ret = 0;
50
51 dmabuf = dentry->d_fsdata;
52 spin_lock(&dmabuf->name_lock);
53 if (dmabuf->name)
54 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
55 spin_unlock(&dmabuf->name_lock);
56
57 return dynamic_dname(buffer, buflen, "/%s:%s",
58 dentry->d_name.name, ret > 0 ? name : "");
59 }
60
61 static void dma_buf_release(struct dentry *dentry)
62 {
63 struct dma_buf *dmabuf;
64
65 dmabuf = dentry->d_fsdata;
66 if (unlikely(!dmabuf))
67 return;
68
69 BUG_ON(dmabuf->vmapping_counter);
70
71 /*
72 * If you hit this BUG() it could mean:
73 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
74 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
75 */
76 BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
77
78 dma_buf_stats_teardown(dmabuf);
79 dmabuf->ops->release(dmabuf);
80
81 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
82 dma_resv_fini(dmabuf->resv);
83
84 WARN_ON(!list_empty(&dmabuf->attachments));
85 module_put(dmabuf->owner);
86 kfree(dmabuf->name);
87 kfree(dmabuf);
88 }
89
90 static int dma_buf_file_release(struct inode *inode, struct file *file)
91 {
92 struct dma_buf *dmabuf;
93
94 if (!is_dma_buf_file(file))
95 return -EINVAL;
96
97 dmabuf = file->private_data;
98 if (dmabuf) {
99 mutex_lock(&db_list.lock);
100 list_del(&dmabuf->list_node);
101 mutex_unlock(&db_list.lock);
102 }
103
104 return 0;
105 }
106
107 static const struct dentry_operations dma_buf_dentry_ops = {
108 .d_dname = dmabuffs_dname,
109 .d_release = dma_buf_release,
110 };
111
112 static struct vfsmount *dma_buf_mnt;
113
114 static int dma_buf_fs_init_context(struct fs_context *fc)
115 {
116 struct pseudo_fs_context *ctx;
117
118 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
119 if (!ctx)
120 return -ENOMEM;
121 ctx->dops = &dma_buf_dentry_ops;
122 return 0;
123 }
124
125 static struct file_system_type dma_buf_fs_type = {
126 .name = "dmabuf",
127 .init_fs_context = dma_buf_fs_init_context,
128 .kill_sb = kill_anon_super,
129 };
130
131 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
132 {
133 struct dma_buf *dmabuf;
134
135 if (!is_dma_buf_file(file))
136 return -EINVAL;
137
138 dmabuf = file->private_data;
139
140 /* check if buffer supports mmap */
141 if (!dmabuf->ops->mmap)
142 return -EINVAL;
143
144 /* check for overflowing the buffer's size */
145 if (vma->vm_pgoff + vma_pages(vma) >
146 dmabuf->size >> PAGE_SHIFT)
147 return -EINVAL;
148
149 return dmabuf->ops->mmap(dmabuf, vma);
150 }
151
152 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
153 {
154 struct dma_buf *dmabuf;
155 loff_t base;
156
157 if (!is_dma_buf_file(file))
158 return -EBADF;
159
160 dmabuf = file->private_data;
161
162 /* only support discovering the end of the buffer,
163 but also allow SEEK_SET to maintain the idiomatic
164 SEEK_END(0), SEEK_CUR(0) pattern */
165 if (whence == SEEK_END)
166 base = dmabuf->size;
167 else if (whence == SEEK_SET)
168 base = 0;
169 else
170 return -EINVAL;
171
172 if (offset != 0)
173 return -EINVAL;
174
175 return base + offset;
176 }
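
/*
 * Example (userspace sketch, not part of this file's build): discovering a
 * dma-buf's size through the llseek support above. Only SEEK_END and
 * SEEK_SET with a zero offset are accepted; dmabuf_fd is assumed to be a
 * dma-buf file descriptor obtained from an exporter, and error handling is
 * trimmed.
 *
 *	#include <unistd.h>
 *
 *	static size_t dmabuf_size(int dmabuf_fd)
 *	{
 *		off_t size = lseek(dmabuf_fd, 0, SEEK_END);	// returns dmabuf->size
 *
 *		lseek(dmabuf_fd, 0, SEEK_SET);			// rewind, returns 0
 *		return (size_t)size;
 *	}
 */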
177
178 /**
179 * DOC: implicit fence polling
180 *
181 * To support cross-device and cross-driver synchronization of buffer access
182 * implicit fences (represented internally in the kernel with &struct dma_fence)
183 * can be attached to a &dma_buf. The glue for that and a few related things are
184 * provided in the &dma_resv structure.
185 *
186 * Userspace can query the state of these implicitly tracked fences using poll()
187 * and related system calls:
188 *
189 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
190 * most recent write or exclusive fence.
191 *
192 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
193 * all attached fences, shared and exclusive ones.
194 *
195 * Note that this only signals the completion of the respective fences, i.e. the
196 * DMA transfers are complete. Cache flushing and any other necessary
197 * preparations before CPU access can begin still need to happen.
198 *
199 * As an alternative to poll(), the set of fences on a DMA buffer can be
200 * exported as a &sync_file using &dma_buf_sync_file_export.
201 */
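
/*
 * Example (userspace sketch, not part of this file's build): waiting for the
 * implicit fences described above with poll(). POLLIN waits for the most
 * recent write/exclusive fence, POLLOUT for all attached fences. dmabuf_fd
 * is an assumption (obtained from an exporter); error handling is trimmed.
 *
 *	#include <poll.h>
 *
 *	static int wait_for_last_write(int dmabuf_fd)
 *	{
 *		struct pollfd pfd = {
 *			.fd = dmabuf_fd,
 *			.events = POLLIN,	// use POLLOUT to wait for all fences
 *		};
 *
 *		// Blocks until the relevant fences have signalled.
 *		return poll(&pfd, 1, -1);
 *	}
 */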
202
203 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
204 {
205 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
206 struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
207 unsigned long flags;
208
209 spin_lock_irqsave(&dcb->poll->lock, flags);
210 wake_up_locked_poll(dcb->poll, dcb->active);
211 dcb->active = 0;
212 spin_unlock_irqrestore(&dcb->poll->lock, flags);
213 dma_fence_put(fence);
214 /* Paired with get_file in dma_buf_poll */
215 fput(dmabuf->file);
216 }
217
218 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
219 struct dma_buf_poll_cb_t *dcb)
220 {
221 struct dma_resv_iter cursor;
222 struct dma_fence *fence;
223 int r;
224
225 dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
226 fence) {
227 dma_fence_get(fence);
228 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
229 if (!r)
230 return true;
231 dma_fence_put(fence);
232 }
233
234 return false;
235 }
236
237 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
238 {
239 struct dma_buf *dmabuf;
240 struct dma_resv *resv;
241 __poll_t events;
242
243 dmabuf = file->private_data;
244 if (!dmabuf || !dmabuf->resv)
245 return EPOLLERR;
246
247 resv = dmabuf->resv;
248
249 poll_wait(file, &dmabuf->poll, poll);
250
251 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
252 if (!events)
253 return 0;
254
255 dma_resv_lock(resv, NULL);
256
257 if (events & EPOLLOUT) {
258 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
259
260 /* Check that callback isn't busy */
261 spin_lock_irq(&dmabuf->poll.lock);
262 if (dcb->active)
263 events &= ~EPOLLOUT;
264 else
265 dcb->active = EPOLLOUT;
266 spin_unlock_irq(&dmabuf->poll.lock);
267
268 if (events & EPOLLOUT) {
269 /* Paired with fput in dma_buf_poll_cb */
270 get_file(dmabuf->file);
271
272 if (!dma_buf_poll_add_cb(resv, true, dcb))
273 /* No callback queued, wake up any other waiters */
274 dma_buf_poll_cb(NULL, &dcb->cb);
275 else
276 events &= ~EPOLLOUT;
277 }
278 }
279
280 if (events & EPOLLIN) {
281 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
282
283 /* Check that callback isn't busy */
284 spin_lock_irq(&dmabuf->poll.lock);
285 if (dcb->active)
286 events &= ~EPOLLIN;
287 else
288 dcb->active = EPOLLIN;
289 spin_unlock_irq(&dmabuf->poll.lock);
290
291 if (events & EPOLLIN) {
292 /* Paired with fput in dma_buf_poll_cb */
293 get_file(dmabuf->file);
294
295 if (!dma_buf_poll_add_cb(resv, false, dcb))
296 /* No callback queued, wake up any other waiters */
297 dma_buf_poll_cb(NULL, &dcb->cb);
298 else
299 events &= ~EPOLLIN;
300 }
301 }
302
303 dma_resv_unlock(resv);
304 return events;
305 }
306
307 /**
308 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
309 * The name can be changed if the same piece of memory is used for
310 * multiple purposes between different devices.
311 *
312 * @dmabuf: [in] dmabuf buffer that will be renamed.
313 * @buf: [in] A piece of userspace memory that contains the name of
314 * the dma-buf.
315 *
316 * Returns 0 on success, or a negative error code if copying the name
317 * from userspace fails.
318 *
319 */
320 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
321 {
322 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
323
324 if (IS_ERR(name))
325 return PTR_ERR(name);
326
327 spin_lock(&dmabuf->name_lock);
328 kfree(dmabuf->name);
329 dmabuf->name = name;
330 spin_unlock(&dmabuf->name_lock);
331
332 return 0;
333 }
334
335 #if IS_ENABLED(CONFIG_SYNC_FILE)
336 static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
337 void __user *user_data)
338 {
339 struct dma_buf_export_sync_file arg;
340 enum dma_resv_usage usage;
341 struct dma_fence *fence = NULL;
342 struct sync_file *sync_file;
343 int fd, ret;
344
345 if (copy_from_user(&arg, user_data, sizeof(arg)))
346 return -EFAULT;
347
348 if (arg.flags & ~DMA_BUF_SYNC_RW)
349 return -EINVAL;
350
351 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
352 return -EINVAL;
353
354 fd = get_unused_fd_flags(O_CLOEXEC);
355 if (fd < 0)
356 return fd;
357
358 usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
359 ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
360 if (ret)
361 goto err_put_fd;
362
363 if (!fence)
364 fence = dma_fence_get_stub();
365
366 sync_file = sync_file_create(fence);
367
368 dma_fence_put(fence);
369
370 if (!sync_file) {
371 ret = -ENOMEM;
372 goto err_put_fd;
373 }
374
375 arg.fd = fd;
376 if (copy_to_user(user_data, &arg, sizeof(arg))) {
377 ret = -EFAULT;
378 goto err_put_file;
379 }
380
381 fd_install(fd, sync_file->file);
382
383 return 0;
384
385 err_put_file:
386 fput(sync_file->file);
387 err_put_fd:
388 put_unused_fd(fd);
389 return ret;
390 }
391
392 static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
393 const void __user *user_data)
394 {
395 struct dma_buf_import_sync_file arg;
396 struct dma_fence *fence, *f;
397 enum dma_resv_usage usage;
398 struct dma_fence_unwrap iter;
399 unsigned int num_fences;
400 int ret = 0;
401
402 if (copy_from_user(&arg, user_data, sizeof(arg)))
403 return -EFAULT;
404
405 if (arg.flags & ~DMA_BUF_SYNC_RW)
406 return -EINVAL;
407
408 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
409 return -EINVAL;
410
411 fence = sync_file_get_fence(arg.fd);
412 if (!fence)
413 return -EINVAL;
414
415 usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
416 DMA_RESV_USAGE_READ;
417
418 num_fences = 0;
419 dma_fence_unwrap_for_each(f, &iter, fence)
420 ++num_fences;
421
422 if (num_fences > 0) {
423 dma_resv_lock(dmabuf->resv, NULL);
424
425 ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
426 if (!ret) {
427 dma_fence_unwrap_for_each(f, &iter, fence)
428 dma_resv_add_fence(dmabuf->resv, f, usage);
429 }
430
431 dma_resv_unlock(dmabuf->resv);
432 }
433
434 dma_fence_put(fence);
435
436 return ret;
437 }
438 #endif
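
/*
 * Example (userspace sketch, assuming kernel headers that provide these
 * ioctls): exporting the fences a new write would have to wait for as a
 * sync_file fd, and importing a sync_file back as a read fence. The helper
 * names are hypothetical; error handling is trimmed.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int export_write_fences(int dmabuf_fd)
 *	{
 *		struct dma_buf_export_sync_file arg = {
 *			.flags = DMA_BUF_SYNC_WRITE,
 *		};
 *
 *		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
 *			return -1;
 *		return arg.fd;		// sync_file fd, can be poll()ed or passed on
 *	}
 *
 *	static int import_read_fence(int dmabuf_fd, int sync_file_fd)
 *	{
 *		struct dma_buf_import_sync_file arg = {
 *			.flags = DMA_BUF_SYNC_READ,
 *			.fd = sync_file_fd,
 *		};
 *
 *		return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg);
 *	}
 */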
439
440 static long dma_buf_ioctl(struct file *file,
441 unsigned int cmd, unsigned long arg)
442 {
443 struct dma_buf *dmabuf;
444 struct dma_buf_sync sync;
445 enum dma_data_direction direction;
446 int ret;
447
448 dmabuf = file->private_data;
449
450 switch (cmd) {
451 case DMA_BUF_IOCTL_SYNC:
452 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
453 return -EFAULT;
454
455 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
456 return -EINVAL;
457
458 switch (sync.flags & DMA_BUF_SYNC_RW) {
459 case DMA_BUF_SYNC_READ:
460 direction = DMA_FROM_DEVICE;
461 break;
462 case DMA_BUF_SYNC_WRITE:
463 direction = DMA_TO_DEVICE;
464 break;
465 case DMA_BUF_SYNC_RW:
466 direction = DMA_BIDIRECTIONAL;
467 break;
468 default:
469 return -EINVAL;
470 }
471
472 if (sync.flags & DMA_BUF_SYNC_END)
473 ret = dma_buf_end_cpu_access(dmabuf, direction);
474 else
475 ret = dma_buf_begin_cpu_access(dmabuf, direction);
476
477 return ret;
478
479 case DMA_BUF_SET_NAME_A:
480 case DMA_BUF_SET_NAME_B:
481 return dma_buf_set_name(dmabuf, (const char __user *)arg);
482
483 #if IS_ENABLED(CONFIG_SYNC_FILE)
484 case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
485 return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
486 case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
487 return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
488 #endif
489
490 default:
491 return -ENOTTY;
492 }
493 }
494
495 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
496 {
497 struct dma_buf *dmabuf = file->private_data;
498
499 seq_printf(m, "size:\t%zu\n", dmabuf->size);
500 /* Don't count the temporary reference taken inside procfs seq_show */
501 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
502 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
503 spin_lock(&dmabuf->name_lock);
504 if (dmabuf->name)
505 seq_printf(m, "name:\t%s\n", dmabuf->name);
506 spin_unlock(&dmabuf->name_lock);
507 }
508
509 static const struct file_operations dma_buf_fops = {
510 .release = dma_buf_file_release,
511 .mmap = dma_buf_mmap_internal,
512 .llseek = dma_buf_llseek,
513 .poll = dma_buf_poll,
514 .unlocked_ioctl = dma_buf_ioctl,
515 .compat_ioctl = compat_ptr_ioctl,
516 .show_fdinfo = dma_buf_show_fdinfo,
517 };
518
519 /*
520 * is_dma_buf_file - Check if struct file* is associated with dma_buf
521 */
522 static inline int is_dma_buf_file(struct file *file)
523 {
524 return file->f_op == &dma_buf_fops;
525 }
526
527 static struct file *dma_buf_getfile(size_t size, int flags)
528 {
529 static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
530 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
531 struct file *file;
532
533 if (IS_ERR(inode))
534 return ERR_CAST(inode);
535
536 inode->i_size = size;
537 inode_set_bytes(inode, size);
538
539 /*
540 * The ->i_ino acquired from get_next_ino() is not unique and thus
541 * not suitable for use as the dentry name by dmabuf stats.
542 * Override ->i_ino with a unique, dmabuffs-specific value.
544 */
545 inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
546 flags &= O_ACCMODE | O_NONBLOCK;
547 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
548 flags, &dma_buf_fops);
549 if (IS_ERR(file))
550 goto err_alloc_file;
551
552 return file;
553
554 err_alloc_file:
555 iput(inode);
556 return file;
557 }
558
559 /**
560 * DOC: dma buf device access
561 *
562 * For device DMA access to a shared DMA buffer the usual sequence of operations
563 * is fairly simple:
564 *
565 * 1. The exporter defines its exporter instance using
566 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
567 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
568 * as a file descriptor by calling dma_buf_fd().
569 *
570 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
571 * to be shared with: first the file descriptor is converted to a &dma_buf using
572 * dma_buf_get(). Then the buffer is attached to the device using
573 * dma_buf_attach().
574 *
575 * Up to this stage the exporter is still free to migrate or reallocate the
576 * backing storage.
577 *
578 * 3. Once the buffer is attached to all devices userspace can initiate DMA
579 * access to the shared buffer. In the kernel this is done by calling
580 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
581 *
582 * 4. Once a driver is done with a shared buffer it needs to call
583 * dma_buf_detach() (after cleaning up any mappings) and then release the
584 * reference acquired with dma_buf_get() by calling dma_buf_put().
585 *
586 * For the detailed semantics exporters are expected to implement see
587 * &dma_buf_ops.
588 */
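
/*
 * Example (illustrative kernel-side sketch, not used by this file): the
 * importer sequence from the list above for a simple static (non-dynamic)
 * importer. example_import_and_map() and its device are hypothetical; error
 * unwinding is abbreviated.
 *
 *	static int example_import_and_map(int fd, struct device *dev)
 *	{
 *		struct dma_buf *dmabuf;
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *
 *		dmabuf = dma_buf_get(fd);		// step 2: fd -> dma_buf
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		attach = dma_buf_attach(dmabuf, dev);	// step 2: attach device
 *		if (IS_ERR(attach)) {
 *			dma_buf_put(dmabuf);
 *			return PTR_ERR(attach);
 *		}
 *
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); // step 3
 *		if (IS_ERR(sgt)) {
 *			dma_buf_detach(dmabuf, attach);
 *			dma_buf_put(dmabuf);
 *			return PTR_ERR(sgt);
 *		}
 *
 *		// ... program the device with the DMA addresses in sgt ...
 *
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); // step 4
 *		dma_buf_detach(dmabuf, attach);
 *		dma_buf_put(dmabuf);
 *		return 0;
 *	}
 */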
589
590 /**
591 * dma_buf_export - Creates a new dma_buf, and associates an anon file
592 * with this buffer, so it can be exported.
593 * Also connect the allocator specific data and ops to the buffer.
594 * Additionally, provide a name string for exporter; useful in debugging.
595 *
596 * @exp_info: [in] holds all the export related information provided
597 * by the exporter. see &struct dma_buf_export_info
598 * for further details.
599 *
600 * Returns, on success, a newly created struct dma_buf object, which wraps the
601 * supplied private data and operations for struct dma_buf_ops. On missing
602 * ops or an error allocating the struct dma_buf, a negative error is
603 * returned (as an ERR_PTR).
604 *
605 * For most cases the easiest way to create @exp_info is through the
606 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
607 */
608 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
609 {
610 struct dma_buf *dmabuf;
611 struct dma_resv *resv = exp_info->resv;
612 struct file *file;
613 size_t alloc_size = sizeof(struct dma_buf);
614 int ret;
615
616 if (WARN_ON(!exp_info->priv || !exp_info->ops
617 || !exp_info->ops->map_dma_buf
618 || !exp_info->ops->unmap_dma_buf
619 || !exp_info->ops->release))
620 return ERR_PTR(-EINVAL);
621
622 if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
623 (exp_info->ops->pin || exp_info->ops->unpin)))
624 return ERR_PTR(-EINVAL);
625
626 if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
627 return ERR_PTR(-EINVAL);
628
629 if (!try_module_get(exp_info->owner))
630 return ERR_PTR(-ENOENT);
631
632 file = dma_buf_getfile(exp_info->size, exp_info->flags);
633 if (IS_ERR(file)) {
634 ret = PTR_ERR(file);
635 goto err_module;
636 }
637
638 if (!exp_info->resv)
639 alloc_size += sizeof(struct dma_resv);
640 else
641 /* prevent &dma_buf[1] == dma_buf->resv */
642 alloc_size += 1;
643 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
644 if (!dmabuf) {
645 ret = -ENOMEM;
646 goto err_file;
647 }
648
649 dmabuf->priv = exp_info->priv;
650 dmabuf->ops = exp_info->ops;
651 dmabuf->size = exp_info->size;
652 dmabuf->exp_name = exp_info->exp_name;
653 dmabuf->owner = exp_info->owner;
654 spin_lock_init(&dmabuf->name_lock);
655 init_waitqueue_head(&dmabuf->poll);
656 dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
657 dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
658 mutex_init(&dmabuf->lock);
659 INIT_LIST_HEAD(&dmabuf->attachments);
660
661 if (!resv) {
662 dmabuf->resv = (struct dma_resv *)&dmabuf[1];
663 dma_resv_init(dmabuf->resv);
664 } else {
665 dmabuf->resv = resv;
666 }
667
668 ret = dma_buf_stats_setup(dmabuf, file);
669 if (ret)
670 goto err_dmabuf;
671
672 file->private_data = dmabuf;
673 file->f_path.dentry->d_fsdata = dmabuf;
674 dmabuf->file = file;
675
676 mutex_lock(&db_list.lock);
677 list_add(&dmabuf->list_node, &db_list.head);
678 mutex_unlock(&db_list.lock);
679
680 return dmabuf;
681
682 err_dmabuf:
683 if (!resv)
684 dma_resv_fini(dmabuf->resv);
685 kfree(dmabuf);
686 err_file:
687 fput(file);
688 err_module:
689 module_put(exp_info->owner);
690 return ERR_PTR(ret);
691 }
692 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
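
/*
 * Example (illustrative sketch, not used by this file): how an exporter
 * typically builds the export info and hands the buffer to userspace.
 * my_exporter_ops, struct my_buffer and my_export_to_fd() are hypothetical;
 * error handling is abbreviated.
 *
 *	static int my_export_to_fd(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *		int fd;
 *
 *		exp_info.ops = &my_exporter_ops;   // must provide map/unmap/release
 *		exp_info.size = buf->size;
 *		exp_info.flags = O_CLOEXEC | O_RDWR;
 *		exp_info.priv = buf;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *		if (fd < 0)
 *			dma_buf_put(dmabuf);	// drops the file reference again
 *
 *		return fd;
 *	}
 */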
693
694 /**
695 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
696 * @dmabuf: [in] pointer to dma_buf for which fd is required.
697 * @flags: [in] flags to give to fd
698 *
699 * On success, returns an associated 'fd'. Else, returns error.
700 */
701 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
702 {
703 int fd;
704
705 if (!dmabuf || !dmabuf->file)
706 return -EINVAL;
707
708 fd = get_unused_fd_flags(flags);
709 if (fd < 0)
710 return fd;
711
712 fd_install(fd, dmabuf->file);
713
714 return fd;
715 }
716 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
717
718 /**
719 * dma_buf_get - returns the struct dma_buf related to an fd
720 * @fd: [in] fd associated with the struct dma_buf to be returned
721 *
722 * On success, returns the struct dma_buf associated with an fd; uses
723 * the file's refcounting done by fget() to increase the refcount. Returns
724 * an ERR_PTR otherwise.
725 */
726 struct dma_buf *dma_buf_get(int fd)
727 {
728 struct file *file;
729
730 file = fget(fd);
731
732 if (!file)
733 return ERR_PTR(-EBADF);
734
735 if (!is_dma_buf_file(file)) {
736 fput(file);
737 return ERR_PTR(-EINVAL);
738 }
739
740 return file->private_data;
741 }
742 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
743
744 /**
745 * dma_buf_put - decreases refcount of the buffer
746 * @dmabuf: [in] buffer to reduce refcount of
747 *
748 * Uses file's refcounting done implicitly by fput().
749 *
750 * If, as a result of this call, the refcount becomes 0, the 'release' file
751 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
752 * in turn, and frees the memory allocated for dmabuf when exported.
753 */
754 void dma_buf_put(struct dma_buf *dmabuf)
755 {
756 if (WARN_ON(!dmabuf || !dmabuf->file))
757 return;
758
759 fput(dmabuf->file);
760 }
761 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
762
763 static void mangle_sg_table(struct sg_table *sg_table)
764 {
765 #ifdef CONFIG_DMABUF_DEBUG
766 int i;
767 struct scatterlist *sg;
768
769 /* To catch abuse of the underlying struct page by importers mix
770 * up the bits, but take care to preserve the low SG_ bits to
771 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
772 * before passing the sgt back to the exporter. */
773 for_each_sgtable_sg(sg_table, sg, i)
774 sg->page_link ^= ~0xffUL;
775 #endif
776
777 }
778 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
779 enum dma_data_direction direction)
780 {
781 struct sg_table *sg_table;
782 signed long ret;
783
784 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
785 if (IS_ERR_OR_NULL(sg_table))
786 return sg_table;
787
788 if (!dma_buf_attachment_is_dynamic(attach)) {
789 ret = dma_resv_wait_timeout(attach->dmabuf->resv,
790 DMA_RESV_USAGE_KERNEL, true,
791 MAX_SCHEDULE_TIMEOUT);
792 if (ret < 0) {
793 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
794 direction);
795 return ERR_PTR(ret);
796 }
797 }
798
799 mangle_sg_table(sg_table);
800 return sg_table;
801 }
802
803 /**
804 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
805 * @dmabuf: [in] buffer to attach device to.
806 * @dev: [in] device to be attached.
807 * @importer_ops: [in] importer operations for the attachment
808 * @importer_priv: [in] importer private pointer for the attachment
809 *
810 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
811 * must be cleaned up by calling dma_buf_detach().
812 *
813 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
814 * functionality.
815 *
816 * Returns:
817 *
818 * A pointer to newly created &dma_buf_attachment on success, or a negative
819 * error code wrapped into a pointer on failure.
820 *
821 * Note that this can fail if the backing storage of @dmabuf is in a place not
822 * accessible to @dev, and cannot be moved to a more suitable place. This is
823 * indicated with the error code -EBUSY.
824 */
825 struct dma_buf_attachment *
826 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
827 const struct dma_buf_attach_ops *importer_ops,
828 void *importer_priv)
829 {
830 struct dma_buf_attachment *attach;
831 int ret;
832
833 if (WARN_ON(!dmabuf || !dev))
834 return ERR_PTR(-EINVAL);
835
836 if (WARN_ON(importer_ops && !importer_ops->move_notify))
837 return ERR_PTR(-EINVAL);
838
839 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
840 if (!attach)
841 return ERR_PTR(-ENOMEM);
842
843 attach->dev = dev;
844 attach->dmabuf = dmabuf;
845 if (importer_ops)
846 attach->peer2peer = importer_ops->allow_peer2peer;
847 attach->importer_ops = importer_ops;
848 attach->importer_priv = importer_priv;
849
850 if (dmabuf->ops->attach) {
851 ret = dmabuf->ops->attach(dmabuf, attach);
852 if (ret)
853 goto err_attach;
854 }
855 dma_resv_lock(dmabuf->resv, NULL);
856 list_add(&attach->node, &dmabuf->attachments);
857 dma_resv_unlock(dmabuf->resv);
858
859 /* When either the importer or the exporter can't handle dynamic
860 * mappings we cache the mapping here to avoid issues with the
861 * reservation object lock.
862 */
863 if (dma_buf_attachment_is_dynamic(attach) !=
864 dma_buf_is_dynamic(dmabuf)) {
865 struct sg_table *sgt;
866
867 if (dma_buf_is_dynamic(attach->dmabuf)) {
868 dma_resv_lock(attach->dmabuf->resv, NULL);
869 ret = dmabuf->ops->pin(attach);
870 if (ret)
871 goto err_unlock;
872 }
873
874 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
875 if (!sgt)
876 sgt = ERR_PTR(-ENOMEM);
877 if (IS_ERR(sgt)) {
878 ret = PTR_ERR(sgt);
879 goto err_unpin;
880 }
881 if (dma_buf_is_dynamic(attach->dmabuf))
882 dma_resv_unlock(attach->dmabuf->resv);
883 attach->sgt = sgt;
884 attach->dir = DMA_BIDIRECTIONAL;
885 }
886
887 return attach;
888
889 err_attach:
890 kfree(attach);
891 return ERR_PTR(ret);
892
893 err_unpin:
894 if (dma_buf_is_dynamic(attach->dmabuf))
895 dmabuf->ops->unpin(attach);
896
897 err_unlock:
898 if (dma_buf_is_dynamic(attach->dmabuf))
899 dma_resv_unlock(attach->dmabuf->resv);
900
901 dma_buf_detach(dmabuf, attach);
902 return ERR_PTR(ret);
903 }
904 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
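
/*
 * Example (illustrative sketch): the kind of importer ops a dynamic importer
 * would pass to dma_buf_dynamic_attach(). struct my_importer and
 * my_importer_invalidate_mapping() are hypothetical; move_notify is called
 * with the dma_resv lock held and must make the importer drop (and later
 * re-create) its cached mapping.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		// Backing storage is about to move: invalidate cached mappings.
 *		my_importer_invalidate_mapping(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	// attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, imp);
 */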
905
906 /**
907 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
908 * @dmabuf: [in] buffer to attach device to.
909 * @dev: [in] device to be attached.
910 *
911 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
912 * mapping.
913 */
914 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
915 struct device *dev)
916 {
917 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
918 }
919 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
920
921 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
922 struct sg_table *sg_table,
923 enum dma_data_direction direction)
924 {
925 /* uses XOR, hence this unmangles */
926 mangle_sg_table(sg_table);
927
928 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
929 }
930
931 /**
932 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
933 * @dmabuf: [in] buffer to detach from.
934 * @attach: [in] attachment to be detached; is free'd after this call.
935 *
936 * Clean up a device attachment obtained by calling dma_buf_attach().
937 *
938 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
939 */
940 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
941 {
942 if (WARN_ON(!dmabuf || !attach))
943 return;
944
945 if (attach->sgt) {
946 if (dma_buf_is_dynamic(attach->dmabuf))
947 dma_resv_lock(attach->dmabuf->resv, NULL);
948
949 __unmap_dma_buf(attach, attach->sgt, attach->dir);
950
951 if (dma_buf_is_dynamic(attach->dmabuf)) {
952 dmabuf->ops->unpin(attach);
953 dma_resv_unlock(attach->dmabuf->resv);
954 }
955 }
956
957 dma_resv_lock(dmabuf->resv, NULL);
958 list_del(&attach->node);
959 dma_resv_unlock(dmabuf->resv);
960 if (dmabuf->ops->detach)
961 dmabuf->ops->detach(dmabuf, attach);
962
963 kfree(attach);
964 }
965 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
966
967 /**
968 * dma_buf_pin - Lock down the DMA-buf
969 * @attach: [in] attachment which should be pinned
970 *
971 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
972 * call this, and only for limited use cases like scanout and not for temporary
973 * pin operations. It is not permitted to allow userspace to pin arbitrary
974 * amounts of buffers through this interface.
975 *
976 * Buffers must be unpinned by calling dma_buf_unpin().
977 *
978 * Returns:
979 * 0 on success, negative error code on failure.
980 */
981 int dma_buf_pin(struct dma_buf_attachment *attach)
982 {
983 struct dma_buf *dmabuf = attach->dmabuf;
984 int ret = 0;
985
986 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
987
988 dma_resv_assert_held(dmabuf->resv);
989
990 if (dmabuf->ops->pin)
991 ret = dmabuf->ops->pin(attach);
992
993 return ret;
994 }
995 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
996
997 /**
998 * dma_buf_unpin - Unpin a DMA-buf
999 * @attach: [in] attachment which should be unpinned
1000 *
1001 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1002 * any mapping of @attach again and inform the importer through
1003 * &dma_buf_attach_ops.move_notify.
1004 */
1005 void dma_buf_unpin(struct dma_buf_attachment *attach)
1006 {
1007 struct dma_buf *dmabuf = attach->dmabuf;
1008
1009 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1010
1011 dma_resv_assert_held(dmabuf->resv);
1012
1013 if (dmabuf->ops->unpin)
1014 dmabuf->ops->unpin(attach);
1015 }
1016 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1017
1018 /**
1019 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1020 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1021 * dma_buf_ops.
1022 * @attach: [in] attachment whose scatterlist is to be returned
1023 * @direction: [in] direction of DMA transfer
1024 *
1025 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1026 * on error. May return -EINTR if it is interrupted by a signal.
1027 *
1028 * On success, the DMA addresses and lengths in the returned scatterlist are
1029 * PAGE_SIZE aligned.
1030 *
1031 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1032 * the underlying backing storage is pinned for as long as a mapping exists,
1033 * therefore users/importers should not hold onto a mapping for undue amounts of
1034 * time.
1035 *
1036 * Important: Dynamic importers must wait for the exclusive fence of the struct
1037 * dma_resv attached to the DMA-BUF first.
1038 */
1039 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1040 enum dma_data_direction direction)
1041 {
1042 struct sg_table *sg_table;
1043 int r;
1044
1045 might_sleep();
1046
1047 if (WARN_ON(!attach || !attach->dmabuf))
1048 return ERR_PTR(-EINVAL);
1049
1050 if (dma_buf_attachment_is_dynamic(attach))
1051 dma_resv_assert_held(attach->dmabuf->resv);
1052
1053 if (attach->sgt) {
1054 /*
1055 * Two mappings with different directions for the same
1056 * attachment are not allowed.
1057 */
1058 if (attach->dir != direction &&
1059 attach->dir != DMA_BIDIRECTIONAL)
1060 return ERR_PTR(-EBUSY);
1061
1062 return attach->sgt;
1063 }
1064
1065 if (dma_buf_is_dynamic(attach->dmabuf)) {
1066 dma_resv_assert_held(attach->dmabuf->resv);
1067 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1068 r = attach->dmabuf->ops->pin(attach);
1069 if (r)
1070 return ERR_PTR(r);
1071 }
1072 }
1073
1074 sg_table = __map_dma_buf(attach, direction);
1075 if (!sg_table)
1076 sg_table = ERR_PTR(-ENOMEM);
1077
1078 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1079 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1080 attach->dmabuf->ops->unpin(attach);
1081
1082 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1083 attach->sgt = sg_table;
1084 attach->dir = direction;
1085 }
1086
1087 #ifdef CONFIG_DMA_API_DEBUG
1088 if (!IS_ERR(sg_table)) {
1089 struct scatterlist *sg;
1090 u64 addr;
1091 int len;
1092 int i;
1093
1094 for_each_sgtable_dma_sg(sg_table, sg, i) {
1095 addr = sg_dma_address(sg);
1096 len = sg_dma_len(sg);
1097 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1098 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1099 __func__, addr, len);
1100 }
1101 }
1102 }
1103 #endif /* CONFIG_DMA_API_DEBUG */
1104 return sg_table;
1105 }
1106 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1107
1108 /**
1109 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1110 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1111 * dma_buf_ops.
1112 * @attach: [in] attachment to unmap buffer from
1113 * @sg_table: [in] scatterlist info of the buffer to unmap
1114 * @direction: [in] direction of DMA transfer
1115 *
1116 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1117 */
1118 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1119 struct sg_table *sg_table,
1120 enum dma_data_direction direction)
1121 {
1122 might_sleep();
1123
1124 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1125 return;
1126
1127 if (dma_buf_attachment_is_dynamic(attach))
1128 dma_resv_assert_held(attach->dmabuf->resv);
1129
1130 if (attach->sgt == sg_table)
1131 return;
1132
1133 if (dma_buf_is_dynamic(attach->dmabuf))
1134 dma_resv_assert_held(attach->dmabuf->resv);
1135
1136 __unmap_dma_buf(attach, sg_table, direction);
1137
1138 if (dma_buf_is_dynamic(attach->dmabuf) &&
1139 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1140 dma_buf_unpin(attach);
1141 }
1142 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1143
1144 /**
1145 * dma_buf_move_notify - notify attachments that DMA-buf is moving
1146 *
1147 * @dmabuf: [in] buffer which is moving
1148 *
1149 * Informs all attachments that they need to destroy and recreate all their
1150 * mappings.
1151 */
1152 void dma_buf_move_notify(struct dma_buf *dmabuf)
1153 {
1154 struct dma_buf_attachment *attach;
1155
1156 dma_resv_assert_held(dmabuf->resv);
1157
1158 list_for_each_entry(attach, &dmabuf->attachments, node)
1159 if (attach->importer_ops)
1160 attach->importer_ops->move_notify(attach);
1161 }
1162 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1163
1164 /**
1165 * DOC: cpu access
1166 *
1167 * There are multiple reasons for supporting CPU access to a dma buffer object:
1168 *
1169 * - Fallback operations in the kernel, for example when a device is connected
1170 * over USB and the kernel needs to shuffle the data around first before
1171 * sending it away. Cache coherency is handled by bracketing any transactions
1172 * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
1174 *
1175 * Since most kernel-internal dma-buf accesses need the entire buffer, a
1176 * vmap interface is provided. Note that on very old 32-bit architectures
1177 * vmalloc space might be limited and result in vmap calls failing.
1178 *
1179 * Interfaces::
1180 *
1181 * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1182 * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1183 *
1184 * The vmap call can fail if there is no vmap support in the exporter, or if
1185 * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1186 * count for all vmap access and calls down into the exporter's vmap function
1187 * only when no vmapping exists, and only unmaps it once. Protection against
1188 * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1189 *
1190 * - For full compatibility on the importer side with existing userspace
1191 * interfaces, which might already support mmap'ing buffers. This is needed in
1192 * many processing pipelines (e.g. feeding a software rendered image into a
1193 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1194 * framework already supported this, and mmap support was needed for DMA buffer
1195 * file descriptors to replace ION buffers.
1196 *
1197 * There are no special interfaces; userspace simply calls mmap on the dma-buf
1198 * fd. But as with kernel CPU access there's a need to bracket the actual
1199 * access, which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1200 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1201 * be restarted.
1202 *
1203 * Some systems might need some sort of cache coherency management e.g. when
1204 * CPU and GPU domains are being accessed through dma-buf at the same time.
1205 * To circumvent this problem there are begin/end coherency markers, that
1206 * forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1207 * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1208 * sequence is used as follows (a userspace sketch is shown after this comment):
1209 *
1210 * - mmap dma-buf fd
1211 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1212 * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1213 * want (with the new data being consumed by say the GPU or the scanout
1214 * device)
1215 * - munmap once you don't need the buffer any more
1216 *
1217 * For correctness and optimal performance, it is always required to use
1218 * SYNC_START and SYNC_END before and after, respectively, when accessing the
1219 * mapped address. Userspace cannot rely on coherent access, even when there
1220 * are systems where it just works without calling these ioctls.
1221 *
1222 * - And as a CPU fallback in userspace processing pipelines.
1223 *
1224 * Similar to the motivation for kernel cpu access it is again important that
1225 * the userspace code of a given importing subsystem can use the same
1226 * interfaces with an imported dma-buf buffer object as with a native buffer
1227 * object. This is especially important for drm where the userspace part of
1228 * contemporary OpenGL, X, and other drivers is huge, and reworking them to
1229 * use a different way to mmap a buffer would be rather invasive.
1230 *
1231 * The assumption in the current dma-buf interfaces is that redirecting the
1232 * initial mmap is all that's needed. A survey of some of the existing
1233 * subsystems shows that no driver seems to do any nefarious thing like
1234 * syncing up with outstanding asynchronous processing on the device or
1235 * allocating special resources at fault time. So hopefully this is good
1236 * enough, since adding interfaces to intercept pagefaults and allow pte
1237 * shootdowns would increase the complexity quite a bit.
1238 *
1239 * Interface::
1240 *
1241 * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1242 * unsigned long);
1243 *
1244 * If the importing subsystem simply provides a special-purpose mmap call to
1245 * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1246 * equally achieve that for a dma-buf object.
1247 */
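
/*
 * Example (userspace sketch referenced in the comment above): bracketing CPU
 * writes to an mmap'ed dma-buf with DMA_BUF_IOCTL_SYNC. dmabuf_fd and the
 * memset() access pattern are assumptions; error handling is trimmed.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/dma-buf.h>
 *
 *	static void fill_buffer(int dmabuf_fd, size_t size)
 *	{
 *		struct dma_buf_sync sync = { 0 };
 *		void *ptr;
 *
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   dmabuf_fd, 0);
 *
 *		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// begin CPU access
 *
 *		memset(ptr, 0, size);				// the actual CPU access
 *
 *		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// end CPU access
 *
 *		munmap(ptr, size);
 *	}
 */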
1248
1249 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1250 enum dma_data_direction direction)
1251 {
1252 bool write = (direction == DMA_BIDIRECTIONAL ||
1253 direction == DMA_TO_DEVICE);
1254 struct dma_resv *resv = dmabuf->resv;
1255 long ret;
1256
1257 /* Wait on any implicit rendering fences */
1258 ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1259 true, MAX_SCHEDULE_TIMEOUT);
1260 if (ret < 0)
1261 return ret;
1262
1263 return 0;
1264 }
1265
1266 /**
1267 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1268 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1269 * preparations. Coherency is only guaranteed for the specified access direction.
1270 * @dmabuf: [in] buffer to prepare cpu access for.
1271 * @direction: [in] direction of access (read and/or write).
1273 *
1274 * After the cpu access is complete the caller should call
1275 * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
1276 * it guaranteed to be coherent with other DMA access.
1277 *
1278 * This function will also wait for any DMA transactions tracked through
1279 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1280 * synchronization this function will only ensure cache coherency, callers must
1281 * ensure synchronization with such DMA transactions on their own.
1282 *
1283 * Can return negative error values, returns 0 on success.
1284 */
1285 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1286 enum dma_data_direction direction)
1287 {
1288 int ret = 0;
1289
1290 if (WARN_ON(!dmabuf))
1291 return -EINVAL;
1292
1293 might_lock(&dmabuf->resv->lock.base);
1294
1295 if (dmabuf->ops->begin_cpu_access)
1296 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1297
1298 /* Ensure that all fences are waited upon - but we first allow
1299 * the native handler the chance to do so more efficiently if it
1300 * chooses. A double invocation here will be a reasonably cheap no-op.
1301 */
1302 if (ret == 0)
1303 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1304
1305 return ret;
1306 }
1307 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1308
1309 /**
1310 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1311 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1312 * actions. Coherency is only guaranteed for the specified access direction.
1313 * @dmabuf: [in] buffer to complete cpu access for.
1314 * @direction: [in] direction of access (read and/or write).
1316 *
1317 * This terminates CPU access started with dma_buf_begin_cpu_access().
1318 *
1319 * Can return negative error values, returns 0 on success.
1320 */
1321 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1322 enum dma_data_direction direction)
1323 {
1324 int ret = 0;
1325
1326 WARN_ON(!dmabuf);
1327
1328 might_lock(&dmabuf->resv->lock.base);
1329
1330 if (dmabuf->ops->end_cpu_access)
1331 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1332
1333 return ret;
1334 }
1335 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1336
1337
1338 /**
1339 * dma_buf_mmap - Set up a userspace mmap with the given vma
1340 * @dmabuf: [in] buffer that should back the vma
1341 * @vma: [in] vma for the mmap
1342 * @pgoff: [in] offset in pages where this mmap should start within the
1343 * dma-buf buffer.
1344 *
1345 * This function adjusts the passed in vma so that it points at the file of the
1346 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1347 * checking on the size of the vma. Then it calls the exporter's mmap function to
1348 * set up the mapping.
1349 *
1350 * Can return negative error values, returns 0 on success.
1351 */
1352 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1353 unsigned long pgoff)
1354 {
1355 if (WARN_ON(!dmabuf || !vma))
1356 return -EINVAL;
1357
1358 /* check if buffer supports mmap */
1359 if (!dmabuf->ops->mmap)
1360 return -EINVAL;
1361
1362 /* check for offset overflow */
1363 if (pgoff + vma_pages(vma) < pgoff)
1364 return -EOVERFLOW;
1365
1366 /* check for overflowing the buffer's size */
1367 if (pgoff + vma_pages(vma) >
1368 dmabuf->size >> PAGE_SHIFT)
1369 return -EINVAL;
1370
1371 /* readjust the vma */
1372 vma_set_file(vma, dmabuf->file);
1373 vma->vm_pgoff = pgoff;
1374
1375 return dmabuf->ops->mmap(dmabuf, vma);
1376 }
1377 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
1378
1379 /**
1380 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1381 * address space. Same restrictions as for vmap and friends apply.
1382 * @dmabuf: [in] buffer to vmap
1383 * @map: [out] returns the vmap pointer
1384 *
1385 * This call may fail due to lack of virtual mapping address space.
1386 * These calls are optional in drivers. The intended use for them
1387 * is mapping objects linearly into kernel address space for heavily used objects.
1388 *
1389 * To ensure coherency users must call dma_buf_begin_cpu_access() and
1390 * dma_buf_end_cpu_access() around any cpu access performed through this
1391 * mapping.
1392 *
1393 * Returns 0 on success, or a negative errno code otherwise.
1394 */
1395 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1396 {
1397 struct iosys_map ptr;
1398 int ret = 0;
1399
1400 iosys_map_clear(map);
1401
1402 if (WARN_ON(!dmabuf))
1403 return -EINVAL;
1404
1405 if (!dmabuf->ops->vmap)
1406 return -EINVAL;
1407
1408 mutex_lock(&dmabuf->lock);
1409 if (dmabuf->vmapping_counter) {
1410 dmabuf->vmapping_counter++;
1411 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1412 *map = dmabuf->vmap_ptr;
1413 goto out_unlock;
1414 }
1415
1416 BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1417
1418 ret = dmabuf->ops->vmap(dmabuf, &ptr);
1419 if (WARN_ON_ONCE(ret))
1420 goto out_unlock;
1421
1422 dmabuf->vmap_ptr = ptr;
1423 dmabuf->vmapping_counter = 1;
1424
1425 *map = dmabuf->vmap_ptr;
1426
1427 out_unlock:
1428 mutex_unlock(&dmabuf->lock);
1429 return ret;
1430 }
1431 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1432
1433 /**
1434 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1435 * @dmabuf: [in] buffer to vunmap
1436 * @map: [in] vmap pointer to vunmap
1437 */
1438 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1439 {
1440 if (WARN_ON(!dmabuf))
1441 return;
1442
1443 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1444 BUG_ON(dmabuf->vmapping_counter == 0);
1445 BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1446
1447 mutex_lock(&dmabuf->lock);
1448 if (--dmabuf->vmapping_counter == 0) {
1449 if (dmabuf->ops->vunmap)
1450 dmabuf->ops->vunmap(dmabuf, map);
1451 iosys_map_clear(&dmabuf->vmap_ptr);
1452 }
1453 mutex_unlock(&dmabuf->lock);
1454 }
1455 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
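
/*
 * Example (illustrative kernel-side sketch): a typical vmap user bracketing
 * its CPU access with begin/end_cpu_access as required above. The fill
 * pattern and the use of iosys_map_memset() (which handles both system and
 * I/O memory mappings) are assumptions; error handling is abbreviated.
 *
 *	static int example_cpu_fill(struct dma_buf *dmabuf, u8 value)
 *	{
 *		struct iosys_map map;
 *		int ret;
 *
 *		ret = dma_buf_vmap(dmabuf, &map);
 *		if (ret)
 *			return ret;
 *
 *		ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *		if (ret)
 *			goto out_vunmap;
 *
 *		iosys_map_memset(&map, 0, value, dmabuf->size);	// CPU access
 *
 *		ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *
 *	out_vunmap:
 *		dma_buf_vunmap(dmabuf, &map);
 *		return ret;
 *	}
 */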
1456
1457 #ifdef CONFIG_DEBUG_FS
1458 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1459 {
1460 struct dma_buf *buf_obj;
1461 struct dma_buf_attachment *attach_obj;
1462 int count = 0, attach_count;
1463 size_t size = 0;
1464 int ret;
1465
1466 ret = mutex_lock_interruptible(&db_list.lock);
1467
1468 if (ret)
1469 return ret;
1470
1471 seq_puts(s, "\nDma-buf Objects:\n");
1472 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1473 "size", "flags", "mode", "count", "ino");
1474
1475 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1476
1477 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1478 if (ret)
1479 goto error_unlock;
1480
1481
1482 spin_lock(&buf_obj->name_lock);
1483 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1484 buf_obj->size,
1485 buf_obj->file->f_flags, buf_obj->file->f_mode,
1486 file_count(buf_obj->file),
1487 buf_obj->exp_name,
1488 file_inode(buf_obj->file)->i_ino,
1489 buf_obj->name ?: "<none>");
1490 spin_unlock(&buf_obj->name_lock);
1491
1492 dma_resv_describe(buf_obj->resv, s);
1493
1494 seq_puts(s, "\tAttached Devices:\n");
1495 attach_count = 0;
1496
1497 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1498 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1499 attach_count++;
1500 }
1501 dma_resv_unlock(buf_obj->resv);
1502
1503 seq_printf(s, "Total %d devices attached\n\n",
1504 attach_count);
1505
1506 count++;
1507 size += buf_obj->size;
1508 }
1509
1510 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1511
1512 mutex_unlock(&db_list.lock);
1513 return 0;
1514
1515 error_unlock:
1516 mutex_unlock(&db_list.lock);
1517 return ret;
1518 }
1519
1520 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1521
1522 static struct dentry *dma_buf_debugfs_dir;
1523
1524 static int dma_buf_init_debugfs(void)
1525 {
1526 struct dentry *d;
1527 int err = 0;
1528
1529 d = debugfs_create_dir("dma_buf", NULL);
1530 if (IS_ERR(d))
1531 return PTR_ERR(d);
1532
1533 dma_buf_debugfs_dir = d;
1534
1535 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1536 NULL, &dma_buf_debug_fops);
1537 if (IS_ERR(d)) {
1538 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1539 debugfs_remove_recursive(dma_buf_debugfs_dir);
1540 dma_buf_debugfs_dir = NULL;
1541 err = PTR_ERR(d);
1542 }
1543
1544 return err;
1545 }
1546
1547 static void dma_buf_uninit_debugfs(void)
1548 {
1549 debugfs_remove_recursive(dma_buf_debugfs_dir);
1550 }
1551 #else
1552 static inline int dma_buf_init_debugfs(void)
1553 {
1554 return 0;
1555 }
1556 static inline void dma_buf_uninit_debugfs(void)
1557 {
1558 }
1559 #endif
1560
1561 static int __init dma_buf_init(void)
1562 {
1563 int ret;
1564
1565 ret = dma_buf_init_sysfs_statistics();
1566 if (ret)
1567 return ret;
1568
1569 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1570 if (IS_ERR(dma_buf_mnt))
1571 return PTR_ERR(dma_buf_mnt);
1572
1573 mutex_init(&db_list.lock);
1574 INIT_LIST_HEAD(&db_list.head);
1575 dma_buf_init_debugfs();
1576 return 0;
1577 }
1578 subsys_initcall(dma_buf_init);
1579
1580 static void __exit dma_buf_deinit(void)
1581 {
1582 dma_buf_uninit_debugfs();
1583 kern_unmount(dma_buf_mnt);
1584 dma_buf_uninit_sysfs_statistics();
1585 }
1586 __exitcall(dma_buf_deinit);
1587