/*
 * videobuf2-memops.c - generic memory handling routines for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>

/**
 * vb2_get_vma() - acquire and lock the virtual memory area
 * @vma:	given virtual memory area
 *
 * This function attempts to acquire an area mapped in the userspace for
 * the duration of a hardware operation. The area is "locked" by performing
 * the same set of operations that are performed when a process calls fork()
 * and memory areas are duplicated.
 *
 * Returns a copy of the virtual memory area on success or NULL on failure.
 */
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (vma_copy == NULL)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}
EXPORT_SYMBOL_GPL(vb2_get_vma);

/**
 * vb2_put_vma() - release a userspace virtual memory area
 * @vma:	virtual memory region associated with the area to be released
 *
 * This function releases the previously acquired memory area after a hardware
 * operation.
 */
void vb2_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}
EXPORT_SYMBOL_GPL(vb2_put_vma);
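
/*
 * Typical usage (an illustrative sketch, not code from this file): a
 * USERPTR get_userptr/put_userptr memop pair can pin the VMA backing a
 * user buffer with vb2_get_vma() and drop it again with vb2_put_vma().
 * The buf structure below is hypothetical; find_vma() requires mmap_sem
 * to be held for reading.
 *
 *	struct my_buf {
 *		struct vm_area_struct *vma;
 *	};
 *
 *	down_read(&current->mm->mmap_sem);
 *	vma = find_vma(current->mm, vaddr);
 *	if (vma)
 *		buf->vma = vb2_get_vma(vma);
 *	up_read(&current->mm->mmap_sem);
 *
 *	and later, in the matching put_userptr memop:
 *
 *	vb2_put_vma(buf->vma);
 */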

/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return a locked copy of the struct vm_area_struct for the
 *		given area
 * @res_pa:	will return the physical address for the given vaddr
 *
 * This function walks the memory area of size @size mapped at @vaddr and
 * verifies that the underlying physical pages are contiguous. If they are,
 * the virtual memory area is locked, @res_vma is filled with a copy of it
 * and @res_pa is set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;

	start = vaddr;
	offset = start & ~PAGE_MASK;
	end = start + size;

	vma = find_vma(mm, start);

	if (vma == NULL || vma->vm_end < end)
		return -EFAULT;

	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		int ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			return ret;

		if (prev_pfn == 0)
			/* cast before shifting: dma_addr_t may be wider
			 * than unsigned long (e.g. 32-bit with LPAE) */
			pa = (dma_addr_t)this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1)
			return -EFAULT;

		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL)
		return -ENOMEM;

	*res_pa = pa + offset;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
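
/*
 * Typical usage (an illustrative sketch; the buf structure and error
 * handling are hypothetical, loosely modeled on a physically contiguous
 * allocator's get_userptr memop). The find_vma() call inside is what
 * requires mmap_sem to be held for reading:
 *
 *	struct vm_area_struct *vma;
 *	dma_addr_t dma_addr = 0;
 *	int ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret)
 *		return ERR_PTR(ret);
 *
 *	buf->dma_addr = dma_addr;
 *	buf->vma = vma;
 */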

/**
 * vb2_mmap_pfn_range() - map physical pages to userspace
 * @vma:	virtual memory region for the mapping
 * @paddr:	starting physical address of the memory to be mapped
 * @size:	size of the memory to be mapped
 * @vm_ops:	vm operations to be assigned to the created area
 * @priv:	private data to be associated with the area
 *
 * Returns 0 on success.
 */
int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
				unsigned long size,
				const struct vm_operations_struct *vm_ops,
				void *priv)
{
	int ret;

	size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	if (ret) {
		printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_private_data	= priv;
	vma->vm_ops		= vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %lu\n",
			__func__, paddr, vma->vm_start, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
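
/*
 * Typical usage (an illustrative sketch; struct my_buf and its fields are
 * hypothetical, loosely modeled on a contiguous allocator's mmap memop).
 * The buffer's vb2_vmarea_handler is passed as @priv so vb2_common_vm_ops
 * below can find it in vm_private_data:
 *
 *	static int my_mmap(void *buf_priv, struct vm_area_struct *vma)
 *	{
 *		struct my_buf *buf = buf_priv;
 *
 *		if (!buf)
 *			return -EINVAL;
 *
 *		return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
 *					  &vb2_common_vm_ops, &buf->handler);
 *	}
 */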

/**
 * vb2_common_vm_open() - increase refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function adds another user to the provided vma. It expects a
 * pointer to struct vb2_vmarea_handler in vma->vm_private_data.
 */
static void vb2_common_vm_open(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
	       __func__, h, atomic_read(h->refcount), vma->vm_start,
	       vma->vm_end);

	atomic_inc(h->refcount);
}

/**
 * vb2_common_vm_close() - decrease refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function releases the user from the provided vma. It expects a
 * pointer to struct vb2_vmarea_handler in vma->vm_private_data.
 */
static void vb2_common_vm_close(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
	       __func__, h, atomic_read(h->refcount), vma->vm_start,
	       vma->vm_end);

	h->put(h->arg);
}

/**
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
 * video buffers
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
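
/*
 * Typical usage (an illustrative sketch; names are hypothetical): an
 * allocator embeds a struct vb2_vmarea_handler in its buffer and fills it
 * in before handing &vb2_common_vm_ops to vb2_mmap_pfn_range(). Every
 * fork()/munmap() on the mapping then goes through the open/close handlers
 * above: open bumps the refcount directly, while close calls the put
 * callback, which usually drops the refcount and frees the buffer once it
 * reaches zero:
 *
 *	static void my_buf_put(void *arg)
 *	{
 *		struct my_buf *buf = arg;
 *
 *		if (atomic_dec_and_test(&buf->refcount))
 *			my_free_buffer(buf);
 *	}
 *
 *	buf->handler.refcount = &buf->refcount;
 *	buf->handler.put = my_buf_put;
 *	buf->handler.arg = buf;
 */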

MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");