1 /* vm.c -- Memory mapping for DRM -*- linux-c -*-
2 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 *
30 */
31
32 #define __NO_VERSION__
33 #include "drmP.h"
34
/* VM operations for direct hardware maps (frame buffer, registers, AGP).
 * These ranges are fully populated by remap_page_range() at mmap time in
 * drm_mmap(), so the nopage handler only signals SIGBUS on stray faults. */
struct vm_operations_struct   drm_vm_ops = {
	nopage:	 drm_vm_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};
40
/* VM operations for _DRM_SHM maps without the hardware lock: faults are
 * satisfied page-by-page from the kernel memory at map->handle (stashed in
 * vm_private_data / vm_pte by drm_mmap()). */
struct vm_operations_struct   drm_vm_shm_ops = {
	nopage:	 drm_vm_shm_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};
46
/* VM operations for the _DRM_SHM map that contains the hardware lock
 * (_DRM_CONTAINS_LOCK): faults are satisfied from dev->lock.hw_lock,
 * reached through vma->vm_file rather than vm_private_data. */
struct vm_operations_struct   drm_vm_shm_lock_ops = {
	nopage:	 drm_vm_shm_nopage_lock,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};
52
/* VM operations for DMA buffer maps (offset 0 mmap): faults are satisfied
 * from the per-page physical addresses in dev->dma->pagelist. */
struct vm_operations_struct   drm_vm_dma_ops = {
	nopage:	 drm_vm_dma_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};
58
/* nopage handler for direct hardware maps.  The whole range was remapped
 * up front in drm_mmap(), so any fault reaching this handler is an access
 * outside the originally established mapping (e.g. after an mremap grow)
 * and is refused with SIGBUS. */
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_nopage(struct vm_area_struct *vma,
			    unsigned long address,
			    int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_nopage(struct vm_area_struct *vma,
			   unsigned long address,
			   int write_access)
#endif
{
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
72
73 #if LINUX_VERSION_CODE < 0x020317
drm_vm_shm_nopage(struct vm_area_struct * vma,unsigned long address,int write_access)74 unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
75 unsigned long address,
76 int write_access)
77 #else
78 /* Return type changed in 2.3.23 */
79 struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
80 unsigned long address,
81 int write_access)
82 #endif
83 {
84 #if LINUX_VERSION_CODE >= 0x020300
85 drm_map_t *map = (drm_map_t *)vma->vm_private_data;
86 #else
87 drm_map_t *map = (drm_map_t *)vma->vm_pte;
88 #endif
89 unsigned long physical;
90 unsigned long offset;
91
92 if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
93 if (!map) return NOPAGE_OOM; /* Nothing allocated */
94
95 offset = address - vma->vm_start;
96 physical = (unsigned long)map->handle + offset;
97 atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
98
99 DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
100 #if LINUX_VERSION_CODE < 0x020317
101 return physical;
102 #else
103 return virt_to_page(physical);
104 #endif
105 }
106
107 #if LINUX_VERSION_CODE < 0x020317
drm_vm_shm_nopage_lock(struct vm_area_struct * vma,unsigned long address,int write_access)108 unsigned long drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
109 unsigned long address,
110 int write_access)
111 #else
112 /* Return type changed in 2.3.23 */
113 struct page *drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
114 unsigned long address,
115 int write_access)
116 #endif
117 {
118 drm_file_t *priv = vma->vm_file->private_data;
119 drm_device_t *dev = priv->dev;
120 unsigned long physical;
121 unsigned long offset;
122 unsigned long page;
123
124 if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
125 if (!dev->lock.hw_lock) return NOPAGE_OOM; /* Nothing allocated */
126
127 offset = address - vma->vm_start;
128 page = offset >> PAGE_SHIFT;
129 physical = (unsigned long)dev->lock.hw_lock + offset;
130 atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
131
132 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
133 #if LINUX_VERSION_CODE < 0x020317
134 return physical;
135 #else
136 return virt_to_page(physical);
137 #endif
138 }
139
140 #if LINUX_VERSION_CODE < 0x020317
drm_vm_dma_nopage(struct vm_area_struct * vma,unsigned long address,int write_access)141 unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
142 unsigned long address,
143 int write_access)
144 #else
145 /* Return type changed in 2.3.23 */
146 struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
147 unsigned long address,
148 int write_access)
149 #endif
150 {
151 drm_file_t *priv = vma->vm_file->private_data;
152 drm_device_t *dev = priv->dev;
153 drm_device_dma_t *dma = dev->dma;
154 unsigned long physical;
155 unsigned long offset;
156 unsigned long page;
157
158 if (!dma) return NOPAGE_SIGBUS; /* Error */
159 if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
160 if (!dma->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
161
162 offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
163 page = offset >> PAGE_SHIFT;
164 physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
165 atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
166
167 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
168 #if LINUX_VERSION_CODE < 0x020317
169 return physical;
170 #else
171 return virt_to_page(physical);
172 #endif
173 }
174
drm_vm_open(struct vm_area_struct * vma)175 void drm_vm_open(struct vm_area_struct *vma)
176 {
177 drm_file_t *priv = vma->vm_file->private_data;
178 drm_device_t *dev = priv->dev;
179 #if DRM_DEBUG_CODE
180 drm_vma_entry_t *vma_entry;
181 #endif
182
183 DRM_DEBUG("0x%08lx,0x%08lx\n",
184 vma->vm_start, vma->vm_end - vma->vm_start);
185 atomic_inc(&dev->vma_count);
186 #if LINUX_VERSION_CODE < 0x020333
187 /* The map can exist after the fd is closed. */
188 MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
189 #endif
190
191
192 #if DRM_DEBUG_CODE
193 vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
194 if (vma_entry) {
195 down(&dev->struct_sem);
196 vma_entry->vma = vma;
197 vma_entry->next = dev->vmalist;
198 vma_entry->pid = current->pid;
199 dev->vmalist = vma_entry;
200 up(&dev->struct_sem);
201 }
202 #endif
203 }
204
drm_vm_close(struct vm_area_struct * vma)205 void drm_vm_close(struct vm_area_struct *vma)
206 {
207 drm_file_t *priv = vma->vm_file->private_data;
208 drm_device_t *dev = priv->dev;
209 #if DRM_DEBUG_CODE
210 drm_vma_entry_t *pt, *prev;
211 #endif
212
213 DRM_DEBUG("0x%08lx,0x%08lx\n",
214 vma->vm_start, vma->vm_end - vma->vm_start);
215 #if LINUX_VERSION_CODE < 0x020333
216 MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
217 #endif
218 atomic_dec(&dev->vma_count);
219
220 #if DRM_DEBUG_CODE
221 down(&dev->struct_sem);
222 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
223 if (pt->vma == vma) {
224 if (prev) {
225 prev->next = pt->next;
226 } else {
227 dev->vmalist = pt->next;
228 }
229 drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
230 break;
231 }
232 }
233 up(&dev->struct_sem);
234 #endif
235 }
236
drm_mmap_dma(struct file * filp,struct vm_area_struct * vma)237 int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
238 {
239 drm_file_t *priv = filp->private_data;
240 drm_device_t *dev;
241 drm_device_dma_t *dma;
242 unsigned long length = vma->vm_end - vma->vm_start;
243
244 lock_kernel();
245 dev = priv->dev;
246 dma = dev->dma;
247 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
248 vma->vm_start, vma->vm_end, VM_OFFSET(vma));
249
250 /* Length must match exact page count */
251 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
252 unlock_kernel();
253 return -EINVAL;
254 }
255 unlock_kernel();
256
257 vma->vm_ops = &drm_vm_dma_ops;
258 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
259 vma->vm_flags |= VM_DONTEXPAND;
260
261 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
262 /* In Linux 2.2.3 and above, this is
263 handled in do_mmap() in mm/mmap.c. */
264 ++filp->f_count;
265 #endif
266 vma->vm_file = filp; /* Needed for drm_vm_open() */
267 drm_vm_open(vma);
268 return 0;
269 }
270
drm_mmap(struct file * filp,struct vm_area_struct * vma)271 int drm_mmap(struct file *filp, struct vm_area_struct *vma)
272 {
273 drm_file_t *priv = filp->private_data;
274 drm_device_t *dev = priv->dev;
275 drm_map_t *map = NULL;
276 int i;
277
278 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
279 vma->vm_start, vma->vm_end, VM_OFFSET(vma));
280
281 if (!VM_OFFSET(vma)) return drm_mmap_dma(filp, vma);
282
283 /* A sequential search of a linked list is
284 fine here because: 1) there will only be
285 about 5-10 entries in the list and, 2) a
286 DRI client only has to do this mapping
287 once, so it doesn't have to be optimized
288 for performance, even if the list was a
289 bit longer. */
290 for (i = 0; i < dev->map_count; i++) {
291 map = dev->maplist[i];
292 if (map->offset == VM_OFFSET(vma)) break;
293 }
294
295 if (i >= dev->map_count) return -EINVAL;
296 if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
297 return -EPERM;
298
299 /* Check for valid size. */
300 if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
301
302 if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
303 vma->vm_flags &= VM_MAYWRITE;
304 #if defined(__i386__)
305 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
306 #else
307 /* Ye gads this is ugly. With more thought
308 we could move this up higher and use
309 `protection_map' instead. */
310 vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
311 __pte(pgprot_val(vma->vm_page_prot)))));
312 #endif
313 }
314
315 switch (map->type) {
316 case _DRM_FRAME_BUFFER:
317 case _DRM_REGISTERS:
318 case _DRM_AGP:
319 if (VM_OFFSET(vma) >= __pa(high_memory)) {
320 #if defined(__i386__) || defined(__x86_64__)
321 if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
322 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
323 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
324 }
325 #elif defined(__ia64__)
326 if (map->type != _DRM_AGP)
327 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
328 #endif
329 vma->vm_flags |= VM_IO; /* not in core dump */
330 }
331 if (remap_page_range(vma->vm_start,
332 VM_OFFSET(vma),
333 vma->vm_end - vma->vm_start,
334 vma->vm_page_prot))
335 return -EAGAIN;
336 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
337 " offset = 0x%lx\n",
338 map->type,
339 vma->vm_start, vma->vm_end, VM_OFFSET(vma));
340 vma->vm_ops = &drm_vm_ops;
341 break;
342 case _DRM_SHM:
343 if (map->flags & _DRM_CONTAINS_LOCK)
344 vma->vm_ops = &drm_vm_shm_lock_ops;
345 else {
346 vma->vm_ops = &drm_vm_shm_ops;
347 #if LINUX_VERSION_CODE >= 0x020300
348 vma->vm_private_data = (void *)map;
349 #else
350 vma->vm_pte = (unsigned long)map;
351 #endif
352 }
353
354 /* Don't let this area swap. Change when
355 DRM_KERNEL advisory is supported. */
356 vma->vm_flags |= VM_LOCKED;
357 break;
358 default:
359 return -EINVAL; /* This should never happen. */
360 }
361 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
362 vma->vm_flags |= VM_DONTEXPAND;
363
364 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
365 /* In Linux 2.2.3 and above, this is
366 handled in do_mmap() in mm/mmap.c. */
367 ++filp->f_count;
368 #endif
369 vma->vm_file = filp; /* Needed for drm_vm_open() */
370 drm_vm_open(vma);
371 return 0;
372 }
373