/* drm_vm.h -- Memory mapping for DRM -*- linux-c -*-
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"

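/* VM operations tables for the different DRM mapping types: generic
 * (AGP/register/framebuffer) mappings, kernel shared-memory (_DRM_SHM)
 * mappings, DMA buffer mappings, and scatter/gather mappings.  Each table
 * supplies a nopage fault handler plus the open/close hooks used to track
 * VMAs in dev->vmalist.
 */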
struct vm_operations_struct   DRM(vm_ops) = {
	nopage:	 DRM(vm_nopage),
	open:	 DRM(vm_open),
	close:	 DRM(vm_close),
};

struct vm_operations_struct   DRM(vm_shm_ops) = {
	nopage:	 DRM(vm_shm_nopage),
	open:	 DRM(vm_open),
	close:	 DRM(vm_shm_close),
};

struct vm_operations_struct   DRM(vm_dma_ops) = {
	nopage:	 DRM(vm_dma_nopage),
	open:	 DRM(vm_open),
	close:	 DRM(vm_close),
};

struct vm_operations_struct   DRM(vm_sg_ops) = {
	nopage:  DRM(vm_sg_nopage),
	open:    DRM(vm_open),
	close:   DRM(vm_close),
};

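/* Fault handler for AGP mappings.  It only does real work when the AGP
 * aperture cannot be accessed directly by the CPU (dev->agp->cant_use_aperture,
 * e.g. on Alpha): it looks up the map for this VMA, translates the faulting
 * address into the backing AGP memory, and returns the physical page with
 * its reference count raised.  In every other case it signals SIGBUS.
 */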
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
			    unsigned long address,
			    int write_access)
{
#if __REALLY_HAVE_AGP
	drm_file_t *priv  = vma->vm_file->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_t *map    = NULL;
	drm_map_list_t  *r_list;
	struct list_head *list;

	/*
	 * Find the right map
	 */

	if (!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;

	list_for_each(list, &dev->maplist->head) {
		r_list = (drm_map_list_t *)list;
		map = r_list->map;
		if (!map) continue;
		if (map->offset == VM_OFFSET(vma)) break;
	}

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = VM_OFFSET(vma) + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#if __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem) goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		agpmem->memory->memory[offset] &= dev->agp->page_mask;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
			  baddr, __va(agpmem->memory->memory[offset]), offset);

		return page;
	}
vm_nopage_error:
#endif /* __REALLY_HAVE_AGP */

	return NOPAGE_SIGBUS;		/* Disallow mremap */
}

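/* Fault handler for shared-memory (_DRM_SHM) mappings.  The map handle is a
 * vmalloc()ed kernel buffer, so the faulting offset is translated with
 * vmalloc_to_page() and the page is returned with an extra reference.
 */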
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
{
	drm_map_t	 *map	 = (drm_map_t *)vma->vm_private_data;
	unsigned long	 offset;
	unsigned long	 i;
	struct page	 *page;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!map)		   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}

/* Special close routine which deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */

void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map) found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 &&
	    map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = (drm_map_list_t *) list;
			if (r_list->map == map) found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				DRM(ioremapfree)(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			}
			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	up(&dev->struct_sem);
}

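/* Fault handler for DMA buffer mappings.  The whole DMA area is mapped at
 * offset 0, so the faulting offset indexes directly into dma->pagelist; the
 * matching page is returned with its reference count raised.
 */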
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
{
	drm_file_t	 *priv	 = vma->vm_file->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	unsigned long	 offset;
	unsigned long	 page_nr;
	struct page	 *page;

	if (!dma)		   return NOPAGE_SIGBUS; /* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dma->pagelist)	   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
	page_nr  = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] +
			     (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}

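/* Fault handler for scatter/gather mappings.  The map offset relative to
 * dev->sg->handle plus the faulting offset selects the page in
 * entry->pagelist, which is returned with an extra reference.
 */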
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
			       unsigned long address,
			       int write_access)
{
	drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)                return NOPAGE_SIGBUS; /* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!entry->pagelist)      return NOPAGE_OOM;    /* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - dev->sg->handle;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}

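/* VMA open hook, shared by all DRM mapping types.  Bumps the device VMA
 * count and records the new VMA (with the owning pid) in dev->vmalist so
 * that the rest of the driver can track it.
 */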
void DRM(vm_open)(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma	= vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid	= current->pid;
		dev->vmalist	= vma_entry;
		up(&dev->struct_sem);
	}
}

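/* Generic VMA close hook: drops the device VMA count and removes the VMA's
 * entry from dev->vmalist.  _DRM_SHM mappings use DRM(vm_shm_close) instead,
 * which additionally frees removable maps on their last unmap.
 */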
void DRM(vm_close)(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
}

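/* mmap() backend for DMA buffer mappings (offset 0).  The requested length
 * must cover exactly dma->page_count pages; faults are then served by
 * DRM(vm_dma_nopage) and the area is marked reserved and non-expandable.
 */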
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev;
	drm_device_dma_t *dma;
	unsigned long	 length	 = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev	 = priv->dev;
	dma	 = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

				/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops   = &DRM(vm_dma_ops);
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_file  =	 filp;	/* Needed for drm_vm_open() */
	DRM(vm_open)(vma);
	return 0;
}

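/* Per-driver hooks: DRIVER_GET_MAP_OFS() yields the offset used to match a
 * map against VM_OFFSET(vma), and DRIVER_GET_REG_OFS() an extra bus offset
 * added when remapping registers or the framebuffer.  Drivers may override
 * these; the defaults below use the map offset directly and, except on
 * Alpha, no register offset.
 */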
#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS()	(map->offset)
#endif

#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS()	(dev->hose->dense_mem_base -	\
				 dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS()	0
#endif
#endif

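/* Main mmap() entry point.  Offset 0 maps the DMA buffers; any other offset
 * must match a map in dev->maplist.  The map type then selects the vm_ops
 * and page protections: registers and framebuffers are remapped directly
 * (uncached where the architecture requires it), while _DRM_SHM,
 * _DRM_SCATTER_GATHER and (on Alpha) _DRM_AGP are faulted in page by page.
 */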
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_map_t	*map	= NULL;
	drm_map_list_t  *r_list;
	unsigned long   offset  = 0;
	struct list_head *list;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!priv->authenticated) return -EACCES;

	if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);

				/* A sequential search of a linked list is
				   fine here because: 1) there will only be
				   about 5-10 entries in the list, and 2) a
				   DRI client only has to do this mapping
				   once, so it doesn't have to be optimized
				   for performance, even if the list were a
				   bit longer. */
	list_for_each(list, &dev->maplist->head) {
		unsigned long off;

		r_list = (drm_map_list_t *)list;
		map = r_list->map;
		if (!map) continue;
		off = DRIVER_GET_MAP_OFS();
		if (off == VM_OFFSET(vma)) break;
	}

	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

				/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		/* Strip write permission for read-only maps */
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
				/* Ye gads this is ugly.  With more thought
				   we could move this up higher and use
				   `protection_map' instead.  */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
#if defined(__alpha__)
		/*
		 * On Alpha the CPU can't access AGP bus addresses directly,
		 * so for _DRM_AGP memory we sort out the real physical pages
		 * and mappings in nopage().
		 */
		vma->vm_ops = &DRM(vm_ops);
		break;
#endif
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
			}
#elif defined(__ia64__)
			if (map->type != _DRM_AGP)
				vma->vm_page_prot =
					pgprot_writecombine(vma->vm_page_prot);
#elif defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
			vma->vm_flags |= VM_IO;	/* not in core dump */
		}
		offset = DRIVER_GET_REG_OFS();
#ifdef __sparc__
		if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
					VM_OFFSET(vma) + offset,
					vma->vm_end - vma->vm_start,
					vma->vm_page_prot, 0))
#else
		if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
				     VM_OFFSET(vma) + offset,
				     vma->vm_end - vma->vm_start,
				     vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
		vma->vm_ops = &DRM(vm_ops);
		break;
	case _DRM_SHM:
		vma->vm_ops = &DRM(vm_shm_ops);
		vma->vm_private_data = (void *)map;
				/* Don't let this area swap.  Change when
				   DRM_KERNEL advisory is supported. */
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &DRM(vm_sg_ops);
		vma->vm_private_data = (void *)map;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file  =	 filp;	/* Needed for drm_vm_open() */
	DRM(vm_open)(vma);
	return 0;
}