/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include "drmP.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
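
/*
 * Worked example for the SHM case above (a sketch; the numbers assume
 * a platform with PAGE_SIZE = 4 KiB and SHMLBA = 16 KiB, e.g. ARM):
 *
 *	SHMLBA >> PAGE_SHIFT = 4, so bits = ilog2(4) + 1 = 3, shift = 3.
 *	For a kernel address whose page number is 0x12345, the masked
 *	value is 0x12345 & 0x7 = 0x5, which is OR'ed into 'add'.
 *	drm_ht_just_insert_please() then only probes keys that keep
 *	those low three bits, so the user token handed back for mmap()
 *	preserves the cache colouring of the original kernel address.
 */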

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * That is not always the case, as AGP can be in the control
		 * of user space (i.e. the i810 driver). So this loop will get
		 * skipped, and we double-check that dev->agp->memory is
		 * actually set, as well as being invalid, before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -ENOMEM;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		list_del(&list->head);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device *dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
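
/*
 * A minimal usage sketch (hypothetical driver code; assumes a PCI
 * device whose register BAR is resource 0, error handling trimmed):
 *
 *	struct drm_local_map *map;
 *	int ret;
 *
 *	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 *	if (ret)
 *		return ret;
 *
 * On success, map->handle holds the ioremap()ed register window and
 * the map is visible to userspace via the handle assigned above.
 */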

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit: this casting isn't very nice, but the
	 * API was fixed long ago, so it is too late to change it now */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;   /* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty:
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
			       sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}
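
/*
 * Sketch of how the request flags steer the dispatch above (hypothetical
 * userspace code; assumes an AGP-managed aperture and ignores the
 * drmAddBufs() wrapper libdrm normally provides):
 *
 *	struct drm_buf_desc req = {
 *		.count = 32,
 *		.size = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * With _DRM_AGP_BUFFER set this lands in drm_addbufs_agp(); replacing it
 * with _DRM_SG_BUFFER or _DRM_FB_BUFFER selects drm_addbufs_sg() or
 * drm_addbufs_fb(), and with none of the three set the request falls
 * through to drm_addbufs_pci().
 */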

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
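
/*
 * This supports the usual two-pass query pattern (a hypothetical
 * userspace sketch; error handling omitted): the first call with
 * count = 0 just reports how many list slots are needed, the second
 * fills them in.
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */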

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
	        struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
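
/*
 * Hypothetical userspace sketch of the handshake described above
 * (error handling omitted; assumes buf_count is the total number of
 * DMA buffers previously created with DRM_IOCTL_ADD_BUFS):
 *
 *	struct drm_buf_map bm = {
 *		.count = buf_count,
 *		.list = calloc(buf_count, sizeof(struct drm_buf_pub)),
 *	};
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
 *
 * Afterwards bm.virtual is the base of the single vm_mmap()ed region
 * and each bm.list[i].address points at buffer i within it.
 */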

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
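
/*
 * A few worked examples, derived from the loop above:
 *
 *	drm_order(1)    = 0	(2^0 = 1)
 *	drm_order(4096) = 12	(4096 = 2^12, already a power of two)
 *	drm_order(4097) = 13	(rounded up to 2^13 = 8192)
 */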