/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include <linux/vmalloc.h>
#include "drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA		0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG		0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T	u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )	NULL
#endif
#endif

/*
 * Compute the order (log base 2, rounded up) of the given size.
 * Can be made faster.
 */
int DRM(order)( unsigned long size )
{
	int order;
	unsigned long tmp;

	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

	if ( size & ~(1 << order) )
		++order;

	return order;
}

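/* Ioctl handler: add a new mapping (registers, framebuffer, shared
 * memory, AGP or scatter/gather) to the device's map list.  The
 * drm_map_t is copied in from userspace, validated (offset and size
 * must be page aligned; only _DRM_SHM maps may be _DRM_REMOVABLE),
 * set up according to its type, and copied back out with the new
 * handle filled in.
 */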
int DRM(addmap)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_t *map;
	drm_map_list_t *list;

	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

	map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return -ENOMEM;

	if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EFAULT;
	}

	/* Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = 0;

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__)
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
#if __REALLY_HAVE_MTRR
		if ( map->type == _DRM_FRAME_BUFFER ||
		     (map->flags & _DRM_WRITE_COMBINING) ) {
			map->mtrr = mtrr_add( map->offset, map->size,
					      MTRR_TYPE_WRCOMB, 1 );
		}
#endif
		map->handle = DRM(ioremap)( map->offset, map->size, dev );
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%ld %d %p\n",
			   map->size, DRM(order)( map->size ), map->handle );
		if ( !map->handle ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			dev->sigdata.lock =
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
#if __REALLY_HAVE_AGP
	case _DRM_AGP:
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		map->offset = map->offset + dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		break;
#endif
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset = map->offset + dev->sg->handle;
		break;

	default:
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}

	list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
	if(!list) {
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	up(&dev->struct_sem);

	if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
		return -EFAULT;
	if ( map->type != _DRM_SHM ) {
		if ( copy_to_user( &((drm_map_t *)arg)->handle,
				   &map->offset,
				   sizeof(map->offset) ) )
			return -EFAULT;
	}
	return 0;
}


/* Remove a map from the maplist and deallocate its resources if the
 * mapping is no longer in use.
 */

int DRM(rmmap)(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_vma_entry_t *pt, *prev;
	drm_map_t *map;
	drm_map_t request;
	int found_maps = 0;

	if (copy_from_user(&request, (drm_map_t *)arg,
			   sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list = &dev->maplist->head;
	list_for_each(list, &dev->maplist->head) {
		r_list = (drm_map_list_t *) list;

		if(r_list->map &&
		   r_list->map->handle == request.handle &&
		   r_list->map->flags & _DRM_REMOVABLE) break;
	}

	/* The list has wrapped around to the head pointer, or it is
	 * empty: we did not find anything.
	 */
	if(list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	list_del(list);
	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma->vm_private_data == map) found_maps++;
	}

	if(!found_maps) {
		switch (map->type) {
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
			if (map->mtrr >= 0) {
				int retcode;
				retcode = mtrr_del(map->mtrr,
						   map->offset,
						   map->size);
				DRM_DEBUG("mtrr_del = %d\n", retcode);
			}
#endif
			DRM(ioremapfree)(map->handle, map->size, dev);
			break;
		case _DRM_SHM:
			vfree(map->handle);
			break;
		case _DRM_AGP:
		case _DRM_SCATTER_GATHER:
			break;
		}
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	}
	up(&dev->struct_sem);
	return 0;
}

#if __HAVE_DMA


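/* Undo a partially completed buffer allocation: free any DMA segments,
 * per-buffer private data and buffer arrays hanging off the entry, and
 * reset its counts so the caller can fail an addbufs ioctl cleanly.
 */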
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			DRM(free_pages)(entry->seglist[i],
					entry->page_order,
					DRM_MEM_DMA);
		}
		DRM(free)(entry->seglist,
			  entry->seg_count *
			  sizeof(*entry->seglist),
			  DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if(entry->buf_count) {
		for(i = 0; i < entry->buf_count; i++) {
			if(entry->buflist[i].dev_private) {
				DRM(free)(entry->buflist[i].dev_private,
					  entry->buflist[i].dev_priv_size,
					  DRM_MEM_BUFS);
			}
		}
		DRM(free)(entry->buflist,
			  entry->buf_count *
			  sizeof(*entry->buflist),
			  DRM_MEM_BUFS);

#if __HAVE_DMA_FREELIST
		DRM(freelist_destroy)(&entry->freelist);
#endif

		entry->buf_count = 0;
	}
}

#if __REALLY_HAVE_AGP
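/* Allocate request.count DMA buffers of 2^order bytes each out of the
 * AGP aperture, starting at request.agp_start.  Only one allocation is
 * allowed per order; the new buffers are appended to dma->buflist.
 */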
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request.agp_start;

	DRM_DEBUG( "count:      %d\n", count );
	DRM_DEBUG( "order:      %d\n", order );
	DRM_DEBUG( "size:       %d\n", size );
	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total:      %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->pid = 0;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif
		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif
	up( &dev->struct_sem );

	request.count = entry->buf_count;
	request.size = size;

	if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
		return -EFAULT;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __REALLY_HAVE_AGP */

#if __HAVE_PCI_DMA
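/* Allocate request.count DMA buffers of 2^order bytes each from kernel
 * pages (DRM(alloc_pages)), carving each page-order segment into as
 * many aligned buffers as fit, and recording every page in
 * dma->pagelist.
 */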
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request.count, request.size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
				     DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	temp_pagelist = DRM(realloc)( dma->pagelist,
				      dma->page_count * sizeof(*dma->pagelist),
				      (dma->page_count + (count << page_order))
				      * sizeof(*dma->pagelist),
				      DRM_MEM_PAGES );
	if(!temp_pagelist) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		DRM(free)( entry->seglist,
			   count * sizeof(*entry->seglist),
			   DRM_MEM_SEGS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}

	dma->pagelist = temp_pagelist;
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
		if ( !page ) break;
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			dma->pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head( &buf->dma_wait );
			buf->pid = 0;
#if __HAVE_DMA_HISTOGRAM
			buf->time_queued = 0;
			buf->time_dispatched = 0;
			buf->time_completed = 0;
			buf->time_freed = 0;
#endif
			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif
	up( &dev->struct_sem );

	request.count = entry->buf_count;
	request.size = size;

	if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
		return -EFAULT;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __HAVE_PCI_DMA */

#if __HAVE_SG
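/* Allocate request.count DMA buffers of 2^order bytes each out of the
 * scatter/gather area; like the AGP path, but buffer addresses are
 * relative to dev->sg->handle.
 */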
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request.agp_start;

	DRM_DEBUG( "count:      %d\n", count );
	DRM_DEBUG( "order:      %d\n", order );
	DRM_DEBUG( "size:       %d\n", size );
	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total:      %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->pid = 0;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif
		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif
	up( &dev->struct_sem );

	request.count = entry->buf_count;
	request.size = size;

	if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
		return -EFAULT;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __HAVE_SG */

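/* Ioctl entry point: dispatch to the AGP, scatter/gather or PCI
 * allocator according to request.flags.
 */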
int DRM(addbufs)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;

	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

#if __REALLY_HAVE_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		return DRM(addbufs_agp)( inode, filp, cmd, arg );
	else
#endif
#if __HAVE_SG
	if ( request.flags & _DRM_SG_BUFFER )
		return DRM(addbufs_sg)( inode, filp, cmd, arg );
	else
#endif
#if __HAVE_PCI_DMA
	return DRM(addbufs_pci)( inode, filp, cmd, arg );
#else
	return -EINVAL;
#endif
}

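/* Report the per-order buffer pool statistics (count, size, freelist
 * low/high water marks) to userspace.  Bumps dev->buf_use, so no new
 * buffers can be allocated once this has been called.
 */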
int DRM(infobufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	int i;
	int count;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request,
			     (drm_buf_info_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	if ( copy_to_user( (drm_buf_info_t *)arg,
			   &request,
			   sizeof(request) ) )
		return -EFAULT;

	return 0;
}

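/* Set the freelist low and high water marks for the buffer pool whose
 * buffer size matches request.size.
 */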
int DRM(markbufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_desc_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = DRM(order)( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

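/* Return the buffers listed in the request to the free pool, after
 * checking that each index is valid and owned by the calling process.
 */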
int DRM(freebufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_free_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if ( buf->pid != current->pid ) {
			DRM_ERROR( "Process %d freeing buffer owned by %d\n",
				   current->pid, buf->pid );
			return -EINVAL;
		}
		DRM(free_buffer)( dev, buf );
	}

	return 0;
}

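/* Map the whole DMA buffer pool into the client's address space with a
 * single do_mmap() call (through the AGP/SG buffers map when those are
 * in use) and fill in the index, size and user address of each buffer.
 */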
int DRM(mapbufs)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, (drm_buf_map_t *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
		if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
		     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
			drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

			down_write( &current->mm->mmap_sem );
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
					   (unsigned long)map->offset );
			up_write( &current->mm->mmap_sem );
		} else {
			down_write( &current->mm->mmap_sem );
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
			up_write( &current->mm->mmap_sem );
		}
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero,
					   sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}

#endif /* __HAVE_DMA */
