/* radeon_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
 *
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Kevin E. Martin <martin@valinux.com>
 *          Rickard E. (Rik) Faith <faith@valinux.com>
 *          Jeff Hartmann <jhartmann@valinux.com>
 *
 */

#define __NO_VERSION__
#include <linux/config.h>
#include "drmP.h"
#include "radeon_drv.h"
#include "linux/un.h"


#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
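/* Add AGP-resident DMA buffers of a single power-of-two size to the
 * device: carve request.count buffers out of the AGP aperture starting
 * at request.agp_start, record them in dma->bufs[order], and append
 * them to the device-wide buffer list.  Reached from radeon_addbufs()
 * for requests carrying the _DRM_AGP_BUFFER flag.
 */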
int radeon_addbufs_agp(struct inode *inode, struct file *filp,
                       unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;

        if (!dma) return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
                return -EFAULT;

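        /* Round the requested size up to a power of two and derive the
         * per-buffer alignment and the page span of one buffer from it. */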
        count      = request.count;
        order      = drm_order(request.size);
        size       = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG("count:      %d\n",  count);
        DRM_DEBUG("order:      %d\n",  order);
        DRM_DEBUG("size:       %d\n",  size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n",  alignment);
        DRM_DEBUG("page_order: %d\n",  page_order);
        DRM_DEBUG("total:      %d\n",  total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        if (dev->queue_count) return -EBUSY; /* Not while in use */

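        /* buf_alloc and buf_use exclude each other under count_lock:
         * no new buffers may be added once a client has mapped them
         * (see radeon_mapbufs() below), and mapping is refused while
         * an allocation is in flight. */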
        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        /* Might be too low a limit; the XFree86 folks need to fix this
         * properly. */
        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

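        /* One descriptor per buffer; the loop below fills in each slot. */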
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size   = size;
        entry->page_order = page_order;

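        /* Hand out consecutive slices of the AGP region, `alignment'
         * bytes apart, and give every buffer its own zeroed
         * drm_radeon_buf_priv_t. */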
        for (offset = 0;
             entry->buf_count < count;
             offset += alignment, ++entry->buf_count) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;
                buf->offset  = (dma->byte_count + offset);
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->pid     = 0;

                buf->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
                buf->dev_private   = drm_alloc(sizeof(drm_radeon_buf_priv_t),
                                               DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        up(&dev->struct_sem);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

#if DRM_DMA_HISTOGRAM
                buf->time_queued     = 0;
                buf->time_dispatched = 0;
                buf->time_completed  = 0;
                buf->time_freed      = 0;
#endif

                byte_count += PAGE_SIZE << page_order;

                DRM_DEBUG("buffer %d @ %p\n",
                          entry->buf_count, buf->address);
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

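        /* Grow the device-wide buffer list to take the new entries. */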
        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count  += entry->buf_count;
        dma->byte_count += byte_count;

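        /* Seed the per-order freelist so clients can claim the buffers. */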
        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++) {
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
        }

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
#endif

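/* Dispatcher for the add-buffers ioctl: the Radeon driver only supports
 * AGP buffers, so any request without the _DRM_AGP_BUFFER flag (or on a
 * PCI-only card) is rejected.
 */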
int radeon_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                   unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_buf_desc_t request;

        if (!dev_priv || dev_priv->is_pci) return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
                return -EFAULT;

#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
        if (request.flags & _DRM_AGP_BUFFER)
                return radeon_addbufs_agp(inode, filp, cmd, arg);
        else
#endif
                return -EINVAL;
}

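/* Map every DMA buffer into the caller's address space with a single
 * mmap of the buffer region, then return a per-buffer table of index,
 * size, and user-space address through the drm_buf_map_t request.
 */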
int radeon_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                   unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL;

        DRM_DEBUG("\n");

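        /* Lock out further allocation: once buf_use is raised,
         * radeon_addbufs_agp() fails with -EBUSY. */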
        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request)))
                return -EFAULT;

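        /* Only map if the caller's list has room for every buffer.  AGP
         * buffers live in the dev_priv->buffers map; the PCI branch
         * would map the DMA area through offset 0 of the DRM file. */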
        if (request.count >= dma->buf_count) {
                if (dma->flags & _DRM_DMA_USE_AGP) {
                        drm_map_t *map;

                        map = dev_priv->buffers;
                        if (!map) {
                                retcode = -EINVAL;
                                goto done;
                        }

                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, map->size,
                                          PROT_READ|PROT_WRITE,
                                          MAP_SHARED,
                                          (unsigned long)map->offset);
                        up_write(&current->mm->mmap_sem);
                } else {
                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, dma->byte_count,
                                          PROT_READ|PROT_WRITE, MAP_SHARED, 0);
                        up_write(&current->mm->mmap_sem);
                }
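                /* do_mmap() returns the mapped address on success; a
                 * value in the last page of the address space is a
                 * negative errno. */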
                if (virtual > -1024UL) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

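                /* Describe each buffer to the caller: index, size,
                 * used = 0, and its address within the new mapping. */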
                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero,
                                         sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;
                        if (copy_to_user(&request.list[i].address,
                                         &address,
                                         sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request)))
                return -EFAULT;

        return retcode;
}