/* i810_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
 * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
 *	    Jeff Hartmann <jhartmann@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"
#include "i810_drv.h"
#include "linux/un.h"

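/* i810_addbufs_agp -- create a pool of same-sized DMA buffers in the AGP
 * aperture region starting at request.agp_start.  Each buffer gets an
 * i810-specific private structure, is appended to the device-wide buffer
 * list, and is placed on the freelist for its size order.
 */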
int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
		     unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;

	if (!dma) return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	count      = request.count;
	order      = drm_order(request.size);
	size       = 1 << order;
	agp_offset = request.agp_start;
	alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total      = PAGE_SIZE << page_order;
	byte_count = 0;

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	if (dev->queue_count) return -EBUSY; /* Not while in use */
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size   = size;
	entry->page_order = page_order;
	offset = 0;

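	/* Carve the AGP region into count buffers of alignment bytes each
	 * and initialize the bookkeeping for every buffer, including its
	 * bus address within the AGP aperture and its i810 private state.
	 */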
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;
		buf->offset = offset;
		buf->bus_address = dev->agp->base + agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->agp->base);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->pid = 0;

		buf->dev_private = drm_alloc(sizeof(drm_i810_buf_priv_t),
					     DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Undo what has been built for this order so far. */
			for (i = 0; i < entry->buf_count; i++)
				drm_free(entry->buflist[i].dev_private,
					 sizeof(drm_i810_buf_priv_t),
					 DRM_MEM_BUFS);
			drm_free(entry->buflist,
				 count * sizeof(*entry->buflist),
				 DRM_MEM_BUFS);
			entry->buf_count = 0;
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		buf->dev_priv_size = sizeof(drm_i810_buf_priv_t);
		memset(buf->dev_private, 0, sizeof(drm_i810_buf_priv_t));

#if DRM_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif
		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;

		DRM_DEBUG("buffer %d @ %p\n",
			  entry->buf_count, buf->address);
	}

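	/* Append the new buffers to the device-wide buffer list and make
	 * them available through this size order's freelist.
	 */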
	dma->buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist),
				   DRM_MEM_BUFS);
	for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
		dma->buflist[i] = &entry->buflist[i - dma->buf_count];

	dma->buf_count  += entry->buf_count;
	dma->byte_count += byte_count;
	drm_freelist_create(&entry->freelist, entry->buf_count);
	for (i = 0; i < entry->buf_count; i++) {
		drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
	}

	up(&dev->struct_sem);

	request.count = entry->buf_count;
	request.size  = size;

	if (copy_to_user((drm_buf_desc_t *)arg,
			 &request,
			 sizeof(request)))
		return -EFAULT;

	atomic_dec(&dev->buf_alloc);
	dma->flags = _DRM_DMA_USE_AGP;
	return 0;
}

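/* i810_addbufs -- entry point for adding buffers.  The i810 only supports
 * buffers in the AGP aperture, so requests without _DRM_AGP_BUFFER set are
 * rejected.
 */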
int i810_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_buf_desc_t	 request;

	if (copy_from_user(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	if (request.flags & _DRM_AGP_BUFFER)
		return i810_addbufs_agp(inode, filp, cmd, arg);
	else
		return -EINVAL;
}

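/* i810_infobufs -- for every buffer size order that has buffers allocated,
 * report the buffer count, buffer size and freelist low/high marks back to
 * userspace.  Also marks the buffers as in use, which prevents any further
 * allocation.
 */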
int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	drm_buf_info_t	 request;
	int		 i;
	int		 count;

	if (!dma) return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request,
			   (drm_buf_info_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
		if (dma->bufs[i].buf_count) ++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
			if (dma->bufs[i].buf_count) {
				if (copy_to_user(&request.list[count].count,
						 &dma->bufs[i].buf_count,
						 sizeof(dma->bufs[0].buf_count)) ||
				    copy_to_user(&request.list[count].size,
						 &dma->bufs[i].buf_size,
						 sizeof(dma->bufs[0].buf_size)) ||
				    copy_to_user(&request.list[count].low_mark,
						 &dma->bufs[i].freelist.low_mark,
						 sizeof(dma->bufs[0].freelist.low_mark)) ||
				    copy_to_user(&request.list[count].high_mark,
						 &dma->bufs[i].freelist.high_mark,
						 sizeof(dma->bufs[0].freelist.high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user((drm_buf_info_t *)arg,
			 &request,
			 sizeof(request)))
		return -EFAULT;

	return 0;
}

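/* i810_markbufs -- set the freelist low and high water marks for the
 * buffer size given in the request.  Both marks must lie within the number
 * of buffers allocated for that size order.
 */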
int i810_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	drm_buf_desc_t	 request;
	int		 order;
	drm_buf_entry_t	 *entry;

	if (!dma) return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark  = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

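/* i810_freebufs -- return the buffers named by index in request.list to
 * the free pool.  A buffer may only be freed by the process that currently
 * owns it.
 */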
int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	drm_buf_free_t	 request;
	int		 i;
	int		 idx;
	drm_buf_t	 *buf;

	if (!dma) return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx,
				   &request.list[i],
				   sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d freeing buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}