/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"

#include <linux/interrupt.h>	/* For task queue support */

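/* Allocate and zero the DMA bookkeeping structure (dev->dma) and its
   per-order buffer entries. */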
void drm_dma_setup(drm_device_t *dev)
{
	int i;

	if (!(dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER))) {
		printk(KERN_ERR "drm_dma_setup: can't drm_alloc dev->dma\n");
		return;
	}
	memset(dev->dma, 0, sizeof(*dev->dma));
	for (i = 0; i <= DRM_MAX_ORDER; i++)
		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
}

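/* Free all DMA buffer segments, buffer lists, and freelists, plus the
   global buffer and page lists, then release dev->dma itself. */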
void drm_dma_takedown(drm_device_t *dev)
{
	drm_device_dma_t  *dma = dev->dma;
	int		  i, j;

	if (!dma) return;

				/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				drm_free_pages(dma->bufs[i].seglist[j],
					       dma->bufs[i].page_order,
					       DRM_MEM_DMA);
			}
			drm_free(dma->bufs[i].seglist,
				 dma->bufs[i].seg_count
				 * sizeof(*dma->bufs[0].seglist),
				 DRM_MEM_SEGS);
		}
		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				if (dma->bufs[i].buflist[j].dev_private) {
					drm_free(dma->bufs[i].buflist[j].dev_private,
						 dma->bufs[i].buflist[j].dev_priv_size,
						 DRM_MEM_BUFS);
				}
			}
			drm_free(dma->bufs[i].buflist,
				 dma->bufs[i].buf_count *
				 sizeof(*dma->bufs[0].buflist),
				 DRM_MEM_BUFS);
			drm_freelist_destroy(&dma->bufs[i].freelist);
		}
	}

	if (dma->buflist) {
		drm_free(dma->buflist,
			 dma->buf_count * sizeof(*dma->buflist),
			 DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}

#if DRM_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
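/* Map a cycle count onto one of the DRM_DMA_HISTOGRAM_SLOTS buckets. */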
int drm_histogram_slot(unsigned long count)
{
	int value = DRM_DMA_HISTOGRAM_INITIAL;
	int slot;

	for (slot = 0;
	     slot < DRM_DMA_HISTOGRAM_SLOTS;
	     ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
		if (count < value) return slot;
	}
	return DRM_DMA_HISTOGRAM_SLOTS - 1;
}

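/* Record a buffer's queued/dispatched/completed/freed latencies in the
   device histogram, then clear its timestamps. */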
void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
{
	cycles_t queued_to_dispatched;
	cycles_t dispatched_to_completed;
	cycles_t completed_to_freed;
	int	 q2d, d2c, c2f, q2c, q2f;

	if (buf->time_queued) {
		queued_to_dispatched	= (buf->time_dispatched
					   - buf->time_queued);
		dispatched_to_completed = (buf->time_completed
					   - buf->time_dispatched);
		completed_to_freed	= (buf->time_freed
					   - buf->time_completed);

		q2d = drm_histogram_slot(queued_to_dispatched);
		d2c = drm_histogram_slot(dispatched_to_completed);
		c2f = drm_histogram_slot(completed_to_freed);

		q2c = drm_histogram_slot(queued_to_dispatched
					 + dispatched_to_completed);
		q2f = drm_histogram_slot(queued_to_dispatched
					 + dispatched_to_completed
					 + completed_to_freed);

		atomic_inc(&dev->histo.total);
		atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
		atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
		atomic_inc(&dev->histo.completed_to_freed[c2f]);

		atomic_inc(&dev->histo.queued_to_completed[q2c]);
		atomic_inc(&dev->histo.queued_to_freed[q2f]);
	}
	buf->time_queued     = 0;
	buf->time_dispatched = 0;
	buf->time_completed  = 0;
	buf->time_freed	     = 0;
}
#endif

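/* Reset a buffer's state and either wake a waiting process or return the
   buffer to its order's freelist. */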
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
	drm_device_dma_t *dma = dev->dma;

	if (!buf) return;

	buf->waiting  = 0;
	buf->pending  = 0;
	buf->pid      = 0;
	buf->used     = 0;
#if DRM_DMA_HISTOGRAM
	buf->time_completed = get_cycles();
#endif
	if (waitqueue_active(&buf->dma_wait)) {
		wake_up_interruptible(&buf->dma_wait);
	} else {
				/* If processes are waiting, the last one
				   to wake will put the buffer on the free
				   list.  If no processes are waiting, we
				   put the buffer on the freelist here. */
		drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
	}
}

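/* Release all buffers owned by the given pid: idle buffers are freed
   immediately, waiting buffers are marked for reclaim, and buffers already
   on hardware are left alone. */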
void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int		 i;

	if (!dma) return;
	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->pid == pid) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				drm_free_buffer(dev, dma->buflist[i]);
				break;
			case DRM_LIST_WAIT:
				dma->buflist[i]->list = DRM_LIST_RECLAIM;
				break;
			default:
				/* Buffer already on hardware. */
				break;
			}
		}
	}
}

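/* Initiate a context switch from 'old' to 'new'.  The switch is completed
   here when DRM_FLAG_NOCTX is set; otherwise the request is passed to user
   space via drm_write_string(). */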
int drm_context_switch(drm_device_t *dev, int old, int new)
{
	char	    buf[64];
	drm_queue_t *q;

	atomic_inc(&dev->total_ctx);

	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

#if DRM_DMA_HISTOGRAM
	dev->ctx_start = get_cycles();
#endif

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new >= dev->queue_count) {
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	q = dev->queuelist[new];
	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		atomic_dec(&q->use_count);
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (drm_flags & DRM_FLAG_NOCTX) {
		drm_context_switch_complete(dev, new);
	} else {
		sprintf(buf, "C %d %d\n", old, new);
		drm_write_string(dev, buf);
	}

	atomic_dec(&q->use_count);

	return 0;
}

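/* Finish a context switch: record the new context, drop the hardware lock
   unless the next buffer must run while locked, and wake any waiters. */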
int drm_context_switch_complete(drm_device_t *dev, int new)
{
	drm_device_dma_t *dma = dev->dma;

	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("Cannot free lock\n");
		}
	}

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
						      - dev->ctx_start)]);
#endif
	clear_bit(0, &dev->context_flag);
	wake_up_interruptible(&dev->context_wait);

	return 0;
}

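/* Clear the next-buffer and next-queue hints, waking any flush waiters if
   the next queue has drained. */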
void drm_clear_next_buffer(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	dma->next_buffer = NULL;
	if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
		wake_up_interruptible(&dma->next_queue->flush_queue);
	}
	dma->next_queue	 = NULL;
}


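/* Select the context queue to service next.  "While locked" requests and
   the current context's remaining time slice take priority; otherwise a
   waiting queue is chosen, or -1 is returned (arming the timer wrapper) to
   defer the switch. */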
int drm_select_queue(drm_device_t *dev, void (*wrapper)(unsigned long))
{
	int	   i;
	int	   candidate = -1;
	int	   j	     = jiffies;

	if (!dev) {
		DRM_ERROR("No device\n");
		return -1;
	}
	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
				/* This only happens between the time the
				   interrupt is initialized and the time
				   the queues are initialized. */
		return -1;
	}

				/* Doing "while locked" DMA? */
	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		return DRM_KERNEL_CONTEXT;
	}

				/* If there are buffers on the last_context
				   queue, and we have not been executing
				   this context very long, continue to
				   execute this context. */
	if (dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j
	    && DRM_WAITCOUNT(dev, dev->last_context)) {
		return dev->last_context;
	}

				/* Otherwise, find a candidate */
	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
		if (DRM_WAITCOUNT(dev, i)) {
			candidate = dev->last_checked = i;
			break;
		}
	}

	if (candidate < 0) {
		for (i = 0; i < dev->queue_count; i++) {
			if (DRM_WAITCOUNT(dev, i)) {
				candidate = dev->last_checked = i;
				break;
			}
		}
	}

	if (wrapper
	    && candidate >= 0
	    && candidate != dev->last_context
	    && dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j) {
		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
			del_timer(&dev->timer);
			dev->timer.function = wrapper;
			dev->timer.data	    = (unsigned long)dev;
			dev->timer.expires  = dev->last_switch+DRM_TIME_SLICE;
			add_timer(&dev->timer);
		}
		return -1;
	}

	return candidate;
}


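/* Validate the buffers named in a drm_dma_t send request and place them on
   the target context's waitlist (the kernel queue for "while locked"
   requests), blocking first if writes to the queue are blocked. */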
int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
{
	int		  i;
	drm_queue_t	  *q;
	drm_buf_t	  *buf;
	int		  idx;
	int		  while_locked = 0;
	drm_device_dma_t  *dma = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

	DRM_DEBUG("%d\n", d->send_count);

	if (d->flags & _DRM_DMA_WHILE_LOCKED) {
		int context = dev->lock.hw_lock->lock;

		if (!_DRM_LOCK_IS_HELD(context)) {
			DRM_ERROR("No lock held during \"while locked\""
				  " request\n");
			return -EINVAL;
		}
		if (d->context != _DRM_LOCKING_CONTEXT(context)
		    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
			DRM_ERROR("Lock held by %d while %d makes"
				  " \"while locked\" request\n",
				  _DRM_LOCKING_CONTEXT(context),
				  d->context);
			return -EINVAL;
		}
		q = dev->queuelist[DRM_KERNEL_CONTEXT];
		while_locked = 1;
	} else {
		q = dev->queuelist[d->context];
	}


	atomic_inc(&q->use_count);
	if (atomic_read(&q->block_write)) {
		add_wait_queue(&q->write_queue, &entry);
		atomic_inc(&q->block_count);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!atomic_read(&q->block_write)) break;
			schedule();
			if (signal_pending(current)) {
				atomic_dec(&q->use_count);
				remove_wait_queue(&q->write_queue, &entry);
				return -EINTR;
			}
		}
		atomic_dec(&q->block_count);
		current->state = TASK_RUNNING;
		remove_wait_queue(&q->write_queue, &entry);
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->pid != current->pid) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		if (buf->list != DRM_LIST_NONE) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer %d on list %d\n",
				  current->pid, buf->idx, buf->list);
			return -EINVAL;
		}
		buf->used	  = d->send_sizes[i];
		buf->while_locked = while_locked;
		buf->context	  = d->context;
		if (!buf->used) {
			DRM_ERROR("Queueing 0 length buffer\n");
		}
		if (buf->pending) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		if (buf->waiting) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		buf->waiting = 1;
		if (atomic_read(&q->use_count) == 1
		    || atomic_read(&q->finalization)) {
			drm_free_buffer(dev, buf);
		} else {
			drm_waitlist_put(&q->waitlist, buf);
			atomic_inc(&q->total_queued);
		}
	}
	atomic_dec(&q->use_count);

	return 0;
}

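/* Hand out free buffers of a single size order, copying each granted
   buffer's index and size back to user space. */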
static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
					int order)
{
	int		  i;
	drm_buf_t	  *buf;
	drm_device_dma_t  *dma = dev->dma;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = drm_freelist_get(&dma->bufs[order].freelist,
				       d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		if (buf->pending || buf->waiting) {
			DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
				  buf->idx,
				  buf->pid,
				  buf->waiting,
				  buf->pending);
		}
		buf->pid     = current->pid;
		if (copy_to_user(&d->request_indices[i],
				 &buf->idx,
				 sizeof(buf->idx)))
			return -EFAULT;

		if (copy_to_user(&d->request_sizes[i],
				 &buf->total,
				 sizeof(buf->total)))
			return -EFAULT;

		++d->granted_count;
	}
	return 0;
}


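/* Satisfy a buffer request at the order matching the requested size,
   falling back to smaller or larger orders when the request allows it. */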
int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
{
	int		  order;
	int		  retcode = 0;
	int		  tmp_order;

	order = drm_order(dma->request_size);

	dma->granted_count = 0;
	retcode		   = drm_dma_get_buffers_of_order(dev, dma, order);

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
		for (tmp_order = order - 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order >= DRM_MIN_ORDER;
		     --tmp_order) {

			retcode = drm_dma_get_buffers_of_order(dev, dma,
							       tmp_order);
		}
	}

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
		for (tmp_order = order + 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order <= DRM_MAX_ORDER;
		     ++tmp_order) {

			retcode = drm_dma_get_buffers_of_order(dev, dma,
							       tmp_order);
		}
	}
	return retcode;
}