/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#include "svga_overlay.h"
#include "svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1

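/*
 * Per-stream bookkeeping: the DMA buffer currently backing the stream,
 * whether the stream id has been claimed by user space, whether playback
 * is merely paused (the buffer is kept for a later resume), and the last
 * control arguments so a paused stream can be restarted as-is.
 */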
struct vmw_stream {
	struct vmw_dma_buffer *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}

/**
 * Pin or unpin a buffer in vram.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin or unpin.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Takes the current master's ttm lock in read mode.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *overlay_placement = &vmw_vram_placement;
	int ret;

	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

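	/* Pinning selects the no-evict VRAM placement; unpinning validates
	 * back to plain VRAM, so the buffer stays resident but becomes
	 * evictable.
	 */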
	if (pin)
		overlay_placement = &vmw_vram_ne_placement;

	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->active_master->lock);

	return ret;
}

/**
 * Send put command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		struct {
			struct {
				uint32_t cmdType;
				uint32_t streamId;
			} header;
			struct {
				uint32_t registerId;
				uint32_t value;
			} items[SVGA_VIDEO_PITCH_3 + 1];
		} body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	uint32_t offset;
	int i, ret;

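	/* Reserve FIFO space for the whole command; if the FIFO is full,
	 * wait for the device to drain it and retry. Only an interrupted
	 * wait (when interruptible) aborts the loop.
	 */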
	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = arg->stream_id;

	for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
		cmds->body.items[i].registerId = i;

	offset = buf->base.offset + arg->offset;

	cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
	cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
	cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
	cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
	cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
	cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
	cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
	cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];

	fill_flush(&cmds->flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

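	/* Same FIFO reservation retry pattern as vmw_overlay_send_put(). */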
	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer,
 * which is left in vram. This allows, for instance, mode_set to evict it
 * should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id: which stream to stop/pause.
 * @pause: true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* No buffer attached, the stream is completely stopped. */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done. */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag, so no -ENOMEM. */
		ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
					     interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

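	/* A full stop drops our reference to the buffer; a pause keeps the
	 * buffer around so the stream can be resumed later.
	 */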
	if (!pause) {
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if the buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and the stream is not paused, just
		 * send the put command; there is no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't restart the old stream if we are interrupted.
	 * Might return -ENOMEM if the buffer doesn't fit in vram.
	 */
	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag, so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
		return ret;
	}

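	/* Success: reference the new buffer if it changed, remember the
	 * arguments for a later resume, and mark the stream as running.
	 */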
	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->buf)
			continue;

		ret = vmw_overlay_stop(dev_priv, i, false, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Pause all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

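/*
 * Ioctl handler for stream control requests from user space: a disable
 * request stops the stream outright, otherwise the referenced buffer is
 * looked up and the overlay is (re)programmed with the new arguments.
 */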
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!overlay)
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

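	/* A zero "enabled" field means fully stop the stream and drop its
	 * buffer reference.
	 */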
	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!dev_priv->overlay_priv)
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

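/*
 * Claim a free stream slot on behalf of a user-space stream resource.
 * The slot index is returned through @out and released again with
 * vmw_overlay_unref().
 */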
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}

int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
	    (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
		DRM_INFO("hardware doesn't support overlays\n");
		return -ENOSYS;
	}

	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	memset(overlay, 0, sizeof(*overlay));
	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}