/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

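/* Coalesce dirty-region flushes to at most one per VMW_DIRTY_DELAY jiffies (~30 Hz). */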
#define VMW_DIRTY_DELAY (HZ / 30)

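/*
 * Per-framebuffer private state.
 *
 * @vmalloc is the system-memory shadow buffer that fbdev clients draw
 * into; @vmw_bo, @map and @bo_ptr describe the VRAM buffer object the
 * shadow is flushed to. @dirty tracks the bounding box of pending
 * damage, protected by @dirty.lock.
 */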
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

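/*
 * Store one palette entry. Only a truecolor pseudo-palette is kept, so
 * anything but 24/32-bit depth (or a regno above 15) is rejected.
 */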
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

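/*
 * Validate a mode request: only 32 bpp is accepted (depth 24 or 32,
 * depending on whether an alpha channel was asked for), panning requires
 * SVGA_CAP_DISPLAY_TOPOLOGY, and the geometry must fit both the
 * framebuffer and VRAM.
 */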
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry does not fit in the framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					info->fix.line_length,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry does not fit in VRAM\n");
		return -EINVAL;
	}

	return 0;
}

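/*
 * Program the current mode into the device, and set up display 0 when
 * the host supports a display topology.
 */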
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
			   info->fix.line_length,
			   par->bpp, par->depth);
	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO: check whether pitch and offset change */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* Warn loudly here: if the framebuffer offset is not zero, the
	 * user most likely cannot see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}

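/*
 * Nothing to do for panning and blanking: the pan offsets are applied
 * in vmw_fb_set_par() via the display position registers, and blanking
 * is not implemented.
 */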
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

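/*
 * Copy the dirty region of the shadow buffer into VRAM and ask the host
 * to update the screen with an SVGA_CMD_UPDATE FIFO command. Note that
 * the copy loop below transfers whole rows from y1 downwards, which is
 * more than the dirty rectangle strictly requires.
 */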
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

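/*
 * Grow the pending dirty rectangle to cover (x1, y1)-(x2, y2). If the
 * rectangle was previously empty, also schedule the deferred-I/O work
 * that will eventually flush it.
 */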
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work; the delayed
		 * work item is shared with the defio system.
		 */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

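/*
 * fb_deferred_io callback: convert the list of touched pages into a
 * full-width span of dirty scanlines, then flush immediately.
 */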
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};

/*
 * Draw code
 */

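/*
 * The drawing hooks render into the system-memory shadow with the
 * generic cfb_* helpers and then mark the touched rectangle dirty so it
 * gets flushed to VRAM.
 */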
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

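/*
 * Allocate a buffer object for the framebuffer, constrained to the
 * first @size bytes of VRAM via the lpfn limit of the no-evict VRAM
 * placement.
 */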
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX: should this lock be interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

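/*
 * Create and register the fbdev device: allocate the system-memory
 * shadow and the VRAM buffer object, map the latter, fill in the fixed
 * and variable screen info, and wire up deferred I/O.
 */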
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	/* XXX These shouldn't be hardcoded. */
	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX These shouldn't be hardcoded either. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

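/*
 * Tear down the fbdev device and release everything vmw_fb_init() set up.
 */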
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* XXX: is this the right teardown order? */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

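/*
 * Move the framebuffer buffer object out of VRAM into system memory,
 * typically when the fbdev path is switched off (see vmw_fb_off()).
 */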
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}

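/*
 * Place the buffer object at the very start of VRAM. If it currently
 * sits in VRAM and partially overlaps its target range, it is first
 * bounced to system memory so the subsequent validate can place it at
 * offset zero.
 */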
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* XXX: should this lock be interruptible? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
				       false, false);

	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);

	/* Could probably BUG_ON() here instead. */
	WARN_ON(bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}

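/*
 * Stop fbdev output: disable dirty flushing, wait for pending deferred
 * work, drop the VRAM mapping, and evict the buffer to system memory.
 */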
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work_sync(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}

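/*
 * Resume fbdev output: move the buffer back to the start of VRAM, remap
 * it, re-enable dirty flushing, reprogram the mode and force a
 * full-screen refresh.
 */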
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* We are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was already dirty we won't schedule new work,
	 * so do it ourselves here.
	 */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}