/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

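/*
 * Per-framebuffer private data: the vmalloc'ed system-memory shadow,
 * the VRAM-backed buffer object it is flushed into, and the bounding
 * box of the region dirtied since the last flush.
 */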
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

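/*
 * Fill one entry of the truecolor pseudo palette. Only 24/32 bpp
 * framebuffers are supported, so other depths are rejected.
 */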
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue  & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

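/*
 * Validate a mode request: only 32 bpp is accepted, the color fields
 * are normalized for a depth of 24 or 32, and the geometry is checked
 * against the framebuffer and VRAM limits.
 */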
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry does not fit in the framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel / 8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry does not fit in VRAM\n");
		return -EINVAL;
	}

	return 0;
}

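/*
 * Program the device with the current mode and, when display topology
 * is supported, (re)define the single guest display.
 */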
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	info->fix.line_length = info->var.xres * info->var.bits_per_pixel / 8;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO: check whether pitch or offset changed */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* Warn loudly here: if the framebuffer offset is not zero, the
	 * user can probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

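/*
 * Copy the dirty region from the system-memory shadow into the
 * VRAM-backed buffer and emit an SVGA_CMD_UPDATE for it.
 */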
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i + x; k < i + x + w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

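/*
 * Extend the dirty bounding box with the given rectangle and, on the
 * first damage after a flush, schedule the deferred work that will
 * flush it.
 */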
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If dirty flushing is active, start the dirty work;
		 * the work item is shared with the defio system.
		 */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

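/*
 * fb_deferred_io callback: convert the list of touched pages into a
 * full-width span of dirty lines and flush immediately.
 */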
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

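/*
 * The drawing ops render through the generic cfb_* helpers into the
 * system-memory shadow and then mark the touched rectangle dirty.
 */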
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

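/*
 * Allocate a buffer object for the framebuffer, using a non-evictable
 * placement confined to the first 'size' bytes of VRAM.
 */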
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* TODO: should this lock be taken interruptibly? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

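/*
 * Create and register the fbdev framebuffer: allocate the shadow and
 * the VRAM buffer object, fill in the fixed and variable screen info
 * and set up deferred I/O.
 */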
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX: like the bpp/depth above, these limits are hardcoded. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(vmw_priv->initial_width, fb_width);
	initial_height = min(vmw_priv->initial_height, fb_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

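/*
 * Unregister the framebuffer and release everything set up by
 * vmw_fb_init().
 */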
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* XXX: is this the correct teardown order? */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

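/*
 * Stop dirty flushing, wait for pending deferred work, and unpin the
 * framebuffer so that its VRAM can be reused.
 */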
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work_sync(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

	return 0;
}

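/*
 * Resume fbdev operation: stop all overlays, pin the framebuffer back
 * at the start of VRAM, remap it and restart dirty flushing.
 */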
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* We are already active. */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over. */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was dirty already, a new work item won't be
	 * scheduled, so run the delayed work now.
	 */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}