/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
        struct vmw_private *vmw_priv;

        void *vmalloc;

        struct mutex bo_mutex;
        struct vmw_buffer_object *vmw_bo;
        unsigned bo_size;
        struct drm_framebuffer *set_fb;
        struct drm_display_mode *set_mode;
        u32 fb_x;
        u32 fb_y;
        bool bo_iowrite;

        u32 pseudo_palette[17];

        unsigned max_width;
        unsigned max_height;

        struct {
                spinlock_t lock;
                bool active;
                unsigned x1;
                unsigned y1;
                unsigned x2;
                unsigned y2;
        } dirty;

        struct drm_crtc *crtc;
        struct drm_connector *con;
        struct delayed_work local_work;
};

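/*
 * vmw_fb_setcolreg - Set a single entry of the fbdev pseudo palette.
 *
 * Only truecolor depths of 24 and 32 are supported; the 16-bit color
 * components passed in by the fbdev core are packed into an XRGB8888
 * value. Returns 0 on success, or 1 for a bad register index or depth.
 */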
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
                            unsigned blue, unsigned transp,
                            struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        u32 *pal = par->pseudo_palette;

        if (regno > 15) {
                DRM_ERROR("Bad regno %u.\n", regno);
                return 1;
        }

        switch (par->set_fb->format->depth) {
        case 24:
        case 32:
                pal[regno] = ((red & 0xff00) << 8) |
                              (green & 0xff00) |
                             ((blue & 0xff00) >> 8);
                break;
        default:
                DRM_ERROR("Bad depth %u, bpp %u.\n",
                          par->set_fb->format->depth,
                          par->set_fb->format->cpp[0] * 8);
                return 1;
        }

        return 0;
}

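/*
 * vmw_fb_check_var - Validate and adjust a requested fbdev mode.
 *
 * Only 32 bpp is accepted; the depth (24 or 32) is derived from the
 * presence of an alpha channel. The color field layout is fixed up and
 * the requested geometry is checked against the framebuffer limits and
 * the available VRAM.
 */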
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
{
        int depth = var->bits_per_pixel;
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;

        switch (var->bits_per_pixel) {
        case 32:
                depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        switch (depth) {
        case 24:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 0;
                var->transp.offset = 0;
                break;
        case 32:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 8;
                var->transp.offset = 24;
                break;
        default:
                DRM_ERROR("Bad depth %u.\n", depth);
                return -EINVAL;
        }

        if ((var->xoffset + var->xres) > par->max_width ||
            (var->yoffset + var->yres) > par->max_height) {
                DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
                return -EINVAL;
        }

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        var->xres * var->bits_per_pixel / 8,
                                        var->yoffset + var->yres)) {
                DRM_ERROR("Requested geometry cannot fit in VRAM\n");
                return -EINVAL;
        }

        return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
        return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it is turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
        struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
                                              local_work.work);
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct fb_info *info = vmw_priv->fb_info;
        unsigned long irq_flags;
        s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
        u32 cpp, max_x, max_y;
        struct drm_clip_rect clip;
        struct drm_framebuffer *cur_fb;
        u8 *src_ptr, *dst_ptr;
        struct vmw_buffer_object *vbo = par->vmw_bo;
        void *virtual;

        if (!READ_ONCE(par->dirty.active))
                return;

        mutex_lock(&par->bo_mutex);
        cur_fb = par->set_fb;
        if (!cur_fb)
                goto out_unlock;

        (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
        virtual = vmw_bo_map_and_cache(vbo);
        if (!virtual)
                goto out_unreserve;

        spin_lock_irqsave(&par->dirty.lock, irq_flags);
        if (!par->dirty.active) {
                spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
                goto out_unreserve;
        }

        /*
         * Handle panning when copying from vmalloc to framebuffer.
         * Clip dirty area to framebuffer.
         */
        cpp = cur_fb->format->cpp[0];
        max_x = par->fb_x + cur_fb->width;
        max_y = par->fb_y + cur_fb->height;

        dst_x1 = par->dirty.x1 - par->fb_x;
        dst_y1 = par->dirty.y1 - par->fb_y;
        dst_x1 = max_t(s32, dst_x1, 0);
        dst_y1 = max_t(s32, dst_y1, 0);

        dst_x2 = par->dirty.x2 - par->fb_x;
        dst_y2 = par->dirty.y2 - par->fb_y;
        dst_x2 = min_t(s32, dst_x2, max_x);
        dst_y2 = min_t(s32, dst_y2, max_y);
        w = dst_x2 - dst_x1;
        h = dst_y2 - dst_y1;
        w = max_t(s32, 0, w);
        h = max_t(s32, 0, h);

        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

        if (w && h) {
                dst_ptr = (u8 *)virtual +
                        (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
                src_ptr = (u8 *)par->vmalloc +
                        ((dst_y1 + par->fb_y) * info->fix.line_length +
                         (dst_x1 + par->fb_x) * cpp);

                while (h-- > 0) {
                        memcpy(dst_ptr, src_ptr, w*cpp);
                        dst_ptr += par->set_fb->pitches[0];
                        src_ptr += info->fix.line_length;
                }

                clip.x1 = dst_x1;
                clip.x2 = dst_x2;
                clip.y1 = dst_y1;
                clip.y2 = dst_y2;
        }

out_unreserve:
        ttm_bo_unreserve(&vbo->base);
        if (w && h) {
                WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
                                                       &clip, 1));
                vmw_cmd_flush(vmw_priv, false);
        }
out_unlock:
        mutex_unlock(&par->bo_mutex);
}

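/*
 * vmw_fb_dirty_mark - Add a rectangle to the coalesced dirty region.
 *
 * If no dirty region is pending, the rectangle starts a new one and the
 * delayed flush work is scheduled; otherwise the pending region is simply
 * extended to cover the rectangle.
 */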
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
                              unsigned x1, unsigned y1,
                              unsigned width, unsigned height)
{
        unsigned long flags;
        unsigned x2 = x1 + width;
        unsigned y2 = y1 + height;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (par->dirty.x1 == par->dirty.x2) {
                par->dirty.x1 = x1;
                par->dirty.y1 = y1;
                par->dirty.x2 = x2;
                par->dirty.y2 = y2;
                /*
                 * If we are active, start the dirty work;
                 * we share the work with the defio system.
                 */
                if (par->dirty.active)
                        schedule_delayed_work(&par->local_work,
                                              VMW_DIRTY_DELAY);
        } else {
                if (x1 < par->dirty.x1)
                        par->dirty.x1 = x1;
                if (y1 < par->dirty.y1)
                        par->dirty.y1 = y1;
                if (x2 > par->dirty.x2)
                        par->dirty.x2 = x2;
                if (y2 > par->dirty.y2)
                        par->dirty.y2 = y2;
        }
        spin_unlock_irqrestore(&par->dirty.lock, flags);
}

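/*
 * vmw_fb_pan_display - Handle fbdev panning by updating the fb offsets.
 *
 * The new offsets are stored in the par structure and the whole visible
 * framebuffer is marked dirty so the next flush repaints it.
 */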
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;

        if ((var->xoffset + var->xres) > var->xres_virtual ||
            (var->yoffset + var->yres) > var->yres_virtual) {
                DRM_ERROR("Requested panning cannot fit in framebuffer\n");
                return -EINVAL;
        }

        mutex_lock(&par->bo_mutex);
        par->fb_x = var->xoffset;
        par->fb_y = var->yoffset;
        if (par->set_fb)
                vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
                                  par->set_fb->height);
        mutex_unlock(&par->bo_mutex);

        return 0;
}

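/*
 * vmw_deferred_io - Deferred I/O callback for mmap'ed fbdev writes.
 *
 * Converts the list of touched pages into a span of dirty scanlines,
 * records it as the pending dirty region and kicks the flush work
 * immediately.
 */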
static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
        struct vmw_fb_par *par = info->par;
        unsigned long start, end, min, max;
        unsigned long flags;
        struct fb_deferred_io_pageref *pageref;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(pageref, pagereflist, list) {
                start = pageref->offset;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;

                spin_lock_irqsave(&par->dirty.lock, flags);
                par->dirty.x1 = 0;
                par->dirty.y1 = y1;
                par->dirty.x2 = info->var.xres;
                par->dirty.y2 = y2;
                spin_unlock_irqrestore(&par->dirty.lock, flags);

                /*
                 * Since we've already waited on this work once, try to
                 * execute asap.
                 */
                cancel_delayed_work(&par->local_work);
                schedule_delayed_work(&par->local_work, 0);
        }
}

static struct fb_deferred_io vmw_defio = {
        .delay = VMW_DIRTY_DELAY,
        .deferred_io = vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        cfb_fillrect(info, rect);
        vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
                          rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        cfb_copyarea(info, region);
        vmw_fb_dirty_mark(info->par, region->dx, region->dy,
                          region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        cfb_imageblit(info, image);
        vmw_fb_dirty_mark(info->par, image->dx, image->dy,
                          image->width, image->height);
}

/*
 * Bring up code
 */

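/*
 * vmw_fb_create_bo - Allocate the buffer object backing the KMS framebuffer.
 *
 * The buffer is created with system memory placement and returned in @out.
 */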
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                            size_t size, struct vmw_buffer_object **out)
{
        struct vmw_buffer_object *vmw_bo;
        int ret;

        ret = vmw_bo_create(vmw_priv, size,
                            &vmw_sys_placement,
                            false, false,
                            &vmw_bo_bo_free, &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        *out = vmw_bo;

        return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
                                int *depth)
{
        switch (var->bits_per_pixel) {
        case 32:
                *depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        return 0;
}

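/*
 * vmwgfx_set_config_internal - Perform a legacy modeset with deadlock backoff.
 *
 * Wraps the crtc's set_config hook in a drm_modeset_acquire context and
 * retries on -EDEADLK.
 */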
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
        struct drm_crtc *crtc = set->crtc;
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);

restart:
        ret = crtc->funcs->set_config(set, &ctx);

        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto restart;
        }

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}

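/*
 * vmw_fb_kms_detach - Unbind the fbdev KMS framebuffer from its crtc.
 *
 * Unsets the current mode, drops the framebuffer reference and, if both
 * @detach_bo and @unref_bo are set, also releases the backing buffer object.
 */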
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
                             bool detach_bo,
                             bool unref_bo)
{
        struct drm_framebuffer *cur_fb = par->set_fb;
        int ret;

        /* Detach the KMS framebuffer from crtcs */
        if (par->set_mode) {
                struct drm_mode_set set;

                set.crtc = par->crtc;
                set.x = 0;
                set.y = 0;
                set.mode = NULL;
                set.fb = NULL;
                set.num_connectors = 0;
                set.connectors = &par->con;
                ret = vmwgfx_set_config_internal(&set);
                if (ret) {
                        DRM_ERROR("Could not unset a mode.\n");
                        return ret;
                }
                drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
                par->set_mode = NULL;
        }

        if (cur_fb) {
                drm_framebuffer_put(cur_fb);
                par->set_fb = NULL;
        }

        if (par->vmw_bo && detach_bo && unref_bo)
                vmw_bo_unreference(&par->vmw_bo);

        return 0;
}

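/*
 * vmw_fb_kms_framebuffer - (Re)create the KMS framebuffer for the current var.
 *
 * Reuses the existing framebuffer if its size, format and pitch still match;
 * otherwise detaches the old one, allocates a suitably sized buffer object if
 * needed and wraps it in a new vmw_framebuffer.
 */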
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
        struct drm_mode_fb_cmd2 mode_cmd = {0};
        struct vmw_fb_par *par = info->par;
        struct fb_var_screeninfo *var = &info->var;
        struct drm_framebuffer *cur_fb;
        struct vmw_framebuffer *vfb;
        int ret = 0, depth;
        size_t new_bo_size;

        ret = vmw_fb_compute_depth(var, &depth);
        if (ret)
                return ret;

        mode_cmd.width = var->xres;
        mode_cmd.height = var->yres;
        mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
        mode_cmd.pixel_format =
                drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

        cur_fb = par->set_fb;
        if (cur_fb && cur_fb->width == mode_cmd.width &&
            cur_fb->height == mode_cmd.height &&
            cur_fb->format->format == mode_cmd.pixel_format &&
            cur_fb->pitches[0] == mode_cmd.pitches[0])
                return 0;

        /* Do we need a new buffer object? */
        new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
        ret = vmw_fb_kms_detach(par,
                                par->bo_size < new_bo_size ||
                                par->bo_size > 2*new_bo_size,
                                true);
        if (ret)
                return ret;

        if (!par->vmw_bo) {
                ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
                                       &par->vmw_bo);
                if (ret) {
                        DRM_ERROR("Failed creating a buffer object for fbdev.\n");
                        return ret;
                }
                par->bo_size = new_bo_size;
        }

        vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
                                      true, &mode_cmd);
        if (IS_ERR(vfb))
                return PTR_ERR(vfb);

        par->set_fb = &vfb->base;

        return 0;
}

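/*
 * vmw_fb_set_par - Apply the current fbdev var as a KMS mode.
 *
 * Builds a display mode matching the requested resolution, validates it
 * against VRAM, recreates the KMS framebuffer if necessary, performs the
 * modeset and finally marks the whole framebuffer dirty.
 */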
static int vmw_fb_set_par(struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct drm_mode_set set;
        struct fb_var_screeninfo *var = &info->var;
        struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
                DRM_MODE_TYPE_DRIVER,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        struct drm_display_mode *mode;
        int ret;

        mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
        if (!mode) {
                DRM_ERROR("Could not create new fb mode.\n");
                return -ENOMEM;
        }

        mode->hdisplay = var->xres;
        mode->vdisplay = var->yres;
        vmw_guess_mode_timing(mode);

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        mode->hdisplay *
                                        DIV_ROUND_UP(var->bits_per_pixel, 8),
                                        mode->vdisplay)) {
                drm_mode_destroy(&vmw_priv->drm, mode);
                return -EINVAL;
        }

        mutex_lock(&par->bo_mutex);
        ret = vmw_fb_kms_framebuffer(info);
        if (ret)
                goto out_unlock;

        par->fb_x = var->xoffset;
        par->fb_y = var->yoffset;

        set.crtc = par->crtc;
        set.x = 0;
        set.y = 0;
        set.mode = mode;
        set.fb = par->set_fb;
        set.num_connectors = 1;
        set.connectors = &par->con;

        ret = vmwgfx_set_config_internal(&set);
        if (ret)
                goto out_unlock;

        vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
                          par->set_fb->width, par->set_fb->height);

        /*
         * If there already was stuff dirty we won't
         * schedule a new work, so let's do it now.
         */
        schedule_delayed_work(&par->local_work, 0);

out_unlock:
        if (par->set_mode)
                drm_mode_destroy(&vmw_priv->drm, par->set_mode);
        par->set_mode = mode;

        mutex_unlock(&par->bo_mutex);

        return ret;
}


static const struct fb_ops vmw_fb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = vmw_fb_check_var,
        .fb_set_par = vmw_fb_set_par,
        .fb_setcolreg = vmw_fb_setcolreg,
        .fb_fillrect = vmw_fb_fillrect,
        .fb_copyarea = vmw_fb_copyarea,
        .fb_imageblit = vmw_fb_imageblit,
        .fb_pan_display = vmw_fb_pan_display,
        .fb_blank = vmw_fb_blank,
        .fb_mmap = fb_deferred_io_mmap,
};

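/*
 * vmw_fb_init - Set up the legacy vmwgfx fbdev emulation.
 *
 * Allocates the fb_info and the vmalloc shadow framebuffer, initializes the
 * fixed and variable screen information, hooks up deferred I/O and registers
 * the framebuffer with the fbdev core.
 */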
int vmw_fb_init(struct vmw_private *vmw_priv)
{
        struct device *device = vmw_priv->drm.dev;
        struct vmw_fb_par *par;
        struct fb_info *info;
        unsigned fb_width, fb_height;
        unsigned int fb_bpp, fb_pitch, fb_size;
        struct drm_display_mode *init_mode;
        int ret;

        fb_bpp = 32;

        /* Clamp the fbdev framebuffer to a sane maximum size. */
        fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
        fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

        fb_pitch = fb_width * fb_bpp / 8;
        fb_size = fb_pitch * fb_height;

        info = framebuffer_alloc(sizeof(*par), device);
        if (!info)
                return -ENOMEM;

        /*
         * Par
         */
        vmw_priv->fb_info = info;
        par = info->par;
        memset(par, 0, sizeof(*par));
        INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
        par->vmw_priv = vmw_priv;
        par->vmalloc = NULL;
        par->max_width = fb_width;
        par->max_height = fb_height;

        ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
                                      par->max_height, &par->con,
                                      &par->crtc, &init_mode);
        if (ret)
                goto err_kms;

        info->var.xres = init_mode->hdisplay;
        info->var.yres = init_mode->vdisplay;

        /*
         * Create buffers and alloc memory
         */
        par->vmalloc = vzalloc(fb_size);
        if (unlikely(par->vmalloc == NULL)) {
                ret = -ENOMEM;
                goto err_free;
        }

        /*
         * Fixed and var
         */
        strcpy(info->fix.id, "svgadrmfb");
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_TRUECOLOR;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 1; /* doing it in hw */
        info->fix.ypanstep = 1; /* doing it in hw */
        info->fix.ywrapstep = 0;
        info->fix.accel = FB_ACCEL_NONE;
        info->fix.line_length = fb_pitch;

        info->fix.smem_start = 0;
        info->fix.smem_len = fb_size;

        info->pseudo_palette = par->pseudo_palette;
        info->screen_base = (char __iomem *)par->vmalloc;
        info->screen_size = fb_size;

        info->fbops = &vmw_fb_ops;

        /* Default to depth 24 (32 bpp, no alpha). */
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;

        info->var.xres_virtual = fb_width;
        info->var.yres_virtual = fb_height;
        info->var.bits_per_pixel = fb_bpp;
        info->var.xoffset = 0;
        info->var.yoffset = 0;
        info->var.activate = FB_ACTIVATE_NOW;
        info->var.height = -1;
        info->var.width = -1;

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
                goto err_aper;
        }
        info->apertures->ranges[0].base = vmw_priv->vram_start;
        info->apertures->ranges[0].size = vmw_priv->vram_size;

        /*
         * Dirty & Deferred IO
         */
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        par->dirty.active = true;
        spin_lock_init(&par->dirty.lock);
        mutex_init(&par->bo_mutex);
        info->fbdefio = &vmw_defio;
        fb_deferred_io_init(info);

        ret = register_framebuffer(info);
        if (unlikely(ret != 0))
                goto err_defio;

        vmw_fb_set_par(info);

        return 0;

err_defio:
        fb_deferred_io_cleanup(info);
err_aper:
err_free:
        vfree(par->vmalloc);
err_kms:
        framebuffer_release(info);
        vmw_priv->fb_info = NULL;

        return ret;
}

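/*
 * vmw_fb_close - Tear down the fbdev emulation set up by vmw_fb_init().
 */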
int vmw_fb_close(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;

        if (!vmw_priv->fb_info)
                return 0;

        info = vmw_priv->fb_info;
        par = info->par;

        /* Stop deferred I/O and pending flush work before unregistering. */
        fb_deferred_io_cleanup(info);
        cancel_delayed_work_sync(&par->local_work);
        unregister_framebuffer(info);

        mutex_lock(&par->bo_mutex);
        (void) vmw_fb_kms_detach(par, true, true);
        mutex_unlock(&par->bo_mutex);

        vfree(par->vmalloc);
        framebuffer_release(info);

        return 0;
}

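/*
 * vmw_fb_off - Stop fbdev dirty flushing, e.g. around suspend/hibernation.
 *
 * Clears dirty.active and waits for any pending deferred I/O and flush work
 * to finish.
 */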
int vmw_fb_off(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        flush_delayed_work(&info->deferred_work);
        flush_delayed_work(&par->local_work);

        return 0;
}

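/*
 * vmw_fb_on - Re-enable fbdev dirty flushing and schedule an update.
 */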
int vmw_fb_on(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        /*
         * Need to reschedule a dirty update, because otherwise that's
         * only done in dirty_mark() if the previous coalesced
         * dirty region was empty.
         */
        schedule_delayed_work(&par->local_work, 0);

        return 0;
}