// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>

#include "vmwgfx_kms.h"

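/**
 * vmw_du_cleanup - Clean up a display unit's KMS objects
 *
 * @du: display unit to clean up
 *
 * Releases the planes, connector, CRTC and encoder making up the display
 * unit. The cursor plane is only cleaned up when the device supports
 * command buffers, since it is only created in that case.
 */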
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct ttm_buffer_object *bo,
				  struct ttm_bo_kmap_obj *map,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

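/**
 * vmw_cursor_update_image - Update the cursor image via CursorMob or FIFO
 *
 * @dev_priv: device to work with
 * @cm_bo: cursor MOB buffer object, or NULL to use the legacy FIFO path
 * @cm_map: kmap obj for the cursor MOB
 * @image: cursor source data, one u32 per ARGB pixel
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 *
 * Prefers the CursorMob mechanism when a MOB is supplied; otherwise falls
 * back to an SVGA_CMD_DEFINE_ALPHA_CURSOR FIFO command.
 */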
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct ttm_buffer_object *cm_bo,
				    struct ttm_bo_kmap_obj *cm_map,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	if (cm_bo != NULL) {
		vmw_cursor_update_mob(dev_priv, cm_bo, cm_map, image,
				      width, height,
				      hotspotX, hotspotY);
		return;
	}

	/* Try to reserve fifocmd space and swallow any failures;
	   such reservations cannot be left unconsumed for long
	   under the risk of clogging other fifocmd users, so
	   we treat reservations separately from the way we treat
	   other fallible KMS-atomic resources at prepare_fb */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_mob - Update cursor via CursorMob mechanism
 *
 * @dev_priv: device to work with
 * @bo: BO for the MOB
 * @map: kmap obj for the BO
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct ttm_buffer_object *bo,
				  struct ttm_bo_kmap_obj *map,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);
	bool dummy;

	BUG_ON(!image);

	header = (SVGAGBCursorHeader *)ttm_kmap_obj_virtual(map, &dummy);
	alpha_header = &header->header.alphaHeader;

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);

	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, bo->resource->start);
}

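/**
 * vmw_du_destroy_cursor_mob_array - Free a cursor plane's cursor MOBs
 *
 * @vcp: cursor plane whose MOB array should be torn down
 *
 * Unpins, puts and frees every buffer object in @vcp->cursor_mob and
 * clears the array slots.
 */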
void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mob); i++) {
		if (vcp->cursor_mob[i] != NULL) {
			ttm_bo_unpin(vcp->cursor_mob[i]);
			ttm_bo_put(vcp->cursor_mob[i]);
			kfree(vcp->cursor_mob[i]);
			vcp->cursor_mob[i] = NULL;
		}
	}
}

#define CURSOR_MOB_SIZE(dimension) \
	((dimension) * (dimension) * sizeof(u32) + sizeof(SVGAGBCursorHeader))

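/**
 * vmw_du_create_cursor_mob_array - Allocate the cursor plane's cursor MOBs
 *
 * @cursor: cursor plane to allocate the MOB array for
 *
 * Allocates one MOB per array slot, sized for the device's maximum cursor
 * dimension (falling back to the mandatory 64x64 when that would exceed
 * the maximum MOB size), and fences each buffer so the device is
 * guaranteed to know about it before first use.
 *
 * Returns: 0 on success, -ENOSYS when CursorMob is unsupported, or a
 * negative error code on allocation failure.
 */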
int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *cursor)
{
	struct vmw_private *dev_priv = cursor->base.dev->dev_private;
	uint32_t cursor_max_dim, mob_max_size;
	int ret = 0;
	size_t i;

	if (!dev_priv->has_mob || (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -ENOSYS;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (CURSOR_MOB_SIZE(cursor_max_dim) > mob_max_size)
		cursor_max_dim = 64; /* Mandatorily-supported cursor dimension */

	for (i = 0; i < ARRAY_SIZE(cursor->cursor_mob); i++) {
		struct ttm_buffer_object **const bo = &cursor->cursor_mob[i];

		ret = vmw_bo_create_kernel(dev_priv,
					   CURSOR_MOB_SIZE(cursor_max_dim),
					   &vmw_mob_placement, bo);

		if (ret != 0)
			goto teardown;

		if ((*bo)->resource->mem_type != VMW_PL_MOB) {
			DRM_ERROR("Obtained buffer object is not a MOB.\n");
			ret = -ENOSYS;
			goto teardown;
		}

		/* Fence the mob creation so we are guaranteed to have the mob */
		ret = ttm_bo_reserve(*bo, false, false, NULL);

		if (ret != 0)
			goto teardown;

		vmw_bo_fence_single(*bo, NULL);

		ttm_bo_unreserve(*bo);

		drm_info(&dev_priv->drm, "Using CursorMob mobid %lu, max dimension %u\n",
			 (*bo)->resource->start, cursor_max_dim);
	}

	return 0;

teardown:
	vmw_du_destroy_cursor_mob_array(cursor);

	return ret;
}

#undef CURSOR_MOB_SIZE

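/**
 * vmw_cursor_update_bo - Update the cursor image from a mapped buffer object
 *
 * @dev_priv: device to work with
 * @cm_bo: cursor MOB buffer object, or NULL for the legacy FIFO path
 * @cm_map: kmap obj for the cursor MOB
 * @bo: buffer object holding the cursor image; must already be kmapped
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 *
 * Drops one reference on the bo's mapped count once the image has been
 * pushed to the device.
 */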
static void vmw_cursor_update_bo(struct vmw_private *dev_priv,
				 struct ttm_buffer_object *cm_bo,
				 struct ttm_bo_kmap_obj *cm_map,
				 struct vmw_buffer_object *bo,
				 u32 width, u32 height,
				 u32 hotspotX, u32 hotspotY)
{
	void *virtual;
	bool dummy;

	virtual = ttm_kmap_obj_virtual(&bo->map, &dummy);
	if (virtual) {
		vmw_cursor_update_image(dev_priv, cm_bo, cm_map, virtual,
					width, height,
					hotspotX, hotspotY);
		atomic_dec(&bo->base_mapped_count);
	}
}


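/**
 * vmw_cursor_update_position - Show, hide or move the hardware cursor
 *
 * @dev_priv: device to work with
 * @show: whether the cursor should be visible
 * @x: new cursor x position
 * @y: new cursor y position
 *
 * Uses, in order of preference, the CURSOR4 registers, the cursor
 * bypass 3 FIFO registers, or the legacy cursor registers, all under
 * the cursor spinlock.
 */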
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, TRUE);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}


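/**
 * vmw_kms_cursor_snoop - Copy cursor contents out of a surface DMA command
 *
 * @srf: surface being updated; its snooper image receives the copy
 * @tfile: TTM file the command was submitted on
 * @bo: guest backing buffer referenced by the DMA command
 * @header: header of the SVGA3D_CMD_SURFACE_DMA command to snoop
 *
 * Only simple, page-aligned, single-copy-box transfers into the 64x64
 * snooper image are snooped; anything else is rejected with an error
 * message.
 */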
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

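/**
 * vmw_kms_cursor_post_execbuf - Refresh snooped cursors after command
 * submission
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Walks all CRTCs and, for any display unit whose cursor surface has a
 * newer snooper image than the one last sent to the device, re-sends the
 * 64x64 cursor image with the current hotspot.
 */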
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, NULL, NULL,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


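/**
 * vmw_du_cursor_plane_destroy - Hide the cursor and tear down the cursor plane
 *
 * @plane: cursor plane to destroy
 */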
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
	vmw_du_destroy_cursor_mob_array(vmw_plane_to_vcp(plane));
	drm_plane_cleanup(plane);
}


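/**
 * vmw_du_primary_plane_destroy - Tear down the primary plane
 *
 * @plane: primary plane to destroy
 */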
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	bool dummy;

	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);

		if (likely(ret == 0)) {
			if (atomic_read(&vps->bo->base_mapped_count) == 0)
				ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->base);
		}
	}

	if (vps->cm_bo != NULL && ttm_kmap_obj_virtual(&vps->cm_map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(vps->cm_bo, true, false, NULL);

		if (likely(ret == 0)) {
			ttm_bo_kunmap(&vps->cm_map);
			ttm_bo_unreserve(vps->cm_bo);
		}
	}

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}

/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct ttm_buffer_object *cm_bo = NULL;
	bool dummy;
	int ret = 0;

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	vps->cm_bo = NULL;

	if (vps->surf == NULL && vps->bo != NULL) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/* Not using the vmw_bo_map_and_cache() helper here as we need
		 * to reserve the ttm_buffer_object first, which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);

		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);

		if (likely(ret == 0))
			atomic_inc(&vps->bo->base_mapped_count);

		ttm_bo_unreserve(&vps->bo->base);

		if (unlikely(ret != 0))
			return -ENOMEM;
	}

	if (vps->surf || vps->bo) {
		unsigned cursor_mob_idx = vps->cursor_mob_idx;

		/* Lazily set up cursor MOBs just once -- no reattempts. */
		if (cursor_mob_idx == 0 && vcp->cursor_mob[0] == NULL)
			if (vmw_du_create_cursor_mob_array(vcp) != 0)
				vps->cursor_mob_idx = cursor_mob_idx = -1U;

		if (cursor_mob_idx < ARRAY_SIZE(vcp->cursor_mob)) {
			const u32 size = sizeof(SVGAGBCursorHeader) +
				new_state->crtc_w * new_state->crtc_h * sizeof(u32);

			cm_bo = vcp->cursor_mob[cursor_mob_idx];

			if (cm_bo->resource->num_pages * PAGE_SIZE < size) {
				ret = -EINVAL;
				goto error_bo_unmap;
			}

			ret = ttm_bo_reserve(cm_bo, false, false, NULL);

			if (unlikely(ret != 0)) {
				ret = -ENOMEM;
				goto error_bo_unmap;
			}

			ret = ttm_bo_kmap(cm_bo, 0, PFN_UP(size), &vps->cm_map);

			/*
			 * We just want to try to get mob bind to finish
			 * so that the first write to SVGA_REG_CURSOR_MOBID
			 * is done with a buffer that the device has already
			 * seen
			 */
			(void) ttm_bo_wait(cm_bo, false, false);

			ttm_bo_unreserve(cm_bo);

			if (unlikely(ret != 0)) {
				ret = -ENOMEM;
				goto error_bo_unmap;
			}

			vps->cursor_mob_idx = cursor_mob_idx ^ 1;
			vps->cm_bo = cm_bo;
		}
	}

	return 0;

error_bo_unmap:
	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
		if (likely(ret == 0)) {
			atomic_dec(&vps->bo->base_mapped_count);
			ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->base);
		}
	}

	return ret;
}


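/**
 * vmw_du_cursor_plane_atomic_update - Push the new cursor image and position
 *
 * @plane: cursor plane
 * @state: the atomic state containing the new and old plane state
 *
 * Sends the cursor image from the snooped surface or the backing buffer
 * object to the device and shows the cursor at its new position, or
 * hides the cursor when the plane has neither a surface nor a buffer
 * object.
 */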
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (new_state->fb) {
		hotspot_x += new_state->fb->hot_x;
		hotspot_y += new_state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		vmw_cursor_update_image(dev_priv, vps->cm_bo, &vps->cm_map,
					vps->surf->snooper.image,
					new_state->crtc_w,
					new_state->crtc_h,
					hotspot_x, hotspot_y);
	} else if (vps->bo) {
		vmw_cursor_update_bo(dev_priv, vps->cm_bo, &vps->cm_map,
				     vps->bo,
				     new_state->crtc_w,
				     new_state->crtc_h,
				     hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	int ret;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		return -EINVAL;
	}

	return 0;
}


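/**
 * vmw_du_crtc_atomic_check - Validate a CRTC state update
 *
 * @crtc: the CRTC being checked
 * @state: the atomic state containing the new CRTC state
 *
 * Requires an active primary plane whenever the CRTC is enabled, accepts
 * only this display unit's own connector, and propagates the logical
 * clock into crtc_clock since the virtual device has no dot clock.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */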
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


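/*
 * vmw_du_crtc_atomic_begin - Atomic begin hook. Intentionally a no-op;
 * there is nothing to prepare before the per-plane updates.
 */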
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


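/**
 * vmw_du_crtc_atomic_flush - Finish a CRTC update
 *
 * @crtc: the CRTC that was updated
 * @state: the atomic state being committed
 *
 * Sends the pending vblank event for @crtc, if any, under the event lock.
 */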
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

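/**
 * vmw_kms_new_framebuffer_surface - Wrap a vmw surface in a framebuffer
 *
 * @dev_priv: Pointer to device private struct.
 * @surface: surface to scan out from
 * @out: on success, set to the newly created framebuffer
 * @mode_cmd: Frame-buffer metadata from userspace
 * @is_bo_proxy: true when @surface is a proxy created for a buffer object
 *
 * Validates the surface against @mode_cmd (scanout flag, dimensions and
 * pixel format) and creates a surface-backed framebuffer around it.
 *
 * Returns 0 on success, negative error code otherwise.
 */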
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

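/**
 * vmw_framebuffer_bo_create_handle - Create a GEM handle for the backing bo
 *
 * @fb: framebuffer to create the handle for
 * @file_priv: DRM file the handle is created in
 * @handle: on success, receives the new GEM handle
 */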
static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

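/**
 * vmw_framebuffer_bo_dirty - Flush dirty regions of a bo-backed framebuffer
 *
 * @framebuffer: framebuffer that was touched
 * @file_priv: DRM file of the caller
 * @flags: dirty-fb flags; DRM_MODE_FB_DIRTY_ANNOTATE_COPY halves @num_clips
 * @color: unused annotation color
 * @clips: array of dirty rectangles, or NULL for the whole framebuffer
 * @num_clips: number of rectangles in @clips
 *
 * Only the legacy display unit reaches this path; the other display
 * units go through drm_atomic_helper_dirtyfb() via the _ext wrapper
 * below.
 */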
static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(&dev_priv->drm);

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_cmd_flush(dev_priv, false);

	drm_modeset_unlock_all(&dev_priv->drm);

	return ret;
}

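/**
 * vmw_framebuffer_bo_dirty_ext - Dirty-fb entry point for bo framebuffers
 *
 * @framebuffer: framebuffer that was touched
 * @file_priv: DRM file of the caller
 * @flags: dirty-fb flags
 * @color: annotation color
 * @clips: array of dirty rectangles
 * @num_clips: number of rectangles in @clips
 *
 * Routes the request to the legacy path when running on a legacy display
 * unit with command support, and to the atomic helper otherwise.
 */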
static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy &&
	    vmw_cmd_supported(dev_priv))
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};

/*
 * Pin the buffer in a location suitable for access by the
 * display system.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

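/*
 * Unpin the backing buffer of a framebuffer previously pinned with
 * vmw_framebuffer_pin().
 */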
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA. This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}


static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->base.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be no larger than the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

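/**
 * vmw_kms_fb_create - Entry point for framebuffer creation from userspace
 *
 * @dev: DRM device
 * @file_priv: DRM file of the caller
 * @mode_cmd: Frame-buffer metadata from userspace
 *
 * Looks up the userspace handle as either a surface or a buffer object
 * and wraps it in the matching vmw framebuffer type.
 */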
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
	 * limit on primary bounding box
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}
1836
1837 /**
1838 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1839 * @dev: DRM device
1840 * @state: the driver state object
1841 *
1842 * Returns:
1843 * 0 on success otherwise negative error code
1844 */
1845 static int vmw_kms_check_topology(struct drm_device *dev,
1846 struct drm_atomic_state *state)
1847 {
1848 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1849 struct drm_rect *rects;
1850 struct drm_crtc *crtc;
1851 uint32_t i;
1852 int ret = 0;
1853
1854 rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1855 GFP_KERNEL);
1856 if (!rects)
1857 return -ENOMEM;
1858
1859 drm_for_each_crtc(crtc, dev) {
1860 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1861 struct drm_crtc_state *crtc_state;
1862
1863 i = drm_crtc_index(crtc);
1864
1865 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1866 if (IS_ERR(crtc_state)) {
1867 ret = PTR_ERR(crtc_state);
1868 goto clean;
1869 }
1870
1871 if (!crtc_state)
1872 continue;
1873
1874 if (crtc_state->enable) {
1875 rects[i].x1 = du->gui_x;
1876 rects[i].y1 = du->gui_y;
1877 rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1878 rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1879 } else {
1880 rects[i].x1 = 0;
1881 rects[i].y1 = 0;
1882 rects[i].x2 = 0;
1883 rects[i].y2 = 0;
1884 }
1885 }
1886
1887 /* Determine change to topology due to new atomic state */
1888 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1889 new_crtc_state, i) {
1890 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1891 struct drm_connector *connector;
1892 struct drm_connector_state *conn_state;
1893 struct vmw_connector_state *vmw_conn_state;
1894
1895 if (!du->pref_active && new_crtc_state->enable) {
1896 VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1897 ret = -EINVAL;
1898 goto clean;
1899 }
1900
1901 /*
1902 * For vmwgfx each crtc has only one connector attached and it
1903 		 * is not changed, so we don't really need to check
1904 		 * crtc->connector_mask and iterate over it.
1905 */
1906 connector = &du->connector;
1907 conn_state = drm_atomic_get_connector_state(state, connector);
1908 if (IS_ERR(conn_state)) {
1909 ret = PTR_ERR(conn_state);
1910 goto clean;
1911 }
1912
1913 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1914 vmw_conn_state->gui_x = du->gui_x;
1915 vmw_conn_state->gui_y = du->gui_y;
1916 }
1917
1918 ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1919 rects);
1920
1921 clean:
1922 kfree(rects);
1923 return ret;
1924 }
1925
1926 /**
1927  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1928 *
1929 * @dev: DRM device
1930 * @state: the driver state object
1931 *
1932  * This is a wrapper around drm_atomic_helper_check() that additionally
1933  * verifies the implicit-scanout state and, when any crtc needs a modeset,
1934  * validates the resulting display topology.
1935 *
1936 * Returns:
1937 * Zero for success or -errno
1938 */
1939 static int
1940 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1941 struct drm_atomic_state *state)
1942 {
1943 struct drm_crtc *crtc;
1944 struct drm_crtc_state *crtc_state;
1945 bool need_modeset = false;
1946 int i, ret;
1947
1948 ret = drm_atomic_helper_check(dev, state);
1949 if (ret)
1950 return ret;
1951
1952 ret = vmw_kms_check_implicit(dev, state);
1953 if (ret) {
1954 VMW_DEBUG_KMS("Invalid implicit state\n");
1955 return ret;
1956 }
1957
1958 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1959 if (drm_atomic_crtc_needs_modeset(crtc_state))
1960 need_modeset = true;
1961 }
1962
1963 if (need_modeset)
1964 return vmw_kms_check_topology(dev, state);
1965
1966 return ret;
1967 }
1968
1969 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1970 .fb_create = vmw_kms_fb_create,
1971 .atomic_check = vmw_kms_atomic_check_modeset,
1972 .atomic_commit = drm_atomic_helper_commit,
1973 };
1974
1975 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1976 struct drm_file *file_priv,
1977 struct vmw_framebuffer *vfb,
1978 struct vmw_surface *surface,
1979 uint32_t sid,
1980 int32_t destX, int32_t destY,
1981 struct drm_vmw_rect *clips,
1982 uint32_t num_clips)
1983 {
1984 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1985 &surface->res, destX, destY,
1986 num_clips, 1, NULL, NULL);
1987 }
1988
1989
1990 int vmw_kms_present(struct vmw_private *dev_priv,
1991 struct drm_file *file_priv,
1992 struct vmw_framebuffer *vfb,
1993 struct vmw_surface *surface,
1994 uint32_t sid,
1995 int32_t destX, int32_t destY,
1996 struct drm_vmw_rect *clips,
1997 uint32_t num_clips)
1998 {
1999 int ret;
2000
2001 switch (dev_priv->active_display_unit) {
2002 case vmw_du_screen_target:
2003 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2004 &surface->res, destX, destY,
2005 num_clips, 1, NULL, NULL);
2006 break;
2007 case vmw_du_screen_object:
2008 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2009 sid, destX, destY, clips,
2010 num_clips);
2011 break;
2012 default:
2013 WARN_ONCE(true,
2014 "Present called with invalid display system.\n");
2015 ret = -ENOSYS;
2016 break;
2017 }
2018 if (ret)
2019 return ret;
2020
2021 vmw_cmd_flush(dev_priv, false);
2022
2023 return 0;
2024 }
2025
2026 static void
2027 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2028 {
2029 if (dev_priv->hotplug_mode_update_property)
2030 return;
2031
2032 dev_priv->hotplug_mode_update_property =
2033 drm_property_create_range(&dev_priv->drm,
2034 DRM_MODE_PROP_IMMUTABLE,
2035 "hotplug_mode_update", 0, 1);
2036 }
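
/*
 * Display unit implementations are expected to attach this property to their
 * connectors; a minimal sketch, assuming the connector is at hand
 * (illustrative only):
 *
 *	drm_object_attach_property(&connector->base,
 *				   dev_priv->hotplug_mode_update_property, 1);
 */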
2037
2038 int vmw_kms_init(struct vmw_private *dev_priv)
2039 {
2040 struct drm_device *dev = &dev_priv->drm;
2041 int ret;
2042 static const char *display_unit_names[] = {
2043 "Invalid",
2044 "Legacy",
2045 "Screen Object",
2046 "Screen Target",
2047 "Invalid (max)"
2048 };
2049
2050 drm_mode_config_init(dev);
2051 dev->mode_config.funcs = &vmw_kms_funcs;
2052 dev->mode_config.min_width = 1;
2053 dev->mode_config.min_height = 1;
2054 dev->mode_config.max_width = dev_priv->texture_max_width;
2055 dev->mode_config.max_height = dev_priv->texture_max_height;
2056
2057 drm_mode_create_suggested_offset_properties(dev);
2058 vmw_kms_create_hotplug_mode_update_property(dev_priv);
2059
2060 ret = vmw_kms_stdu_init_display(dev_priv);
2061 if (ret) {
2062 ret = vmw_kms_sou_init_display(dev_priv);
2063 if (ret) /* Fallback */
2064 ret = vmw_kms_ldu_init_display(dev_priv);
2065 }
2066 BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2067 drm_info(&dev_priv->drm, "%s display unit initialized\n",
2068 display_unit_names[dev_priv->active_display_unit]);
2069
2070 return ret;
2071 }
2072
2073 int vmw_kms_close(struct vmw_private *dev_priv)
2074 {
2075 int ret = 0;
2076
2077 /*
2078 	 * Docs say we should take the lock before calling this function
2079 	 * but since it destroys encoders, and our destructor calls
2080 	 * drm_encoder_cleanup() which takes the lock, we would deadlock.
2081 */
2082 drm_mode_config_cleanup(&dev_priv->drm);
2083 if (dev_priv->active_display_unit == vmw_du_legacy)
2084 ret = vmw_kms_ldu_close_display(dev_priv);
2085
2086 return ret;
2087 }
2088
2089 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2090 struct drm_file *file_priv)
2091 {
2092 struct drm_vmw_cursor_bypass_arg *arg = data;
2093 struct vmw_display_unit *du;
2094 struct drm_crtc *crtc;
2095 int ret = 0;
2096
2097
2098 mutex_lock(&dev->mode_config.mutex);
2099 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2100
2101 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2102 du = vmw_crtc_to_du(crtc);
2103 du->hotspot_x = arg->xhot;
2104 du->hotspot_y = arg->yhot;
2105 }
2106
2107 mutex_unlock(&dev->mode_config.mutex);
2108 return 0;
2109 }
2110
2111 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2112 if (!crtc) {
2113 ret = -ENOENT;
2114 goto out;
2115 }
2116
2117 du = vmw_crtc_to_du(crtc);
2118
2119 du->hotspot_x = arg->xhot;
2120 du->hotspot_y = arg->yhot;
2121
2122 out:
2123 mutex_unlock(&dev->mode_config.mutex);
2124
2125 return ret;
2126 }
2127
2128 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2129 unsigned width, unsigned height, unsigned pitch,
2130 unsigned bpp, unsigned depth)
2131 {
2132 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2133 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2134 else if (vmw_fifo_have_pitchlock(vmw_priv))
2135 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2136 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2137 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2138 if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2139 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2140
2141 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2142 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2143 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2144 return -EINVAL;
2145 }
2146
2147 return 0;
2148 }
2149
2150 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2151 uint32_t pitch,
2152 uint32_t height)
2153 {
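	/*
	 * A mode fits if pitch * height bytes stays below the primary
	 * bounding-box memory limit (STDU) or the VRAM size (other units).
	 */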
2154 return ((u64) pitch * (u64) height) < (u64)
2155 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
2156 dev_priv->max_primary_mem : dev_priv->vram_size);
2157 }
2158
2159
2160 /*
2161  * Function called by DRM code with vbl_lock held.
2162 */
2163 u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
2164 {
2165 return 0;
2166 }
2167
2168 /*
2169  * Function called by DRM code with vbl_lock held.
2170 */
2171 int vmw_enable_vblank(struct drm_crtc *crtc)
2172 {
2173 return -EINVAL;
2174 }
2175
2176 /*
2177  * Function called by DRM code with vbl_lock held.
2178 */
2179 void vmw_disable_vblank(struct drm_crtc *crtc)
2180 {
2181 }
2182
2183 /**
2184 * vmw_du_update_layout - Update the display unit with topology from resolution
2185 * plugin and generate DRM uevent
2186 * @dev_priv: device private
2187 * @num_rects: number of drm_rect in rects
2188  * @rects: topology to update
2189 */
2190 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2191 unsigned int num_rects, struct drm_rect *rects)
2192 {
2193 struct drm_device *dev = &dev_priv->drm;
2194 struct vmw_display_unit *du;
2195 struct drm_connector *con;
2196 struct drm_connector_list_iter conn_iter;
2197 struct drm_modeset_acquire_ctx ctx;
2198 struct drm_crtc *crtc;
2199 int ret;
2200
2201 	/* Currently gui_x/y are protected by the crtc mutex */
2202 mutex_lock(&dev->mode_config.mutex);
2203 drm_modeset_acquire_init(&ctx, 0);
2204 retry:
2205 drm_for_each_crtc(crtc, dev) {
2206 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2207 if (ret < 0) {
2208 if (ret == -EDEADLK) {
2209 drm_modeset_backoff(&ctx);
2210 goto retry;
2211 }
2212 goto out_fini;
2213 }
2214 }
2215
2216 drm_connector_list_iter_begin(dev, &conn_iter);
2217 drm_for_each_connector_iter(con, &conn_iter) {
2218 du = vmw_connector_to_du(con);
2219 if (num_rects > du->unit) {
2220 du->pref_width = drm_rect_width(&rects[du->unit]);
2221 du->pref_height = drm_rect_height(&rects[du->unit]);
2222 du->pref_active = true;
2223 du->gui_x = rects[du->unit].x1;
2224 du->gui_y = rects[du->unit].y1;
2225 } else {
2226 du->pref_width = 800;
2227 du->pref_height = 600;
2228 du->pref_active = false;
2229 du->gui_x = 0;
2230 du->gui_y = 0;
2231 }
2232 }
2233 drm_connector_list_iter_end(&conn_iter);
2234
2235 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2236 du = vmw_connector_to_du(con);
2237 if (num_rects > du->unit) {
2238 drm_object_property_set_value
2239 (&con->base, dev->mode_config.suggested_x_property,
2240 du->gui_x);
2241 drm_object_property_set_value
2242 (&con->base, dev->mode_config.suggested_y_property,
2243 du->gui_y);
2244 } else {
2245 drm_object_property_set_value
2246 (&con->base, dev->mode_config.suggested_x_property,
2247 0);
2248 drm_object_property_set_value
2249 (&con->base, dev->mode_config.suggested_y_property,
2250 0);
2251 }
2252 con->status = vmw_du_connector_detect(con, true);
2253 }
2254
2255 drm_sysfs_hotplug_event(dev);
2256 out_fini:
2257 drm_modeset_drop_locks(&ctx);
2258 drm_modeset_acquire_fini(&ctx);
2259 mutex_unlock(&dev->mode_config.mutex);
2260
2261 return 0;
2262 }
2263
2264 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2265 u16 *r, u16 *g, u16 *b,
2266 uint32_t size,
2267 struct drm_modeset_acquire_ctx *ctx)
2268 {
2269 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2270 int i;
2271
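	/* The device palette is 8 bits per channel; drop the low byte. */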
2272 for (i = 0; i < size; i++) {
2273 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2274 r[i], g[i], b[i]);
2275 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2276 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2277 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2278 }
2279
2280 return 0;
2281 }
2282
2283 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2284 {
2285 return 0;
2286 }
2287
2288 enum drm_connector_status
2289 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2290 {
2291 uint32_t num_displays;
2292 struct drm_device *dev = connector->dev;
2293 struct vmw_private *dev_priv = vmw_priv(dev);
2294 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2295
2296 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2297
2298 	return ((du->unit < num_displays && du->pref_active) ?
2299 		connector_status_connected :
2300 		connector_status_disconnected);
2301 }
2302
2303 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2304 /* 640x480@60Hz */
2305 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2306 752, 800, 0, 480, 489, 492, 525, 0,
2307 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2308 /* 800x600@60Hz */
2309 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2310 968, 1056, 0, 600, 601, 605, 628, 0,
2311 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2312 /* 1024x768@60Hz */
2313 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2314 1184, 1344, 0, 768, 771, 777, 806, 0,
2315 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2316 /* 1152x864@75Hz */
2317 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2318 1344, 1600, 0, 864, 865, 868, 900, 0,
2319 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2320 /* 1280x720@60Hz */
2321 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2322 1472, 1664, 0, 720, 723, 728, 748, 0,
2323 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2324 /* 1280x768@60Hz */
2325 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2326 1472, 1664, 0, 768, 771, 778, 798, 0,
2327 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2328 /* 1280x800@60Hz */
2329 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2330 1480, 1680, 0, 800, 803, 809, 831, 0,
2331 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2332 /* 1280x960@60Hz */
2333 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2334 1488, 1800, 0, 960, 961, 964, 1000, 0,
2335 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2336 /* 1280x1024@60Hz */
2337 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2338 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2339 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2340 /* 1360x768@60Hz */
2341 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2342 1536, 1792, 0, 768, 771, 777, 795, 0,
2343 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2344 	/* 1400x1050@60Hz */
2345 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2346 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2347 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2348 /* 1440x900@60Hz */
2349 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2350 1672, 1904, 0, 900, 903, 909, 934, 0,
2351 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2352 /* 1600x1200@60Hz */
2353 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2354 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2355 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2356 /* 1680x1050@60Hz */
2357 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2358 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2359 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2360 /* 1792x1344@60Hz */
2361 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2362 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2363 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2364 	/* 1856x1392@60Hz */
2365 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2366 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2367 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2368 /* 1920x1080@60Hz */
2369 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2370 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2371 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2372 /* 1920x1200@60Hz */
2373 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2374 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2375 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2376 /* 1920x1440@60Hz */
2377 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2378 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2379 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2380 /* 2560x1440@60Hz */
2381 { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2382 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2383 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2384 /* 2560x1600@60Hz */
2385 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2386 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2387 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2388 /* 2880x1800@60Hz */
2389 { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2390 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2391 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2392 /* 3840x2160@60Hz */
2393 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2394 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2395 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2396 /* 3840x2400@60Hz */
2397 { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2398 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2399 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2400 /* Terminate */
2401 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2402 };
2403
2404 /**
2405 * vmw_guess_mode_timing - Provide fake timings for a
2406 * 60Hz vrefresh mode.
2407 *
2408 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2409 * members filled in.
2410 */
2411 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2412 {
2413 mode->hsync_start = mode->hdisplay + 50;
2414 mode->hsync_end = mode->hsync_start + 50;
2415 mode->htotal = mode->hsync_end + 50;
2416
2417 mode->vsync_start = mode->vdisplay + 50;
2418 mode->vsync_end = mode->vsync_start + 50;
2419 mode->vtotal = mode->vsync_end + 50;
2420
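	/* 60Hz vrefresh in kHz: htotal * vtotal * 60 / 1000 == htotal * vtotal / 100 * 6 */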
2421 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2422 }
2423
2424
2425 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2426 uint32_t max_width, uint32_t max_height)
2427 {
2428 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2429 struct drm_device *dev = connector->dev;
2430 struct vmw_private *dev_priv = vmw_priv(dev);
2431 struct drm_display_mode *mode = NULL;
2432 struct drm_display_mode *bmode;
2433 struct drm_display_mode prefmode = { DRM_MODE("preferred",
2434 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2435 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2436 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2437 };
2438 int i;
2439 u32 assumed_bpp = 4;
2440
2441 if (dev_priv->assume_16bpp)
2442 assumed_bpp = 2;
2443
2444 max_width = min(max_width, dev_priv->texture_max_width);
2445 max_height = min(max_height, dev_priv->texture_max_height);
2446
2447 /*
2448 	 * For STDU there is an extra per-mode limit given by the
2449 	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2450 */
2451 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2452 max_width = min(max_width, dev_priv->stdu_max_width);
2453 max_height = min(max_height, dev_priv->stdu_max_height);
2454 }
2455
2456 /* Add preferred mode */
2457 mode = drm_mode_duplicate(dev, &prefmode);
2458 if (!mode)
2459 return 0;
2460 mode->hdisplay = du->pref_width;
2461 mode->vdisplay = du->pref_height;
2462 vmw_guess_mode_timing(mode);
2463 drm_mode_set_name(mode);
2464
2465 if (vmw_kms_validate_mode_vram(dev_priv,
2466 mode->hdisplay * assumed_bpp,
2467 mode->vdisplay)) {
2468 drm_mode_probed_add(connector, mode);
2469 } else {
2470 drm_mode_destroy(dev, mode);
2471 mode = NULL;
2472 }
2473
2474 if (du->pref_mode) {
2475 list_del_init(&du->pref_mode->head);
2476 drm_mode_destroy(dev, du->pref_mode);
2477 }
2478
2479 	/* mode may be NULL here; this is intended */
2480 du->pref_mode = mode;
2481
2482 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2483 bmode = &vmw_kms_connector_builtin[i];
2484 if (bmode->hdisplay > max_width ||
2485 bmode->vdisplay > max_height)
2486 continue;
2487
2488 if (!vmw_kms_validate_mode_vram(dev_priv,
2489 bmode->hdisplay * assumed_bpp,
2490 bmode->vdisplay))
2491 continue;
2492
2493 mode = drm_mode_duplicate(dev, bmode);
2494 if (!mode)
2495 return 0;
2496
2497 drm_mode_probed_add(connector, mode);
2498 }
2499
2500 drm_connector_list_update(connector);
2501 	/* Move the preferred mode first to help apps pick the right mode. */
2502 drm_mode_sort(&connector->modes);
2503
2504 return 1;
2505 }
2506
2507 /**
2508 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2509 * @dev: drm device for the ioctl
2510 * @data: data pointer for the ioctl
2511 * @file_priv: drm file for the ioctl call
2512 *
2513 * Update preferred topology of display unit as per ioctl request. The topology
2514 * is expressed as array of drm_vmw_rect.
2515 * e.g.
2516 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2517 *
2518 * NOTE:
2519  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2520  * Besides the device limit on topology, x + w and y + h (lower right) cannot
2521  * be greater than INT_MAX, so a topology beyond these limits returns an error.
2522 *
2523 * Returns:
2524 * Zero on success, negative errno on failure.
2525 */
2526 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2527 struct drm_file *file_priv)
2528 {
2529 struct vmw_private *dev_priv = vmw_priv(dev);
2530 struct drm_mode_config *mode_config = &dev->mode_config;
2531 struct drm_vmw_update_layout_arg *arg =
2532 (struct drm_vmw_update_layout_arg *)data;
2533 void __user *user_rects;
2534 struct drm_vmw_rect *rects;
2535 struct drm_rect *drm_rects;
2536 unsigned rects_size;
2537 int ret, i;
2538
2539 if (!arg->num_outputs) {
2540 struct drm_rect def_rect = {0, 0, 800, 600};
2541 VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
2542 def_rect.x1, def_rect.y1,
2543 def_rect.x2, def_rect.y2);
2544 vmw_du_update_layout(dev_priv, 1, &def_rect);
2545 return 0;
2546 }
2547
2548 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2549 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2550 GFP_KERNEL);
2551 if (unlikely(!rects))
2552 return -ENOMEM;
2553
2554 user_rects = (void __user *)(unsigned long)arg->rects;
2555 ret = copy_from_user(rects, user_rects, rects_size);
2556 if (unlikely(ret != 0)) {
2557 DRM_ERROR("Failed to get rects.\n");
2558 ret = -EFAULT;
2559 goto out_free;
2560 }
2561
2562 drm_rects = (struct drm_rect *)rects;
2563
2564 VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2565 for (i = 0; i < arg->num_outputs; i++) {
2566 struct drm_vmw_rect curr_rect;
2567
2568 		/* Verify user-space rects for overflow as the kernel uses drm_rect */
2569 if ((rects[i].x + rects[i].w > INT_MAX) ||
2570 (rects[i].y + rects[i].h > INT_MAX)) {
2571 ret = -ERANGE;
2572 goto out_free;
2573 }
2574
2575 curr_rect = rects[i];
2576 drm_rects[i].x1 = curr_rect.x;
2577 drm_rects[i].y1 = curr_rect.y;
2578 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2579 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2580
2581 VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
2582 drm_rects[i].x1, drm_rects[i].y1,
2583 drm_rects[i].x2, drm_rects[i].y2);
2584
2585 /*
2586 		 * Currently this check limits the topology to within
2587 		 * mode_config->max (which in fact is the maximum texture size
2588 		 * supported by the virtual device). This limit is here to
2589 		 * address window managers that create a big framebuffer for
2590 		 * the whole topology.
2591 */
2592 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2593 drm_rects[i].x2 > mode_config->max_width ||
2594 drm_rects[i].y2 > mode_config->max_height) {
2595 VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2596 drm_rects[i].x1, drm_rects[i].y1,
2597 drm_rects[i].x2, drm_rects[i].y2);
2598 ret = -EINVAL;
2599 goto out_free;
2600 }
2601 }
2602
2603 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2604
2605 if (ret == 0)
2606 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2607
2608 out_free:
2609 kfree(rects);
2610 return ret;
2611 }
2612
2613 /**
2614 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2615 * on a set of cliprects and a set of display units.
2616 *
2617 * @dev_priv: Pointer to a device private structure.
2618 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2619  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2620 * Cliprects are given in framebuffer coordinates.
2621 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2622 * be NULL. Cliprects are given in source coordinates.
2623 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2624 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2625 * @num_clips: Number of cliprects in the @clips or @vclips array.
2626 * @increment: Integer with which to increment the clip counter when looping.
2627 * Used to skip a predetermined number of clip rects.
2628 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
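 *
 * A minimal caller sketch (illustrative; my_clip(), my_commit() and
 * struct my_cmd are hypothetical, and the reserve size is an assumption):
 *
 *	struct vmw_kms_dirty dirty = {
 *		.clip = my_clip,
 *		.fifo_commit = my_commit,
 *		.fifo_reserve_size = sizeof(struct my_cmd) * num_clips,
 *	};
 *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL, 0, 0,
 *				   num_clips, 1, &dirty);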
2629 */
2630 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2631 struct vmw_framebuffer *framebuffer,
2632 const struct drm_clip_rect *clips,
2633 const struct drm_vmw_rect *vclips,
2634 s32 dest_x, s32 dest_y,
2635 int num_clips,
2636 int increment,
2637 struct vmw_kms_dirty *dirty)
2638 {
2639 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2640 struct drm_crtc *crtc;
2641 u32 num_units = 0;
2642 u32 i, k;
2643
2644 dirty->dev_priv = dev_priv;
2645
2646 /* If crtc is passed, no need to iterate over other display units */
2647 if (dirty->crtc) {
2648 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2649 } else {
2650 list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2651 head) {
2652 struct drm_plane *plane = crtc->primary;
2653
2654 if (plane->state->fb == &framebuffer->base)
2655 units[num_units++] = vmw_crtc_to_du(crtc);
2656 }
2657 }
2658
2659 for (k = 0; k < num_units; k++) {
2660 struct vmw_display_unit *unit = units[k];
2661 s32 crtc_x = unit->crtc.x;
2662 s32 crtc_y = unit->crtc.y;
2663 s32 crtc_width = unit->crtc.mode.hdisplay;
2664 s32 crtc_height = unit->crtc.mode.vdisplay;
2665 const struct drm_clip_rect *clips_ptr = clips;
2666 const struct drm_vmw_rect *vclips_ptr = vclips;
2667
2668 dirty->unit = unit;
2669 if (dirty->fifo_reserve_size > 0) {
2670 dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2671 dirty->fifo_reserve_size);
2672 if (!dirty->cmd)
2673 return -ENOMEM;
2674
2675 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2676 }
2677 dirty->num_hits = 0;
2678 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2679 vclips_ptr += increment) {
2680 s32 clip_left;
2681 s32 clip_top;
2682
2683 /*
2684 * Select clip array type. Note that integer type
2685 * in @clips is unsigned short, whereas in @vclips
2686 * it's 32-bit.
2687 */
2688 if (clips) {
2689 dirty->fb_x = (s32) clips_ptr->x1;
2690 dirty->fb_y = (s32) clips_ptr->y1;
2691 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2692 crtc_x;
2693 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2694 crtc_y;
2695 } else {
2696 dirty->fb_x = vclips_ptr->x;
2697 dirty->fb_y = vclips_ptr->y;
2698 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2699 dest_x - crtc_x;
2700 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2701 dest_y - crtc_y;
2702 }
2703
2704 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2705 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2706
2707 /* Skip this clip if it's outside the crtc region */
2708 if (dirty->unit_x1 >= crtc_width ||
2709 dirty->unit_y1 >= crtc_height ||
2710 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2711 continue;
2712
2713 /* Clip right and bottom to crtc limits */
2714 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2715 crtc_width);
2716 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2717 crtc_height);
2718
2719 /* Clip left and top to crtc limits */
2720 clip_left = min_t(s32, dirty->unit_x1, 0);
2721 clip_top = min_t(s32, dirty->unit_y1, 0);
2722 dirty->unit_x1 -= clip_left;
2723 dirty->unit_y1 -= clip_top;
2724 dirty->fb_x -= clip_left;
2725 dirty->fb_y -= clip_top;
2726
2727 dirty->clip(dirty);
2728 }
2729
2730 dirty->fifo_commit(dirty);
2731 }
2732
2733 return 0;
2734 }
2735
2736 /**
2737 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2738 * cleanup and fencing
2739 * @dev_priv: Pointer to the device-private struct
2740 * @file_priv: Pointer identifying the client when user-space fencing is used
2741 * @ctx: Pointer to the validation context
2742 * @out_fence: If non-NULL, returned refcounted fence-pointer
2743 * @user_fence_rep: If non-NULL, pointer to user-space address area
2744 * in which to copy user-space fence info
2745 */
2746 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2747 struct drm_file *file_priv,
2748 struct vmw_validation_context *ctx,
2749 struct vmw_fence_obj **out_fence,
2750 struct drm_vmw_fence_rep __user *
2751 user_fence_rep)
2752 {
2753 struct vmw_fence_obj *fence = NULL;
2754 uint32_t handle = 0;
2755 int ret = 0;
2756
2757 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2758 out_fence)
2759 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2760 file_priv ? &handle : NULL);
2761 vmw_validation_done(ctx, fence);
2762 if (file_priv)
2763 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2764 ret, user_fence_rep, fence,
2765 handle, -1);
2766 if (out_fence)
2767 *out_fence = fence;
2768 else
2769 vmw_fence_obj_unreference(&fence);
2770 }
2771
2772 /**
2773 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2774 * its backing MOB.
2775 *
2776 * @res: Pointer to the surface resource
2777 * @clips: Clip rects in framebuffer (surface) space.
2778 * @num_clips: Number of clips in @clips.
2779 * @increment: Integer with which to increment the clip counter when looping.
2780 * Used to skip a predetermined number of clip rects.
2781 *
2782 * This function makes sure the proxy surface is updated from its backing MOB
2783  * using the region given by @clips. The surface resource @res and its backing
2784  * MOB need to be reserved and validated when this function is called.
2785 */
2786 int vmw_kms_update_proxy(struct vmw_resource *res,
2787 const struct drm_clip_rect *clips,
2788 unsigned num_clips,
2789 int increment)
2790 {
2791 struct vmw_private *dev_priv = res->dev_priv;
2792 struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2793 struct {
2794 SVGA3dCmdHeader header;
2795 SVGA3dCmdUpdateGBImage body;
2796 } *cmd;
2797 SVGA3dBox *box;
2798 size_t copy_size = 0;
2799 int i;
2800
2801 if (!clips)
2802 return 0;
2803
2804 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2805 if (!cmd)
2806 return -ENOMEM;
2807
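	/* Emit one SVGA_3D_CMD_UPDATE_GB_IMAGE command per clip rect. */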
2808 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2809 box = &cmd->body.box;
2810
2811 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2812 cmd->header.size = sizeof(cmd->body);
2813 cmd->body.image.sid = res->id;
2814 cmd->body.image.face = 0;
2815 cmd->body.image.mipmap = 0;
2816
2817 if (clips->x1 > size->width || clips->x2 > size->width ||
2818 clips->y1 > size->height || clips->y2 > size->height) {
2819 DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2820 return -EINVAL;
2821 }
2822
2823 box->x = clips->x1;
2824 box->y = clips->y1;
2825 box->z = 0;
2826 box->w = clips->x2 - clips->x1;
2827 box->h = clips->y2 - clips->y1;
2828 box->d = 1;
2829
2830 copy_size += sizeof(*cmd);
2831 }
2832
2833 vmw_cmd_commit(dev_priv, copy_size);
2834
2835 return 0;
2836 }
2837
2838 int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2839 unsigned unit,
2840 u32 max_width,
2841 u32 max_height,
2842 struct drm_connector **p_con,
2843 struct drm_crtc **p_crtc,
2844 struct drm_display_mode **p_mode)
2845 {
2846 struct drm_connector *con;
2847 struct vmw_display_unit *du;
2848 struct drm_display_mode *mode;
2849 int i = 0;
2850 int ret = 0;
2851
2852 mutex_lock(&dev_priv->drm.mode_config.mutex);
2853 list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
2854 head) {
2855 if (i == unit)
2856 break;
2857
2858 ++i;
2859 }
2860
2861 if (&con->head == &dev_priv->drm.mode_config.connector_list) {
2862 DRM_ERROR("Could not find initial display unit.\n");
2863 ret = -EINVAL;
2864 goto out_unlock;
2865 }
2866
2867 if (list_empty(&con->modes))
2868 (void) vmw_du_connector_fill_modes(con, max_width, max_height);
2869
2870 if (list_empty(&con->modes)) {
2871 DRM_ERROR("Could not find initial display mode.\n");
2872 ret = -EINVAL;
2873 goto out_unlock;
2874 }
2875
2876 du = vmw_connector_to_du(con);
2877 *p_con = con;
2878 *p_crtc = &du->crtc;
2879
2880 list_for_each_entry(mode, &con->modes, head) {
2881 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2882 break;
2883 }
2884
2885 if (&mode->head == &con->modes) {
2886 WARN_ONCE(true, "Could not find initial preferred mode.\n");
2887 *p_mode = list_first_entry(&con->modes,
2888 struct drm_display_mode,
2889 head);
2890 } else {
2891 *p_mode = mode;
2892 }
2893
2894 out_unlock:
2895 mutex_unlock(&dev_priv->drm.mode_config.mutex);
2896
2897 return ret;
2898 }
2899
2900 /**
2901 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2902 * property.
2903 *
2904 * @dev_priv: Pointer to a device private struct.
2905 *
2906 * Sets up the implicit placement property unless it's already set up.
2907 */
2908 void
2909 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2910 {
2911 if (dev_priv->implicit_placement_property)
2912 return;
2913
2914 dev_priv->implicit_placement_property =
2915 drm_property_create_range(&dev_priv->drm,
2916 DRM_MODE_PROP_IMMUTABLE,
2917 "implicit_placement", 0, 1);
2918 }
2919
2920 /**
2921 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2922 *
2923 * @dev: Pointer to the drm device
2924 * Return: 0 on success. Negative error code on failure.
2925 */
2926 int vmw_kms_suspend(struct drm_device *dev)
2927 {
2928 struct vmw_private *dev_priv = vmw_priv(dev);
2929
2930 dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2931 if (IS_ERR(dev_priv->suspend_state)) {
2932 int ret = PTR_ERR(dev_priv->suspend_state);
2933
2934 DRM_ERROR("Failed kms suspend: %d\n", ret);
2935 dev_priv->suspend_state = NULL;
2936
2937 return ret;
2938 }
2939
2940 return 0;
2941 }
2942
2943
2944 /**
2945 * vmw_kms_resume - Re-enable modesetting and restore state
2946 *
2947 * @dev: Pointer to the drm device
2948 * Return: 0 on success. Negative error code on failure.
2949 *
2950 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2951 * to call this function without a previous vmw_kms_suspend().
2952 */
2953 int vmw_kms_resume(struct drm_device *dev)
2954 {
2955 struct vmw_private *dev_priv = vmw_priv(dev);
2956 int ret;
2957
2958 if (WARN_ON(!dev_priv->suspend_state))
2959 return 0;
2960
2961 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2962 dev_priv->suspend_state = NULL;
2963
2964 return ret;
2965 }
2966
2967 /**
2968 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2969 *
2970 * @dev: Pointer to the drm device
2971 */
2972 void vmw_kms_lost_device(struct drm_device *dev)
2973 {
2974 drm_atomic_helper_shutdown(dev);
2975 }
2976
2977 /**
2978 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2979 * @update: The closure structure.
2980 *
2981 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2982 * update on display unit.
2983 *
2984 * Return: 0 on success or a negative error code on failure.
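 *
 * A minimal caller sketch (illustrative; the callback names are hypothetical
 * and vmw_framebuffer_to_vfb() is assumed to be available):
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.calc_fifo_size = my_calc_fifo_size,
 *		.clip = my_clip,
 *		.post_clip = my_post_clip,
 *	};
 *	ret = vmw_du_helper_plane_update(&update);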
2985 */
2986 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2987 {
2988 struct drm_plane_state *state = update->plane->state;
2989 struct drm_plane_state *old_state = update->old_state;
2990 struct drm_atomic_helper_damage_iter iter;
2991 struct drm_rect clip;
2992 struct drm_rect bb;
2993 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2994 uint32_t reserved_size = 0;
2995 uint32_t submit_size = 0;
2996 uint32_t curr_size = 0;
2997 uint32_t num_hits = 0;
2998 void *cmd_start;
2999 char *cmd_next;
3000 int ret;
3001
3002 /*
3003 	 * Iterate in advance to check whether a plane update is really needed
3004 	 * and to find the number of clips that lie in the plane src, for fifo allocation.
3005 */
3006 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3007 drm_atomic_for_each_plane_damage(&iter, &clip)
3008 num_hits++;
3009
3010 if (num_hits == 0)
3011 return 0;
3012
3013 if (update->vfb->bo) {
3014 struct vmw_framebuffer_bo *vfbbo =
3015 container_of(update->vfb, typeof(*vfbbo), base);
3016
3017 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
3018 update->cpu_blit);
3019 } else {
3020 struct vmw_framebuffer_surface *vfbs =
3021 container_of(update->vfb, typeof(*vfbs), base);
3022
3023 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
3024 0, VMW_RES_DIRTY_NONE, NULL,
3025 NULL);
3026 }
3027
3028 if (ret)
3029 return ret;
3030
3031 ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
3032 if (ret)
3033 goto out_unref;
3034
3035 reserved_size = update->calc_fifo_size(update, num_hits);
3036 cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
3037 if (!cmd_start) {
3038 ret = -ENOMEM;
3039 goto out_revert;
3040 }
3041
3042 cmd_next = cmd_start;
3043
3044 if (update->post_prepare) {
3045 curr_size = update->post_prepare(update, cmd_next);
3046 cmd_next += curr_size;
3047 submit_size += curr_size;
3048 }
3049
3050 if (update->pre_clip) {
3051 curr_size = update->pre_clip(update, cmd_next, num_hits);
3052 cmd_next += curr_size;
3053 submit_size += curr_size;
3054 }
3055
3056 bb.x1 = INT_MAX;
3057 bb.y1 = INT_MAX;
3058 bb.x2 = INT_MIN;
3059 bb.y2 = INT_MIN;
3060
3061 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3062 drm_atomic_for_each_plane_damage(&iter, &clip) {
3063 uint32_t fb_x = clip.x1;
3064 uint32_t fb_y = clip.y1;
3065
3066 vmw_du_translate_to_crtc(state, &clip);
3067 if (update->clip) {
3068 curr_size = update->clip(update, cmd_next, &clip, fb_x,
3069 fb_y);
3070 cmd_next += curr_size;
3071 submit_size += curr_size;
3072 }
3073 bb.x1 = min_t(int, bb.x1, clip.x1);
3074 bb.y1 = min_t(int, bb.y1, clip.y1);
3075 bb.x2 = max_t(int, bb.x2, clip.x2);
3076 bb.y2 = max_t(int, bb.y2, clip.y2);
3077 }
3078
3079 curr_size = update->post_clip(update, cmd_next, &bb);
3080 submit_size += curr_size;
3081
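	/* Commit nothing if the commands would exceed the reservation. */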
3082 if (reserved_size < submit_size)
3083 submit_size = 0;
3084
3085 vmw_cmd_commit(update->dev_priv, submit_size);
3086
3087 vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3088 update->out_fence, NULL);
3089 return ret;
3090
3091 out_revert:
3092 vmw_validation_revert(&val_ctx);
3093
3094 out_unref:
3095 vmw_validation_unref_lists(&val_ctx);
3096 return ret;
3097 }
3098