// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
	spinlock_t cotable_lock;
	struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

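/**
 * vmw_context_cotables_unref - Drop the context's references on its cotables
 *
 * @dev_priv: Pointer to the device private structure.
 * @uctx: Pointer to the user context whose cotables should be released.
 *
 * Each cotable pointer is cleared under the cotable lock before the
 * reference is dropped, so that readers racing with destruction see
 * either a valid cotable or NULL.
 */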
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
				       struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;
	u32 cotable_max = has_sm5_context(dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	for (i = 0; i < cotable_max; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

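/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, this tears down the command buffer
 * resource manager and all bindings before calling the type-specific
 * destroy function. For legacy contexts, it emits a
 * SVGA_3D_CMD_CONTEXT_DESTROY command directly.
 */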
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(dev_priv, uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

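/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize the resource as a DX context.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor that takes over freeing of @res on failure, or
 * NULL if kfree() should be used.
 *
 * Sets up the backup size, the command buffer resource manager (on
 * devices with MOB support), the binding state tracker and, for DX
 * contexts, the cotable resources.
 */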
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    sizeof(SVGAGBContextData));
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		u32 cotable_max = has_sm5_context(dev_priv) ?
			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
		for (i = 0; i < cotable_max; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(dev_priv, uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

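/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor for @res, or NULL if kfree() should be used.
 * @dx: Whether the context is a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices.
 * Otherwise, defines a legacy context on the device.
 */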
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}


/*
 * GB context.
 */

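/**
 * vmw_gb_context_create - Allocate a context id and define the
 * guest-backed context on the device.
 *
 * @res: Pointer to the context resource.
 */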
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

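/**
 * vmw_gb_context_bind - Bind the validated backup MOB to the device
 * context.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup MOB.
 */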
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

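/**
 * vmw_gb_context_unbind - Scrub all bindings, unbind the backup MOB from
 * the device context, optionally reading back its contents first, and
 * fence the backup buffer.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: Validation buffer holding the reserved backup MOB.
 */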
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

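/**
 * vmw_gb_context_destroy - Destroy the guest-backed device context and
 * release its context id.
 *
 * @res: Pointer to the context resource.
 */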
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

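/**
 * vmw_dx_context_create - Allocate a context id and define the DX context
 * on the device.
 *
 * @res: Pointer to the context resource.
 */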
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

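/**
 * vmw_dx_context_bind - Bind the validated backup MOB to the DX device
 * context.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup MOB.
 */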
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));


	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < cotable_max; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

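/**
 * vmw_dx_context_unbind - Scrub cotables and bindings, unbind the backup
 * MOB from the DX device context and fence the backup buffer.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read back context and query states before
 * unbinding.
 * @val_buf: Validation buffer holding the reserved backup MOB.
 */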
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

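/**
 * vmw_dx_context_destroy - Destroy the DX device context and release its
 * context id.
 *
 * @res: Pointer to the context resource.
 */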
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

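/**
 * vmw_user_context_free - Resource destructor for user contexts
 *
 * @res: Pointer to the context resource.
 *
 * Frees the binding state, drops the query MOB association and finally
 * frees the embedding user context structure.
 */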
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

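/**
 * vmw_context_destroy_ioctl - Ioctl entry point for destroying a context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg identifying the
 * context by handle.
 * @file_priv: Identifies the calling file.
 *
 * Drops the caller's reference on the base object; the resource is
 * released once the last reference is gone.
 */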
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid);
}

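/**
 * vmw_context_define - Create a context resource and its user-space
 * visible base object
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg in which the
 * new context handle is returned.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 */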
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!has_sm4_context(dev_priv) && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

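/**
 * vmw_context_res_man - Return the context's command buffer resource
 * manager
 *
 * @ctx: The context resource.
 */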
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

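/**
 * vmw_context_cotable - Return the cotable of a given type for the context
 *
 * @ctx: The context resource.
 * @cotable_type: The cotable type.
 *
 * Returns an ERR_PTR if @cotable_type is out of range for the device's
 * SM version.
 */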
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	if (cotable_type >= cotable_max)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_buffer_object *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}