/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/mdev.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

#include "gem/i915_gem_dmabuf.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"

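/* Mask off the low flag bits of a gen8+ GGTT PTE to recover the page address. */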
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

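/*
 * Build the backing store for a vGPU proxy GEM object: read the guest
 * framebuffer's PTEs straight out of the GGTT and pin each guest page.
 * Any pages already pinned are unwound if a later page fails to pin.
 */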
static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	u32 page_num;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	page_num = obj->base.size >> PAGE_SHIFT;
	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

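/*
 * Release the pages of a vGPU proxy GEM object.  The guest pages are
 * unmapped only while the object still has an exported dma-buf; either
 * way the sg_table itself is freed.
 */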
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			intel_gvt_dma_unmap_guest_page(vgpu,
						       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

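/*
 * kref release callback: unlink the dmabuf_obj from its vGPU's list and
 * free it.  Objects orphaned by vGPU removal are freed directly.
 */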
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

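/*
 * GEM release hook: detach the exported dma-buf and drop the reference the
 * GEM object holds on its dmabuf_obj.  The vGPU's dmabuf_lock is taken only
 * when the vGPU still exists.
 */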
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

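/*
 * Wrap a decoded guest framebuffer in a read-only proxy GEM object.  On
 * gen9+ the tiling mode and stride are derived from the DRM format
 * modifier; on older platforms any non-linear modifier is treated as
 * X-tiled.
 */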
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (GRAPHICS_VER(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

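/* A cursor hotspot is valid only if it lies within the cursor plane. */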
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

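/*
 * Decode the guest's primary or cursor plane state into @info and
 * sanity-check the result: the framebuffer must have a non-zero size,
 * start on a page boundary and lie entirely within the vGPU's GGTT range.
 */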
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

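/*
 * Look up an already exposed dmabuf_obj whose decoded framebuffer matches
 * @latest_info, so the same plane is not exported twice.
 */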
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (!dmabuf_obj->info)
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

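/* Look up an exposed dmabuf_obj by its dmabuf id. */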
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

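/* Copy the decoded framebuffer info into the plane info returned to userspace. */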
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

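/*
 * Handle a VFIO_DEVICE_QUERY_GFX_PLANE request: decode the requested guest
 * plane, then either reuse a matching dmabuf_obj already on the vGPU's list
 * or allocate a new one, and report its dmabuf_id to userspace.
 */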
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/*
 * To associate an exposed dmabuf with the dmabuf_obj: create the proxy GEM
 * object, export it as a dma-buf and return a file descriptor for it.
 */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    " file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

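/*
 * Called on vGPU teardown: orphan every dmabuf_obj still on the list so a
 * later release sees vgpu == NULL, and drop any outstanding init reference.
 */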
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}