// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
 * @bo: ttm buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if
 * this is a &vmw_buffer_object.
 *
 * Returns:
 * true if the object is of &vmw_buffer_object type, false if not.
 */
static bool bo_is_vmw(struct ttm_buffer_object *bo)
{
	return bo->destroy == &vmw_bo_bo_free ||
	       bo->destroy == &vmw_gem_destroy;
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
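
/*
 * Example (illustrative sketch, not driver code): a caller that wants a
 * buffer resident and pinned in VRAM could pair the pin helpers like
 * this; 'dev_priv' and 'buf' are assumed to exist at the call site.
 *
 *	ret = vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
 *				      true);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_bo_unpin(dev_priv, buf, false);
 *
 * A -ERESTARTSYS return simply means the interruptible wait was hit by a
 * signal and the call can be retried.
 */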


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0) {
		ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
			? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->resource->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < bo->resource->num_pages &&
	    bo->resource->start > 0 &&
	    buf->base.pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, &placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}
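
/*
 * Example (illustrative sketch): translating the placement of a pinned or
 * reserved buffer into an SVGAGuestPtr for a device command. Only the
 * helper call reflects this file; where the pointer ends up is a
 * call-site assumption.
 *
 *	SVGAGuestPtr ptr;
 *
 *	vmw_bo_get_guest_ptr(bo, &ptr);
 *
 * Afterwards ptr.gmrId and ptr.offset describe the current placement and
 * can be copied into the relevant SVGA command body.
 */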


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on any of:
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}
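
/*
 * Example (illustrative sketch): CPU access through the cached map while
 * the buffer is reserved or pinned; 'data' and 'size' are call-site
 * assumptions and error handling is elided.
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *
 *	if (virtual)
 *		memcpy(virtual, data, size);
 *
 * No explicit vmw_bo_unmap() is needed afterwards; the cached map is torn
 * down automatically on buffer move, swapout or destruction.
 */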


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	drm_gem_object_release(&bo->base);
	kfree(vmw_bo);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 *
 * Creates and pins a simple BO for in-kernel use.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	size = ALIGN(size, PAGE_SIZE);

	drm_gem_private_object_init(vdev, &bo->base, size);

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_kernel, placement, 0,
				   &ctx, NULL, NULL, NULL);
	if (unlikely(ret))
		goto error_free;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_free:
	kfree(bo);
	return ret;
}
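
/*
 * Example (illustrative sketch): creating a page-sized pinned
 * system-memory BO for driver-internal use and tearing it down again.
 * The reserve/unpin/put teardown mirrors what other callers in this
 * driver do.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_bo_create_kernel(dev_priv, PAGE_SIZE,
 *				   &vmw_sys_placement, &bo);
 *	if (ret)
 *		return ret;
 *	...
 *	(void) ttm_bo_reserve(bo, false, false, NULL);
 *	ttm_bo_unpin(bo);
 *	ttm_bo_unreserve(bo);
 *	ttm_bo_put(bo);
 */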

int vmw_bo_create(struct vmw_private *vmw,
		  size_t size, struct ttm_placement *placement,
		  bool interruptible, bool pin,
		  void (*bo_free)(struct ttm_buffer_object *bo),
		  struct vmw_buffer_object **p_bo)
{
	int ret;

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(vmw, *p_bo, size,
			  placement, interruptible, pin,
			  bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	return ret;
out_error:
	kfree(*p_bo);
	*p_bo = NULL;
	return ret;
}
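
/*
 * Example (illustrative sketch): allocating a vmw buffer object with the
 * default destructor from this file; 'size' and the chosen placement are
 * call-site assumptions.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	ret = vmw_bo_create(vmw, size, &vmw_vram_gmr_placement,
 *			    true, false, vmw_bo_bo_free, &vbo);
 *	if (ret)
 *		return ret;
 */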

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	WARN_ON_ONCE(!bo_free);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	size = ALIGN(size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
				   ttm_bo_type_device,
				   placement,
				   0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret))
		return ret;

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);

	return 0;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file the buffer
 * handle is registered with is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);
	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_buffer_object *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs))
			atomic_dec(&vmw_bo->cpu_writers);
		ttm_bo_put(&vmw_bo->base);
	}

	return ret;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_bo_unreference(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
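
/*
 * Example (illustrative sketch of the userspace side, not driver code):
 * grabbing a buffer for CPU writes and releasing it again. The
 * DRM_VMW_SYNCCPU command index and struct drm_vmw_synccpu_arg come from
 * the vmwgfx uapi header; drmCommandWrite() is assumed from libdrm, and
 * 'fd' and 'bo_handle' from the calling code.
 *
 *	struct drm_vmw_synccpu_arg arg = { 0 };
 *
 *	arg.handle = bo_handle;
 *	arg.op = drm_vmw_synccpu_grab;
 *	arg.flags = drm_vmw_synccpu_write;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	...			(CPU access to the buffer's mapping)
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */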

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	drm_gem_handle_delete(file_priv, arg->handle);
	return 0;
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * Return: Zero on success, negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted.
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       uint32_t handle,
		       struct vmw_buffer_object **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = gem_to_vmw_bo(gobj);
	ttm_bo_get(&(*out)->base);
	drm_gem_object_put(gobj);

	return 0;
}
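
/*
 * Example (illustrative sketch): resolving a user handle to a refcounted
 * buffer object and dropping the reference when done, as the synccpu
 * ioctl above does.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_bo_unreference(&vbo);
 */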

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without a reference
 * @filp: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). Iff this function returns successfully,
 * it needs to be paired with vmw_user_bo_noref_release(), and no sleeping
 * or scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
{
	struct vmw_buffer_object *vmw_bo;
	struct ttm_buffer_object *bo;
	struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);

	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}
	vmw_bo = gem_to_vmw_bo(gobj);
	bo = ttm_bo_get_unless_zero(&vmw_bo->base);
	vmw_bo = vmw_buffer_object(bo);
	drm_gem_object_put(gobj);

	return vmw_bo;
}
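
/*
 * Example (illustrative sketch): a noref lookup paired with
 * vmw_user_bo_noref_release(), with only non-sleeping work in between.
 *
 *	struct vmw_buffer_object *vbo;
 *
 *	vbo = vmw_user_bo_noref_lookup(filp, handle);
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *	...			(non-sleeping use of vbo only)
 *	vmw_user_bo_noref_release();
 */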


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}
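
/*
 * Example (illustrative sketch): fencing a reserved buffer after emitting
 * device commands that touch it, letting the function create the fence
 * itself by passing NULL.
 *
 *	(void) ttm_bo_reserve(bo, false, false, NULL);
 *	...			(emit commands using the buffer)
 *	vmw_bo_fence_single(bo, NULL);
 *	ttm_bo_unreserve(bo);
 */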


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
						args->size, &args->handle,
						&vbo);

	return ret;
}
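
/*
 * For example (illustrative): a 1280x720, 32 bpp dumb buffer gets
 * pitch = 1280 * 4 = 5120 bytes and size = ALIGN(5120 * 720, PAGE_SIZE) =
 * 3686400 bytes, which happens to be page aligned already.
 */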

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (!bo_is_vmw(bo))
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (!bo_is_vmw(bo))
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}