1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include <drm/ttm/ttm_placement.h>
29 
30 #include "vmwgfx_resource_priv.h"
31 #include "vmwgfx_binding.h"
32 #include "vmwgfx_drv.h"
33 
34 #define VMW_RES_EVICT_ERR_COUNT 10
35 
36 /**
37  * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
38  * @res: The resource
39  */
40 void vmw_resource_mob_attach(struct vmw_resource *res)
41 {
42 	struct vmw_buffer_object *backup = res->backup;
43 	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
44 
45 	dma_resv_assert_held(res->backup->base.base.resv);
46 	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
47 		res->func->prio;
48 
49 	while (*new) {
50 		struct vmw_resource *this =
51 			container_of(*new, struct vmw_resource, mob_node);
52 
53 		parent = *new;
54 		new = (res->backup_offset < this->backup_offset) ?
55 			&((*new)->rb_left) : &((*new)->rb_right);
56 	}
57 
58 	rb_link_node(&res->mob_node, parent, new);
59 	rb_insert_color(&res->mob_node, &backup->res_tree);
60 
61 	vmw_bo_prio_add(backup, res->used_prio);
62 }
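
/*
 * Illustrative sketch, not part of the driver: since the tree built above
 * is keyed on the resource backup offset, every resource backed by a mob
 * can be visited in offset order with the generic rbtree iterators. The
 * helper name below is hypothetical.
 *
 *	static void vmw_example_walk_mob(struct vmw_buffer_object *backup)
 *	{
 *		struct rb_node *node;
 *
 *		dma_resv_assert_held(backup->base.base.resv);
 *		for (node = rb_first(&backup->res_tree); node;
 *		     node = rb_next(node)) {
 *			struct vmw_resource *res =
 *				container_of(node, struct vmw_resource,
 *					     mob_node);
 *
 *			pr_info("resource at offset %lu\n",
 *				res->backup_offset);
 *		}
 *	}
 */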
63 
64 /**
65  * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
66  * @res: The resource
67  */
68 void vmw_resource_mob_detach(struct vmw_resource *res)
69 {
70 	struct vmw_buffer_object *backup = res->backup;
71 
72 	dma_resv_assert_held(backup->base.base.resv);
73 	if (vmw_resource_mob_attached(res)) {
74 		rb_erase(&res->mob_node, &backup->res_tree);
75 		RB_CLEAR_NODE(&res->mob_node);
76 		vmw_bo_prio_del(backup, res->used_prio);
77 	}
78 }
79 
80 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
81 {
82 	kref_get(&res->kref);
83 	return res;
84 }
85 
86 struct vmw_resource *
87 vmw_resource_reference_unless_doomed(struct vmw_resource *res)
88 {
89 	return kref_get_unless_zero(&res->kref) ? res : NULL;
90 }
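
/*
 * Illustrative sketch, not part of the driver: the two helpers above are
 * thin kref wrappers. Lookup paths that can race with the final unreference
 * use the _unless_doomed variant so that a resource whose refcount has
 * already reached zero is never resurrected; the pointer obtained must
 * later be dropped with vmw_resource_unreference(). The names "candidate"
 * and use_the_resource() are hypothetical.
 *
 *	struct vmw_resource *res =
 *		vmw_resource_reference_unless_doomed(candidate);
 *
 *	if (res) {
 *		use_the_resource(res);
 *		vmw_resource_unreference(&res);
 *	}
 */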
91 
92 /**
93  * vmw_resource_release_id - release a resource id to the id manager.
94  *
95  * @res: Pointer to the resource.
96  *
97  * Release the resource id to the resource id manager and set it to -1
98  */
99 void vmw_resource_release_id(struct vmw_resource *res)
100 {
101 	struct vmw_private *dev_priv = res->dev_priv;
102 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
103 
104 	spin_lock(&dev_priv->resource_lock);
105 	if (res->id != -1)
106 		idr_remove(idr, res->id);
107 	res->id = -1;
108 	spin_unlock(&dev_priv->resource_lock);
109 }
110 
111 static void vmw_resource_release(struct kref *kref)
112 {
113 	struct vmw_resource *res =
114 	    container_of(kref, struct vmw_resource, kref);
115 	struct vmw_private *dev_priv = res->dev_priv;
116 	int id;
117 	int ret;
118 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
119 
120 	spin_lock(&dev_priv->resource_lock);
121 	list_del_init(&res->lru_head);
122 	spin_unlock(&dev_priv->resource_lock);
123 	if (res->backup) {
124 		struct ttm_buffer_object *bo = &res->backup->base;
125 
126 		ret = ttm_bo_reserve(bo, false, false, NULL);
127 		BUG_ON(ret);
128 		if (vmw_resource_mob_attached(res) &&
129 		    res->func->unbind != NULL) {
130 			struct ttm_validate_buffer val_buf;
131 
132 			val_buf.bo = bo;
133 			val_buf.num_shared = 0;
134 			res->func->unbind(res, false, &val_buf);
135 		}
136 		res->backup_dirty = false;
137 		vmw_resource_mob_detach(res);
138 		if (res->dirty)
139 			res->func->dirty_free(res);
140 		if (res->coherent)
141 			vmw_bo_dirty_release(res->backup);
142 		ttm_bo_unreserve(bo);
143 		vmw_bo_unreference(&res->backup);
144 	}
145 
146 	if (likely(res->hw_destroy != NULL)) {
147 		mutex_lock(&dev_priv->binding_mutex);
148 		vmw_binding_res_list_kill(&res->binding_head);
149 		mutex_unlock(&dev_priv->binding_mutex);
150 		res->hw_destroy(res);
151 	}
152 
153 	id = res->id;
154 	if (res->res_free != NULL)
155 		res->res_free(res);
156 	else
157 		kfree(res);
158 
159 	spin_lock(&dev_priv->resource_lock);
160 	if (id != -1)
161 		idr_remove(idr, id);
162 	spin_unlock(&dev_priv->resource_lock);
163 }
164 
165 void vmw_resource_unreference(struct vmw_resource **p_res)
166 {
167 	struct vmw_resource *res = *p_res;
168 
169 	*p_res = NULL;
170 	kref_put(&res->kref, vmw_resource_release);
171 }
172 
173 
174 /**
175  * vmw_resource_alloc_id - allocate a resource id from the id manager.
176  *
177  * @res: Pointer to the resource.
178  *
179  * Allocate the lowest free resource id from the id manager, and set
180  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
181  */
182 int vmw_resource_alloc_id(struct vmw_resource *res)
183 {
184 	struct vmw_private *dev_priv = res->dev_priv;
185 	int ret;
186 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
187 
188 	BUG_ON(res->id != -1);
189 
190 	idr_preload(GFP_KERNEL);
191 	spin_lock(&dev_priv->resource_lock);
192 
193 	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
194 	if (ret >= 0)
195 		res->id = ret;
196 
197 	spin_unlock(&dev_priv->resource_lock);
198 	idr_preload_end();
199 	return ret < 0 ? ret : 0;
200 }
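
/*
 * Illustrative sketch, not part of the driver: an id handed out above can
 * be resolved back to its resource with idr_find() under the same lock,
 * taking care not to resurrect a doomed resource. The helper name is
 * hypothetical.
 *
 *	static struct vmw_resource *
 *	vmw_example_lookup_id(struct vmw_private *dev_priv,
 *			      enum vmw_res_type type, int id)
 *	{
 *		struct vmw_resource *res;
 *
 *		spin_lock(&dev_priv->resource_lock);
 *		res = idr_find(&dev_priv->res_idr[type], id);
 *		if (res)
 *			res = vmw_resource_reference_unless_doomed(res);
 *		spin_unlock(&dev_priv->resource_lock);
 *
 *		return res;
 *	}
 */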
201 
202 /**
203  * vmw_resource_init - initialize a struct vmw_resource
204  *
205  * @dev_priv:       Pointer to a device private struct.
206  * @res:            The struct vmw_resource to initialize.
207  * @delay_id:       Boolean whether to defer device id allocation until
208  *                  the first validation.
209  * @res_free:       Resource destructor.
210  * @func:           Resource function table.
211  */
212 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
213 		      bool delay_id,
214 		      void (*res_free) (struct vmw_resource *res),
215 		      const struct vmw_res_func *func)
216 {
217 	kref_init(&res->kref);
218 	res->hw_destroy = NULL;
219 	res->res_free = res_free;
220 	res->dev_priv = dev_priv;
221 	res->func = func;
222 	RB_CLEAR_NODE(&res->mob_node);
223 	INIT_LIST_HEAD(&res->lru_head);
224 	INIT_LIST_HEAD(&res->binding_head);
225 	res->id = -1;
226 	res->backup = NULL;
227 	res->backup_offset = 0;
228 	res->backup_dirty = false;
229 	res->res_dirty = false;
230 	res->coherent = false;
231 	res->used_prio = 3;
232 	res->dirty = NULL;
233 	if (delay_id)
234 		return 0;
235 	else
236 		return vmw_resource_alloc_id(res);
237 }
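
/*
 * Illustrative sketch, not part of the driver: a resource-type
 * implementation typically embeds a struct vmw_resource, provides a
 * struct vmw_res_func table and calls vmw_resource_init() from its own
 * constructor. All names below, including vmw_example_res_func, are
 * hypothetical.
 *
 *	struct vmw_example_res {
 *		struct vmw_resource res;
 *	};
 *
 *	static void vmw_example_res_free(struct vmw_resource *res)
 *	{
 *		kfree(container_of(res, struct vmw_example_res, res));
 *	}
 *
 *	static int vmw_example_res_init(struct vmw_private *dev_priv,
 *					struct vmw_example_res *eres)
 *	{
 *		return vmw_resource_init(dev_priv, &eres->res, true,
 *					 vmw_example_res_free,
 *					 &vmw_example_res_func);
 *	}
 */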
238 
239 
240 /**
241  * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
242  * TTM user-space handle and perform basic type checks
243  *
244  * @dev_priv:     Pointer to a device private struct
245  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
246  * @handle:       The TTM user-space handle
247  * @converter:    Pointer to an object describing the resource type
248  * @p_res:        On successful return the location pointed to will contain
249  *                a pointer to a refcounted struct vmw_resource.
250  *
251  * If the handle can't be found or is associated with an incorrect resource
252  * type, -EINVAL will be returned.
253  */
254 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
255 				    struct ttm_object_file *tfile,
256 				    uint32_t handle,
257 				    const struct vmw_user_resource_conv
258 				    *converter,
259 				    struct vmw_resource **p_res)
260 {
261 	struct ttm_base_object *base;
262 	struct vmw_resource *res;
263 	int ret = -EINVAL;
264 
265 	base = ttm_base_object_lookup(tfile, handle);
266 	if (unlikely(base == NULL))
267 		return -EINVAL;
268 
269 	if (unlikely(ttm_base_object_type(base) != converter->object_type))
270 		goto out_bad_resource;
271 
272 	res = converter->base_obj_to_res(base);
273 	kref_get(&res->kref);
274 
275 	*p_res = res;
276 	ret = 0;
277 
278 out_bad_resource:
279 	ttm_base_object_unref(&base);
280 
281 	return ret;
282 }
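
/*
 * Illustrative sketch, not part of the driver: an ioctl path typically
 * performs the lookup above, uses the refcounted resource and then drops
 * its reference. The surrounding function and vmw_example_operate() are
 * hypothetical.
 *
 *	static int vmw_example_surface_op(struct vmw_private *dev_priv,
 *					  struct ttm_object_file *tfile,
 *					  u32 handle)
 *	{
 *		struct vmw_resource *res;
 *		int ret;
 *
 *		ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *						      user_surface_converter,
 *						      &res);
 *		if (ret)
 *			return ret;
 *
 *		ret = vmw_example_operate(vmw_res_to_srf(res));
 *		vmw_resource_unreference(&res);
 *		return ret;
 *	}
 */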
283 
284 /*
285  * Helper function that looks up either a surface or a buffer object.
286  *
287  * The pointers pointed at by out_surf and out_buf must be NULL on entry.
288  */
289 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
290 			   struct drm_file *filp,
291 			   uint32_t handle,
292 			   struct vmw_surface **out_surf,
293 			   struct vmw_buffer_object **out_buf)
294 {
295 	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
296 	struct vmw_resource *res;
297 	int ret;
298 
299 	BUG_ON(*out_surf || *out_buf);
300 
301 	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
302 					      user_surface_converter,
303 					      &res);
304 	if (!ret) {
305 		*out_surf = vmw_res_to_srf(res);
306 		return 0;
307 	}
308 
309 	*out_surf = NULL;
310 	ret = vmw_user_bo_lookup(filp, handle, out_buf);
311 	return ret;
312 }
313 
314 /**
315  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
316  *
317  * @res:            The resource for which to allocate a backup buffer.
318  * @interruptible:  Whether any sleeps during allocation should be
319  *                  performed while interruptible.
320  */
321 static int vmw_resource_buf_alloc(struct vmw_resource *res,
322 				  bool interruptible)
323 {
324 	unsigned long size = PFN_ALIGN(res->backup_size);
325 	struct vmw_buffer_object *backup;
326 	int ret;
327 
328 	if (likely(res->backup)) {
329 		BUG_ON(res->backup->base.base.size < size);
330 		return 0;
331 	}
332 
333 	ret = vmw_bo_create(res->dev_priv, res->backup_size,
334 			    res->func->backup_placement,
335 			    interruptible, false,
336 			    &vmw_bo_bo_free, &backup);
337 	if (unlikely(ret != 0))
338 		goto out_no_bo;
339 
340 	res->backup = backup;
341 
342 out_no_bo:
343 	return ret;
344 }
345 
346 /**
347  * vmw_resource_do_validate - Make a resource up-to-date and visible
348  *                            to the device.
349  *
350  * @res:            The resource to make visible to the device.
351  * @val_buf:        Information about a buffer possibly
352  *                  containing backup data if a bind operation is needed.
353  * @dirtying:       Transfer dirty regions.
354  *
355  * On hardware resource shortage, this function returns -EBUSY and
356  * should be retried once resources have been freed up.
357  */
358 static int vmw_resource_do_validate(struct vmw_resource *res,
359 				    struct ttm_validate_buffer *val_buf,
360 				    bool dirtying)
361 {
362 	int ret = 0;
363 	const struct vmw_res_func *func = res->func;
364 
365 	if (unlikely(res->id == -1)) {
366 		ret = func->create(res);
367 		if (unlikely(ret != 0))
368 			return ret;
369 	}
370 
371 	if (func->bind &&
372 	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
373 	      val_buf->bo != NULL) ||
374 	     (!func->needs_backup && val_buf->bo != NULL))) {
375 		ret = func->bind(res, val_buf);
376 		if (unlikely(ret != 0))
377 			goto out_bind_failed;
378 		if (func->needs_backup)
379 			vmw_resource_mob_attach(res);
380 	}
381 
382 	/*
383 	 * Handle the case where the backup mob is marked coherent but
384 	 * the resource isn't.
385 	 */
386 	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
387 	    !res->coherent) {
388 		if (res->backup->dirty && !res->dirty) {
389 			ret = func->dirty_alloc(res);
390 			if (ret)
391 				return ret;
392 		} else if (!res->backup->dirty && res->dirty) {
393 			func->dirty_free(res);
394 		}
395 	}
396 
397 	/*
398 	 * Transfer the dirty regions to the resource and update
399 	 * the resource.
400 	 */
401 	if (res->dirty) {
402 		if (dirtying && !res->res_dirty) {
403 			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
404 			pgoff_t end = __KERNEL_DIV_ROUND_UP
405 				(res->backup_offset + res->backup_size,
406 				 PAGE_SIZE);
407 
408 			vmw_bo_dirty_unmap(res->backup, start, end);
409 		}
410 
411 		vmw_bo_dirty_transfer_to_res(res);
412 		return func->dirty_sync(res);
413 	}
414 
415 	return 0;
416 
417 out_bind_failed:
418 	func->destroy(res);
419 
420 	return ret;
421 }
422 
423 /**
424  * vmw_resource_unreserve - Unreserve a resource previously reserved for
425  * command submission.
426  *
427  * @res:               Pointer to the struct vmw_resource to unreserve.
428  * @dirty_set:         Change dirty status of the resource.
429  * @dirty:             When changing dirty status indicates the new status.
430  * @switch_backup:     Backup buffer has been switched.
431  * @new_backup:        Pointer to new backup buffer if command submission
432  *                     switched. May be NULL.
433  * @new_backup_offset: New backup offset if @switch_backup is true.
434  *
435  * Currently unreserving a resource means putting it back on the device's
436  * resource lru list, so that it can be evicted if necessary.
437  */
438 void vmw_resource_unreserve(struct vmw_resource *res,
439 			    bool dirty_set,
440 			    bool dirty,
441 			    bool switch_backup,
442 			    struct vmw_buffer_object *new_backup,
443 			    unsigned long new_backup_offset)
444 {
445 	struct vmw_private *dev_priv = res->dev_priv;
446 
447 	if (!list_empty(&res->lru_head))
448 		return;
449 
450 	if (switch_backup && new_backup != res->backup) {
451 		if (res->backup) {
452 			vmw_resource_mob_detach(res);
453 			if (res->coherent)
454 				vmw_bo_dirty_release(res->backup);
455 			vmw_bo_unreference(&res->backup);
456 		}
457 
458 		if (new_backup) {
459 			res->backup = vmw_bo_reference(new_backup);
460 
461 			/*
462 			 * The validation code should already have added a
463 			 * dirty tracker here.
464 			 */
465 			WARN_ON(res->coherent && !new_backup->dirty);
466 
467 			vmw_resource_mob_attach(res);
468 		} else {
469 			res->backup = NULL;
470 		}
471 	} else if (switch_backup && res->coherent) {
472 		vmw_bo_dirty_release(res->backup);
473 	}
474 
475 	if (switch_backup)
476 		res->backup_offset = new_backup_offset;
477 
478 	if (dirty_set)
479 		res->res_dirty = dirty;
480 
481 	if (!res->func->may_evict || res->id == -1 || res->pin_count)
482 		return;
483 
484 	spin_lock(&dev_priv->resource_lock);
485 	list_add_tail(&res->lru_head,
486 		      &res->dev_priv->res_lru[res->func->res_type]);
487 	spin_unlock(&dev_priv->resource_lock);
488 }
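
/*
 * Illustrative sketch, not part of the driver: together with
 * vmw_resource_reserve() and vmw_resource_validate() further down in this
 * file, unreserve completes the typical lifecycle around command
 * submission. The function below is hypothetical and, for brevity, does
 * not reserve the backup buffer the way the real validation code does.
 *
 *	static int vmw_example_use_resource(struct vmw_resource *res)
 *	{
 *		int ret;
 *
 *		ret = vmw_resource_reserve(res, true, false);
 *		if (ret)
 *			return ret;
 *
 *		ret = vmw_resource_validate(res, true, false);
 *
 *		vmw_resource_unreserve(res, false, false, false, NULL, 0);
 *		return ret;
 *	}
 */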
489 
490 /**
491  * vmw_resource_check_buffer - Check whether a backup buffer is needed
492  *                             for a resource and in that case, allocate
493  *                             one, reserve and validate it.
494  *
495  * @ticket:         The ww acquire context to use, or NULL if trylocking.
496  * @res:            The resource for which to allocate a backup buffer.
497  * @interruptible:  Whether any sleeps during allocation should be
498  *                  performed while interruptible.
499  * @val_buf:        On successful return contains data about the
500  *                  reserved and validated backup buffer.
501  */
502 static int
503 vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
504 			  struct vmw_resource *res,
505 			  bool interruptible,
506 			  struct ttm_validate_buffer *val_buf)
507 {
508 	struct ttm_operation_ctx ctx = { true, false };
509 	struct list_head val_list;
510 	bool backup_dirty = false;
511 	int ret;
512 
513 	if (unlikely(res->backup == NULL)) {
514 		ret = vmw_resource_buf_alloc(res, interruptible);
515 		if (unlikely(ret != 0))
516 			return ret;
517 	}
518 
519 	INIT_LIST_HEAD(&val_list);
520 	ttm_bo_get(&res->backup->base);
521 	val_buf->bo = &res->backup->base;
522 	val_buf->num_shared = 0;
523 	list_add_tail(&val_buf->head, &val_list);
524 	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
525 	if (unlikely(ret != 0))
526 		goto out_no_reserve;
527 
528 	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
529 		return 0;
530 
531 	backup_dirty = res->backup_dirty;
532 	ret = ttm_bo_validate(&res->backup->base,
533 			      res->func->backup_placement,
534 			      &ctx);
535 
536 	if (unlikely(ret != 0))
537 		goto out_no_validate;
538 
539 	return 0;
540 
541 out_no_validate:
542 	ttm_eu_backoff_reservation(ticket, &val_list);
543 out_no_reserve:
544 	ttm_bo_put(val_buf->bo);
545 	val_buf->bo = NULL;
546 	if (backup_dirty)
547 		vmw_bo_unreference(&res->backup);
548 
549 	return ret;
550 }
551 
552 /*
553  * vmw_resource_reserve - Reserve a resource for command submission
554  *
555  * @res:            The resource to reserve.
556  *
557  * This function takes the resource off the LRU list and makes sure
558  * a backup buffer is present for guest-backed resources. However,
559  * the buffer may not be bound to the resource at this point.
560  *
561  */
562 int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
563 			 bool no_backup)
564 {
565 	struct vmw_private *dev_priv = res->dev_priv;
566 	int ret;
567 
568 	spin_lock(&dev_priv->resource_lock);
569 	list_del_init(&res->lru_head);
570 	spin_unlock(&dev_priv->resource_lock);
571 
572 	if (res->func->needs_backup && res->backup == NULL &&
573 	    !no_backup) {
574 		ret = vmw_resource_buf_alloc(res, interruptible);
575 		if (unlikely(ret != 0)) {
576 			DRM_ERROR("Failed to allocate a backup buffer "
577 				  "of size %lu bytes\n",
578 				  (unsigned long) res->backup_size);
579 			return ret;
580 		}
581 	}
582 
583 	return 0;
584 }
585 
586 /**
587  * vmw_resource_backoff_reservation - Unreserve and unreference a
588  *                                    backup buffer
589  *
590  * @ticket:         The ww acquire ctx used for reservation.
591  * @val_buf:        Backup buffer information.
592  */
593 static void
594 vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
595 				 struct ttm_validate_buffer *val_buf)
596 {
597 	struct list_head val_list;
598 
599 	if (likely(val_buf->bo == NULL))
600 		return;
601 
602 	INIT_LIST_HEAD(&val_list);
603 	list_add_tail(&val_buf->head, &val_list);
604 	ttm_eu_backoff_reservation(ticket, &val_list);
605 	ttm_bo_put(val_buf->bo);
606 	val_buf->bo = NULL;
607 }
608 
609 /**
610  * vmw_resource_do_evict - Evict a resource, and transfer its data
611  *                         to a backup buffer.
612  *
613  * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
614  * @res:            The resource to evict.
615  * @interruptible:  Whether to wait interruptible.
616  */
617 static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
618 				 struct vmw_resource *res, bool interruptible)
619 {
620 	struct ttm_validate_buffer val_buf;
621 	const struct vmw_res_func *func = res->func;
622 	int ret;
623 
624 	BUG_ON(!func->may_evict);
625 
626 	val_buf.bo = NULL;
627 	val_buf.num_shared = 0;
628 	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
629 	if (unlikely(ret != 0))
630 		return ret;
631 
632 	if (unlikely(func->unbind != NULL &&
633 		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
634 		ret = func->unbind(res, res->res_dirty, &val_buf);
635 		if (unlikely(ret != 0))
636 			goto out_no_unbind;
637 		vmw_resource_mob_detach(res);
638 	}
639 	ret = func->destroy(res);
640 	res->backup_dirty = true;
641 	res->res_dirty = false;
642 out_no_unbind:
643 	vmw_resource_backoff_reservation(ticket, &val_buf);
644 
645 	return ret;
646 }
647 
648 
649 /**
650  * vmw_resource_validate - Make a resource up-to-date and visible
651  *                         to the device.
652  * @res: The resource to make visible to the device.
653  * @intr: Perform waits interruptible if possible.
654  * @dirtying: Pending GPU operation will dirty the resource
655  *
656  * On successful return, any backup DMA buffer pointed to by @res->backup will
657  * be reserved and validated.
658  * On hardware resource shortage, this function will repeatedly evict
659  * resources of the same type until the validation succeeds.
660  *
661  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
662  * on failure.
663  */
664 int vmw_resource_validate(struct vmw_resource *res, bool intr,
665 			  bool dirtying)
666 {
667 	int ret;
668 	struct vmw_resource *evict_res;
669 	struct vmw_private *dev_priv = res->dev_priv;
670 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
671 	struct ttm_validate_buffer val_buf;
672 	unsigned err_count = 0;
673 
674 	if (!res->func->create)
675 		return 0;
676 
677 	val_buf.bo = NULL;
678 	val_buf.num_shared = 0;
679 	if (res->backup)
680 		val_buf.bo = &res->backup->base;
681 	do {
682 		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
683 		if (likely(ret != -EBUSY))
684 			break;
685 
686 		spin_lock(&dev_priv->resource_lock);
687 		if (list_empty(lru_list) || !res->func->may_evict) {
688 			DRM_ERROR("Out of device resources "
689 				  "for %s.\n", res->func->type_name);
690 			ret = -EBUSY;
691 			spin_unlock(&dev_priv->resource_lock);
692 			break;
693 		}
694 
695 		evict_res = vmw_resource_reference
696 			(list_first_entry(lru_list, struct vmw_resource,
697 					  lru_head));
698 		list_del_init(&evict_res->lru_head);
699 
700 		spin_unlock(&dev_priv->resource_lock);
701 
702 		/* Trylock backup buffers with a NULL ticket. */
703 		ret = vmw_resource_do_evict(NULL, evict_res, intr);
704 		if (unlikely(ret != 0)) {
705 			spin_lock(&dev_priv->resource_lock);
706 			list_add_tail(&evict_res->lru_head, lru_list);
707 			spin_unlock(&dev_priv->resource_lock);
708 			if (ret == -ERESTARTSYS ||
709 			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
710 				vmw_resource_unreference(&evict_res);
711 				goto out_no_validate;
712 			}
713 		}
714 
715 		vmw_resource_unreference(&evict_res);
716 	} while (1);
717 
718 	if (unlikely(ret != 0))
719 		goto out_no_validate;
720 	else if (!res->func->needs_backup && res->backup) {
721 		WARN_ON_ONCE(vmw_resource_mob_attached(res));
722 		vmw_bo_unreference(&res->backup);
723 	}
724 
725 	return 0;
726 
727 out_no_validate:
728 	return ret;
729 }
730 
731 
732 /**
733  * vmw_resource_unbind_list
734  *
735  * @vbo: Pointer to the current backing MOB.
736  *
737  * Evicts the Guest Backed hardware resource if the backup
738  * buffer is being moved out of MOB memory.
739  * Note that this function will not race with the resource
740  * validation code, since resource validation and eviction
741  * both require the backup buffer to be reserved.
742  */
743 void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
744 {
745 	struct ttm_validate_buffer val_buf = {
746 		.bo = &vbo->base,
747 		.num_shared = 0
748 	};
749 
750 	dma_resv_assert_held(vbo->base.base.resv);
751 	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
752 		struct rb_node *node = vbo->res_tree.rb_node;
753 		struct vmw_resource *res =
754 			container_of(node, struct vmw_resource, mob_node);
755 
756 		if (!WARN_ON_ONCE(!res->func->unbind))
757 			(void) res->func->unbind(res, res->res_dirty, &val_buf);
758 
759 		res->backup_dirty = true;
760 		res->res_dirty = false;
761 		vmw_resource_mob_detach(res);
762 	}
763 
764 	(void) ttm_bo_wait(&vbo->base, false, false);
765 }
766 
767 
768 /**
769  * vmw_query_readback_all - Read back cached query states
770  *
771  * @dx_query_mob: Buffer containing the DX query MOB
772  *
773  * Read back cached states from the device if they exist.  This function
774  * assumes binding_mutex is held.
775  */
776 int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
777 {
778 	struct vmw_resource *dx_query_ctx;
779 	struct vmw_private *dev_priv;
780 	struct {
781 		SVGA3dCmdHeader header;
782 		SVGA3dCmdDXReadbackAllQuery body;
783 	} *cmd;
784 
785 
786 	/* No query bound, so do nothing */
787 	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
788 		return 0;
789 
790 	dx_query_ctx = dx_query_mob->dx_query_ctx;
791 	dev_priv     = dx_query_ctx->dev_priv;
792 
793 	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
794 	if (unlikely(cmd == NULL))
795 		return -ENOMEM;
796 
797 	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
798 	cmd->header.size = sizeof(cmd->body);
799 	cmd->body.cid    = dx_query_ctx->id;
800 
801 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
802 
803 	/* Triggers a rebind the next time the affected context is bound */
804 	dx_query_mob->dx_query_ctx = NULL;
805 
806 	return 0;
807 }
808 
809 
810 
811 /**
812  * vmw_query_move_notify - Read back cached query states
813  *
814  * @bo: The TTM buffer object about to move.
815  * @old_mem: The memory region @bo is moving from.
816  * @new_mem: The memory region @bo is moving to.
817  *
818  * Called before the query MOB is swapped out to read back cached query
819  * states from the device.
820  */
821 void vmw_query_move_notify(struct ttm_buffer_object *bo,
822 			   struct ttm_resource *old_mem,
823 			   struct ttm_resource *new_mem)
824 {
825 	struct vmw_buffer_object *dx_query_mob;
826 	struct ttm_device *bdev = bo->bdev;
827 	struct vmw_private *dev_priv;
828 
829 	dev_priv = container_of(bdev, struct vmw_private, bdev);
830 
831 	mutex_lock(&dev_priv->binding_mutex);
832 
833 	/* If BO is being moved from MOB to system memory */
834 	if (new_mem->mem_type == TTM_PL_SYSTEM &&
835 	    old_mem->mem_type == VMW_PL_MOB) {
836 		struct vmw_fence_obj *fence;
837 
838 		dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
839 		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
840 			mutex_unlock(&dev_priv->binding_mutex);
841 			return;
842 		}
843 
844 		(void) vmw_query_readback_all(dx_query_mob);
845 		mutex_unlock(&dev_priv->binding_mutex);
846 
847 		/* Create a fence and attach the BO to it */
848 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
849 		vmw_bo_fence_single(bo, fence);
850 
851 		if (fence != NULL)
852 			vmw_fence_obj_unreference(&fence);
853 
854 		(void) ttm_bo_wait(bo, false, false);
855 	} else
856 		mutex_unlock(&dev_priv->binding_mutex);
857 }
858 
859 /**
860  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
861  *
862  * @res:            The resource being queried.
863  */
864 bool vmw_resource_needs_backup(const struct vmw_resource *res)
865 {
866 	return res->func->needs_backup;
867 }
868 
869 /**
870  * vmw_resource_evict_type - Evict all resources of a specific type
871  *
872  * @dev_priv:       Pointer to a device private struct
873  * @type:           The resource type to evict
874  *
875  * To avoid thrashing or starvation, or as part of the hibernation sequence,
876  * try to evict all evictable resources of a specific type.
877  */
878 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
879 				    enum vmw_res_type type)
880 {
881 	struct list_head *lru_list = &dev_priv->res_lru[type];
882 	struct vmw_resource *evict_res;
883 	unsigned err_count = 0;
884 	int ret;
885 	struct ww_acquire_ctx ticket;
886 
887 	do {
888 		spin_lock(&dev_priv->resource_lock);
889 
890 		if (list_empty(lru_list))
891 			goto out_unlock;
892 
893 		evict_res = vmw_resource_reference(
894 			list_first_entry(lru_list, struct vmw_resource,
895 					 lru_head));
896 		list_del_init(&evict_res->lru_head);
897 		spin_unlock(&dev_priv->resource_lock);
898 
899 		/* Wait-lock backup buffers with a ticket. */
900 		ret = vmw_resource_do_evict(&ticket, evict_res, false);
901 		if (unlikely(ret != 0)) {
902 			spin_lock(&dev_priv->resource_lock);
903 			list_add_tail(&evict_res->lru_head, lru_list);
904 			spin_unlock(&dev_priv->resource_lock);
905 			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
906 				vmw_resource_unreference(&evict_res);
907 				return;
908 			}
909 		}
910 
911 		vmw_resource_unreference(&evict_res);
912 	} while (1);
913 
914 out_unlock:
915 	spin_unlock(&dev_priv->resource_lock);
916 }
917 
918 /**
919  * vmw_resource_evict_all - Evict all evictable resources
920  *
921  * @dev_priv:       Pointer to a device private struct
922  *
923  * To avoid thrashing or starvation, or as part of the hibernation sequence,
924  * evict all evictable resources. In particular this means that all
925  * guest-backed resources that are registered with the device are
926  * evicted and the OTable becomes clean.
927  */
928 void vmw_resource_evict_all(struct vmw_private *dev_priv)
929 {
930 	enum vmw_res_type type;
931 
932 	mutex_lock(&dev_priv->cmdbuf_mutex);
933 
934 	for (type = 0; type < vmw_res_max; ++type)
935 		vmw_resource_evict_type(dev_priv, type);
936 
937 	mutex_unlock(&dev_priv->cmdbuf_mutex);
938 }
939 
940 /*
941  * vmw_resource_pin - Add a pin reference on a resource
942  *
943  * @res: The resource to add a pin reference on
944  *
945  * This function adds a pin reference, and if needed validates the resource.
946  * Having a pin reference means that the resource can never be evicted, and
947  * its id will never change as long as there is a pin reference.
948  * This function returns 0 on success and a negative error code on failure.
949  */
950 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
951 {
952 	struct ttm_operation_ctx ctx = { interruptible, false };
953 	struct vmw_private *dev_priv = res->dev_priv;
954 	int ret;
955 
956 	mutex_lock(&dev_priv->cmdbuf_mutex);
957 	ret = vmw_resource_reserve(res, interruptible, false);
958 	if (ret)
959 		goto out_no_reserve;
960 
961 	if (res->pin_count == 0) {
962 		struct vmw_buffer_object *vbo = NULL;
963 
964 		if (res->backup) {
965 			vbo = res->backup;
966 
967 			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
968 			if (ret)
969 				goto out_no_validate;
970 			if (!vbo->base.pin_count) {
971 				ret = ttm_bo_validate
972 					(&vbo->base,
973 					 res->func->backup_placement,
974 					 &ctx);
975 				if (ret) {
976 					ttm_bo_unreserve(&vbo->base);
977 					goto out_no_validate;
978 				}
979 			}
980 
981 			/* Do we really need to pin the MOB as well? */
982 			vmw_bo_pin_reserved(vbo, true);
983 		}
984 		ret = vmw_resource_validate(res, interruptible, true);
985 		if (vbo)
986 			ttm_bo_unreserve(&vbo->base);
987 		if (ret)
988 			goto out_no_validate;
989 	}
990 	res->pin_count++;
991 
992 out_no_validate:
993 	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
994 out_no_reserve:
995 	mutex_unlock(&dev_priv->cmdbuf_mutex);
996 
997 	return ret;
998 }
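
/*
 * Illustrative sketch, not part of the driver: pin references nest, and a
 * caller that needs a stable resource id for the lifetime of some object
 * brackets that lifetime with a pin/unpin pair (vmw_resource_unpin() is
 * defined below). The helper setup_uses_stable_id() is hypothetical.
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *
 *	setup_uses_stable_id(res->id);
 *
 *	vmw_resource_unpin(res);
 */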
999 
1000 /**
1001  * vmw_resource_unpin - Remove a pin reference from a resource
1002  *
1003  * @res: The resource to remove a pin reference from
1004  *
1005  * Having a pin reference means that the resource can never be evicted, and
1006  * its id will never change as long as there is a pin reference.
1007  */
1008 void vmw_resource_unpin(struct vmw_resource *res)
1009 {
1010 	struct vmw_private *dev_priv = res->dev_priv;
1011 	int ret;
1012 
1013 	mutex_lock(&dev_priv->cmdbuf_mutex);
1014 
1015 	ret = vmw_resource_reserve(res, false, true);
1016 	WARN_ON(ret);
1017 
1018 	WARN_ON(res->pin_count == 0);
1019 	if (--res->pin_count == 0 && res->backup) {
1020 		struct vmw_buffer_object *vbo = res->backup;
1021 
1022 		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1023 		vmw_bo_pin_reserved(vbo, false);
1024 		ttm_bo_unreserve(&vbo->base);
1025 	}
1026 
1027 	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1028 
1029 	mutex_unlock(&dev_priv->cmdbuf_mutex);
1030 }
1031 
1032 /**
1033  * vmw_res_type - Return the resource type
1034  *
1035  * @res: Pointer to the resource
1036  */
1037 enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1038 {
1039 	return res->func->res_type;
1040 }
1041 
1042 /**
1043  * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1044  * sequential range of touched backing store memory.
1045  * @res: The resource.
1046  * @start: The first page touched.
1047  * @end: The last page touched + 1.
1048  */
1049 void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1050 			       pgoff_t end)
1051 {
1052 	if (res->dirty)
1053 		res->func->dirty_range_add(res, start << PAGE_SHIFT,
1054 					   end << PAGE_SHIFT);
1055 }
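
/*
 * Illustrative sketch, not part of the driver: callers pass page offsets,
 * so a touched byte range [offset, offset + size) within the backing store
 * is converted as follows before the call; the variable names are
 * hypothetical. This mirrors the conversion used in
 * vmw_resource_do_validate() above.
 *
 *	pgoff_t start = offset >> PAGE_SHIFT;
 *	pgoff_t end = __KERNEL_DIV_ROUND_UP(offset + size, PAGE_SIZE);
 *
 *	vmw_resource_dirty_update(res, start, end);
 */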
1056 
1057 /**
1058  * vmw_resources_clean - Clean resources intersecting a mob range
1059  * @vbo: The mob buffer object
1060  * @start: The mob page offset starting the range
1061  * @end: The mob page offset ending the range
1062  * @num_prefault: Returns how many pages including the first have been
1063  * cleaned and are ok to prefault
1064  */
1065 int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
1066 			pgoff_t end, pgoff_t *num_prefault)
1067 {
1068 	struct rb_node *cur = vbo->res_tree.rb_node;
1069 	struct vmw_resource *found = NULL;
1070 	unsigned long res_start = start << PAGE_SHIFT;
1071 	unsigned long res_end = end << PAGE_SHIFT;
1072 	unsigned long last_cleaned = 0;
1073 
1074 	/*
1075 	 * Find the resource with lowest backup_offset that intersects the
1076 	 * range.
1077 	 */
1078 	while (cur) {
1079 		struct vmw_resource *cur_res =
1080 			container_of(cur, struct vmw_resource, mob_node);
1081 
1082 		if (cur_res->backup_offset >= res_end) {
1083 			cur = cur->rb_left;
1084 		} else if (cur_res->backup_offset + cur_res->backup_size <=
1085 			   res_start) {
1086 			cur = cur->rb_right;
1087 		} else {
1088 			found = cur_res;
1089 			cur = cur->rb_left;
1090 			/* Continue to look for resources with lower offsets */
1091 		}
1092 	}
1093 
1094 	/*
1095 	 * In order of increasing backup_offset, clean dirty resources
1096 	 * intersecting the range.
1097 	 */
1098 	while (found) {
1099 		if (found->res_dirty) {
1100 			int ret;
1101 
1102 			if (!found->func->clean)
1103 				return -EINVAL;
1104 
1105 			ret = found->func->clean(found);
1106 			if (ret)
1107 				return ret;
1108 
1109 			found->res_dirty = false;
1110 		}
1111 		last_cleaned = found->backup_offset + found->backup_size;
1112 		cur = rb_next(&found->mob_node);
1113 		if (!cur)
1114 			break;
1115 
1116 		found = container_of(cur, struct vmw_resource, mob_node);
1117 		if (found->backup_offset >= res_end)
1118 			break;
1119 	}
1120 
1121 	/*
1122 	 * Set number of pages allowed prefaulting and fence the buffer object
1123 	 */
1124 	*num_prefault = 1;
1125 	if (last_cleaned > res_start) {
1126 		struct ttm_buffer_object *bo = &vbo->base;
1127 
1128 		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1129 						      PAGE_SIZE);
1130 		vmw_bo_fence_single(bo, NULL);
1131 	}
1132 
1133 	return 0;
1134 }
1135