/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Monk.liu@amd.com
 */

#include <drm/drm_exec.h>

#include "amdgpu.h"

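/*
 * amdgpu_csa_vaddr returns the GPU virtual address reserved for the static
 * CSA (Context Save Area): the top of the VM address space minus the
 * reserved VA region, sign-extended with amdgpu_gmc_sign_extend() to
 * account for the GMC address hole.
 */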
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

	addr -= AMDGPU_VA_RESERVED_SIZE;
	addr = amdgpu_gmc_sign_extend(addr);

	return addr;
}

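/*
 * amdgpu_allocate_static_csa creates the kernel BO backing the static CSA
 * in the given memory domain, clears it through its CPU mapping and stores
 * the CPU address in adev->virt.csa_cpu_addr.
 */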
int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
				u32 domain, uint32_t size)
{
	void *ptr;

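	/* Create a pinned, CPU-mapped kernel BO to back the CSA. */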
	amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				domain, bo,
				NULL, &ptr);
	if (!*bo)
		return -ENOMEM;

	memset(ptr, 0, size);
	adev->virt.csa_cpu_addr = ptr;
	return 0;
}

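/*
 * amdgpu_free_static_csa releases the kernel BO created by
 * amdgpu_allocate_static_csa.
 */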
void amdgpu_free_static_csa(struct amdgpu_bo **bo)
{
	amdgpu_bo_free_kernel(bo, NULL, NULL);
}

/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 * It maps the virtual address returned by amdgpu_csa_vaddr() into this VM,
 * and each GFX command submission should use that virtual address within
 * its META_DATA init package to support SRIOV gfx preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
			  uint64_t csa_addr, uint32_t size)
{
	struct drm_exec exec;
	int r;

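	/* Reserve the VM root PD and the CSA BO, retrying on contention. */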
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r)) {
			DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
			goto error;
		}
	}

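	/* Add the CSA BO to this VM and map it R/W/X at csa_addr. */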
	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!*bo_va) {
		r = -ENOMEM;
		goto error;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

error:
	drm_exec_fini(&exec);
	return r;
}

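/*
 * amdgpu_unmap_static_csa removes the CSA mapping created by
 * amdgpu_map_static_csa and drops the corresponding bo_va.
 */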
int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			    struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
			    uint64_t csa_addr)
{
	struct drm_exec exec;
	int r;

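	/* Same locking scheme as amdgpu_map_static_csa: root PD plus CSA BO. */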
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r)) {
			DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
			goto error;
		}
	}

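	/* Remove the mapping at csa_addr, then drop the bo_va itself. */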
	r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
	if (r) {
		DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
		goto error;
	}

	amdgpu_vm_bo_del(adev, bo_va);

error:
	drm_exec_fini(&exec);
	return r;
}
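
/*
 * Illustrative sketch only, not an exact driver call site: a caller that
 * owns a VM is expected to pair these helpers roughly as follows, assuming
 * the static CSA BO (csa_bo here) was already created through
 * amdgpu_allocate_static_csa() with some size csa_size:
 *
 *	struct amdgpu_bo_va *csa_va;
 *	uint64_t csa_addr = amdgpu_csa_vaddr(adev);
 *	int r;
 *
 *	r = amdgpu_map_static_csa(adev, vm, csa_bo, &csa_va, csa_addr, csa_size);
 *	if (r)
 *		return r;
 *
 *	... submit GFX work referencing csa_addr in the META_DATA packet ...
 *
 *	r = amdgpu_unmap_static_csa(adev, vm, csa_bo, csa_va, csa_addr);
 */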