/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Put the RLC into safe mode if it is enabled and has not already entered
 * safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Take the RLC out of safe mode if it is enabled and has previously entered
 * safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
{
	if (!(adev->gfx.rlc.in_safe_mode))
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = false;
	}
}
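
/*
 * Illustrative usage (a sketch, not taken from a particular IP block):
 * GFX IP code is expected to bracket RLC-sensitive register programming
 * with the enter/exit pair, e.g.
 *
 *	amdgpu_gfx_rlc_enter_safe_mode(adev);
 *	... program CGCG/MGCG/3D CGCG related registers ...
 *	amdgpu_gfx_rlc_exit_safe_mode(adev);
 *
 * Both helpers effectively do nothing when the RLC is disabled or when
 * none of the GFX clockgating flags are set in adev->cg_flags.
 */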

/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: the size of the save restore block, in dwords
 *
 * Allocate and set up the RLC save restore block.
 * Returns 0 on success or a negative error code if allocation fails.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate save restore block */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer */
	src_ptr = adev->gfx.rlc.reg_list;
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}
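
/*
 * Note: amdgpu_gfx_rlc_init_sr() assumes the IP-specific code has already
 * pointed adev->gfx.rlc.reg_list at its register list and set
 * adev->gfx.rlc.reg_list_size accordingly; the helper only allocates the
 * BO and copies that list into it.
 */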

/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and set up the RLC clear state block.
 * Returns 0 on success or a negative error code if allocation fails.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	u32 dws;
	int r;

	/* allocate clear state block */
	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->gfx.rlc.clear_state_obj,
				    &adev->gfx.rlc.clear_state_gpu_addr,
				    (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	return 0;
}
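
/*
 * Note: only the clear state BO is allocated here; the clear state data
 * itself is typically written by the IP-specific code (for instance via
 * the rlc funcs' get_csb_buffer callback) before the CSB address is handed
 * to the RLC.
 */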

/**
 * amdgpu_gfx_rlc_init_cpt - Init CP table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC CP table BO and fill it with the CP firmware jump tables.
 * Returns 0 on success or a negative error code if allocation fails.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cp table */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_setup_cp_table - set up the CP table buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Write the CP firmware jump tables into the CP table buffer.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
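
/*
 * A sketch of the resulting layout, assuming get_cp_table_num() reports all
 * five firmwares: the jump tables are packed back to back in dwords, so
 * adev->gfx.rlc.cp_table_size must be large enough to hold them all.
 *
 *	+---------+----------+---------+----------+-----------+
 *	|  CE jt  |  PFP jt  |  ME jt  |  MEC jt  |  MEC2 jt  |
 *	+---------+----------+---------+----------+-----------+
 */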

/**
 * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the three BOs used for the RLC save restore block, clear state block
 * and jump table block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	}

	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}