/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/radeon_drm.h>

#include "atom.h"
#include "cayman_blit_shaders.h"
#include "clearstate_cayman.h"
#include "evergreen.h"
#include "ni.h"
#include "ni_reg.h"
#include "nid.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_ucode.h"

/*
 * Indirect registers accessor
 */
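/**
 * tn_smc_rreg - read a TN SMC indirect register
 *
 * @rdev: radeon_device pointer
 * @reg: SMC register offset
 *
 * Reads an SMC register through the index/data register pair,
 * serialized by the smc_idx_lock.  Returns the register value.
 */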
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	r = RREG32(TN_SMC_IND_DATA_0);
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
	return r;
}

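/**
 * tn_smc_wreg - write a TN SMC indirect register
 *
 * @rdev: radeon_device pointer
 * @reg: SMC register offset
 * @v: value to write
 *
 * Writes an SMC register through the index/data register pair,
 * serialized by the smc_idx_lock.
 */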
void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	WREG32(TN_SMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}

static const u32 tn_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x98f0,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8c30,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c10,
	0x8c14,
	0x8d8c,
	0x8cf0,
	0x8e38,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x8978,
	0x88d4,
	0x900c,
	0x9100,
	0x913c,
	0x90e8,
	0x9354,
	0xa008,
	0x98f8,
	0x9148,
	0x914c,
	0x3f94,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x3f90,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x8030,
	0x9150,
	0x9a60,
	0x920c,
	0x9210,
	0x9228,
	0x922c,
	0x9244,
	0x9248,
	0x91e8,
	0x9294,
	0x9208,
	0x9224,
	0x9240,
	0x9220,
	0x923c,
	0x9258,
	0x9744,
	0xa200,
	0xa204,
	0xa208,
	0xa20c,
	0x8d58,
	0x9030,
	0x9034,
	0x9038,
	0x903c,
	0x9040,
	0x9654,
	0x897c,
	0xa210,
	0xa214,
	0x9868,
	0xa02c,
	0x9664,
	0x9698,
	0x949c,
	0x8e10,
	0x8e18,
	0x8c50,
	0x8c58,
	0x8c60,
	0x8c68,
	0x89b4,
	0x9830,
	0x802c,
};

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");


static const u32 cayman_golden_registers2[] =
{
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};

static const u32 cayman_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 dvst_golden_registers2[] =
{
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
	0x8fc, 0x0e000000, 0
};

static const u32 dvst_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 scrapper_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};

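/**
 * ni_init_golden_registers - program golden register settings
 *
 * @rdev: radeon_device pointer
 *
 * Programs the recommended ("golden") register values for the
 * asic family (Cayman, or the DVST/Scrapper variants of Aruba,
 * selected by PCI device id).
 */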
static void ni_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_CAYMAN:
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers,
						 (const u32)ARRAY_SIZE(cayman_golden_registers));
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers2,
						 (const u32)ARRAY_SIZE(cayman_golden_registers2));
		break;
	case CHIP_ARUBA:
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9903) ||
		    (rdev->pdev->device == 0x9904) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990A) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990D) ||
		    (rdev->pdev->device == 0x990E) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9913) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9918)) {
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers,
							 (const u32)ARRAY_SIZE(dvst_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		} else {
			radeon_program_register_sequence(rdev,
							 scrapper_golden_registers,
							 (const u32)ARRAY_SIZE(scrapper_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		}
		break;
	default:
		break;
	}
}

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

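/**
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR5 MC ucode into the hw (BTC/Cayman).  The MC io
 * register table and ucode size are selected by asic family.
 * Returns 0 on success, -EINVAL if no MC firmware is loaded.
 */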
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

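/**
 * ni_init_microcode - fetch the required firmware images
 *
 * @rdev: radeon_device pointer
 *
 * Requests the PFP, ME, RLC, MC and SMC firmware images from
 * userspace (as applicable for the asic family) and validates
 * their sizes.  Returns 0 on success, negative error on failure.
 */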
int ni_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	size_t smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		pr_err("ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			pr_err("smc: error loading firmware \"%s\"\n", fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->size != smc_req_size) {
			pr_err("ni_smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			pr_err("ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/**
 * cayman_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int cayman_get_allowed_info_register(struct radeon_device *rdev,
				     u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

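/**
 * tn_get_temp - get the current GPU temperature
 *
 * @rdev: radeon_device pointer
 *
 * Reads the current graphics north bridge temperature from the
 * SMC (TN).  Returns the temperature in millidegrees Celsius.
 */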
int tn_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
	int actual_temp = (temp / 8) - 49;

	return actual_temp * 1000;
}

/*
 * Core functions
 */
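/**
 * cayman_gpu_init - set up the asic
 *
 * @rdev: radeon_device pointer
 *
 * Configures the gfx engine: per-family limits, tiling and
 * address config, active render backend and SIMD masks, and
 * the hw defaults for the 3D engine.
 */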
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999) ||
		    (rdev->pdev->device == 0x999C)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918) ||
			   (rdev->pdev->device == 0x999D)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 0x1);
	WREG32(SRBM_INT_ACK, 0x1);

	evergreen_fix_pci_max_read_req_size(rdev);

	RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled rbs are just the ones not disabled :) */
	disabled_rb_mask = tmp;
	tmp = 0;
	for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
			disabled_rb_mask &= ~(1 << i);
	}

	for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
		u32 simd_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
		simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
		tmp <<= 16;
		tmp |= simd_disable_bitmap;
	}
	rdev->config.cayman.active_simds = hweight32(~tmp);

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	if (ASIC_IS_DCE6(rdev))
		WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	if ((rdev->config.cayman.max_backends_per_se == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 2) {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		} else {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp,
						rdev->config.cayman.max_backends_per_se *
						rdev->config.cayman.max_shader_engines,
						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	}
	rdev->config.cayman.backend_map = tmp;
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);

	/* set clockgating golden values on TN */
	if (rdev->family == CHIP_ARUBA) {
		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
		tmp &= ~0x00380000;
		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
		tmp &= ~0x0e000000;
		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
	}
}

/*
 * GART
 */
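/**
 * cayman_pcie_gart_tlb_flush - flush the VM TLBs
 *
 * @rdev: radeon_device pointer
 *
 * Flushes the HDP cache and requests a TLB invalidate for
 * VM context 0.
 */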
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

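/**
 * cayman_pcie_gart_enable - set up and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pins the GART page table in VRAM, programs the TLB and L2
 * cache control registers, sets up VM context 0 for the GART
 * aperture and contexts 1-7 for per-process VMs, then flushes
 * the TLBs.  Returns 0 on success, negative error on failure.
 */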
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(6) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
			rdev->vm_manager.max_pfn - 1);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

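/**
 * cayman_pcie_gart_disable - disable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Saves the context 1-7 page table base addresses, disables all
 * VM contexts and unpins the GART page table.
 */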
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 1; i < 8; ++i) {
		rdev->vm_manager.saved_table_addr[i] = RREG32(
			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

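/**
 * cayman_cp_int_cntl_setup - configure CP interrupts for a ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring id (0-2)
 * @cp_int_cntl: CP_INT_CNTL value to program
 *
 * Selects the ring via SRBM_GFX_CNTL and writes its CP
 * interrupt control register.
 */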
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	WREG32(SRBM_GFX_CNTL, RINGID(ring));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
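/**
 * cayman_fence_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: fence to emit
 *
 * Flushes the read caches and emits an EVENT_WRITE_EOP packet
 * that writes the fence sequence number and raises an interrupt.
 */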
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

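/**
 * cayman_ring_ib_execute - schedule an indirect buffer on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to schedule
 *
 * Emits an INDIRECT_BUFFER packet for the given vm id and
 * flushes the read caches for that vm id afterwards.
 */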
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}

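/**
 * cayman_cp_enable - enable/disable the command processor
 *
 * @rdev: radeon_device pointer
 * @enable: true to start the CP ME, false to halt it
 *
 * When disabling, also halts the PFP, disables scratch register
 * writeback and marks the gfx ring as not ready.
 */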
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}

u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else {
		if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
			rptr = RREG32(CP_RB0_RPTR);
		else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
			rptr = RREG32(CP_RB1_RPTR);
		else
			rptr = RREG32(CP_RB2_RPTR);
	}

	return rptr;
}

u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 wptr;

	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
		wptr = RREG32(CP_RB0_WPTR);
	else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
		wptr = RREG32(CP_RB1_WPTR);
	else
		wptr = RREG32(CP_RB2_WPTR);

	return wptr;
}

void cayman_gfx_set_wptr(struct radeon_device *rdev,
			 struct radeon_ring *ring)
{
	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
		WREG32(CP_RB0_WPTR, ring->wptr);
		(void)RREG32(CP_RB0_WPTR);
	} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
		WREG32(CP_RB1_WPTR, ring->wptr);
		(void)RREG32(CP_RB1_WPTR);
	} else {
		WREG32(CP_RB2_WPTR, ring->wptr);
		(void)RREG32(CP_RB2_WPTR);
	}
}

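/**
 * cayman_cp_load_microcode - load the PFP and ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Halts the CP and writes the PFP and ME microcode images into
 * their ucode RAMs.  Returns 0 on success, -EINVAL if the
 * firmware has not been loaded.
 */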
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

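/**
 * cayman_cp_start - initialize the command processor
 *
 * @rdev: radeon_device pointer
 *
 * Emits ME_INITIALIZE, enables the CP and programs the clear
 * context state on the gfx ring.  Returns 0 on success,
 * negative error on failure.
 */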
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring, false);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

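/**
 * cayman_cp_resume - resume the command processor
 *
 * @rdev: radeon_device pointer
 *
 * Soft resets the CP, programs the three ring buffers (size,
 * rptr writeback address, base, read/write pointers), starts
 * the rings and tests ring 0.  Returns 0 on success, negative
 * error on failure.
 */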
cayman_cp_resume(struct radeon_device * rdev)1611 static int cayman_cp_resume(struct radeon_device *rdev)
1612 {
1613 	static const int ridx[] = {
1614 		RADEON_RING_TYPE_GFX_INDEX,
1615 		CAYMAN_RING_TYPE_CP1_INDEX,
1616 		CAYMAN_RING_TYPE_CP2_INDEX
1617 	};
1618 	static const unsigned cp_rb_cntl[] = {
1619 		CP_RB0_CNTL,
1620 		CP_RB1_CNTL,
1621 		CP_RB2_CNTL,
1622 	};
1623 	static const unsigned cp_rb_rptr_addr[] = {
1624 		CP_RB0_RPTR_ADDR,
1625 		CP_RB1_RPTR_ADDR,
1626 		CP_RB2_RPTR_ADDR
1627 	};
1628 	static const unsigned cp_rb_rptr_addr_hi[] = {
1629 		CP_RB0_RPTR_ADDR_HI,
1630 		CP_RB1_RPTR_ADDR_HI,
1631 		CP_RB2_RPTR_ADDR_HI
1632 	};
1633 	static const unsigned cp_rb_base[] = {
1634 		CP_RB0_BASE,
1635 		CP_RB1_BASE,
1636 		CP_RB2_BASE
1637 	};
1638 	static const unsigned cp_rb_rptr[] = {
1639 		CP_RB0_RPTR,
1640 		CP_RB1_RPTR,
1641 		CP_RB2_RPTR
1642 	};
1643 	static const unsigned cp_rb_wptr[] = {
1644 		CP_RB0_WPTR,
1645 		CP_RB1_WPTR,
1646 		CP_RB2_WPTR
1647 	};
1648 	struct radeon_ring *ring;
1649 	int i, r;
1650 
1651 	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1652 	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1653 				 SOFT_RESET_PA |
1654 				 SOFT_RESET_SH |
1655 				 SOFT_RESET_VGT |
1656 				 SOFT_RESET_SPI |
1657 				 SOFT_RESET_SX));
1658 	RREG32(GRBM_SOFT_RESET);
1659 	mdelay(15);
1660 	WREG32(GRBM_SOFT_RESET, 0);
1661 	RREG32(GRBM_SOFT_RESET);
1662 
1663 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
1664 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
1665 
1666 	/* Set the write pointer delay */
1667 	WREG32(CP_RB_WPTR_DELAY, 0);
1668 
1669 	WREG32(CP_DEBUG, (1 << 27));
1670 
1671 	/* set the wb address whether it's enabled or not */
1672 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1673 	WREG32(SCRATCH_UMSK, 0xff);
1674 
1675 	for (i = 0; i < 3; ++i) {
1676 		uint32_t rb_cntl;
1677 		uint64_t addr;
1678 
1679 		/* Set ring buffer size */
1680 		ring = &rdev->ring[ridx[i]];
1681 		rb_cntl = order_base_2(ring->ring_size / 8);
1682 		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
1683 #ifdef __BIG_ENDIAN
1684 		rb_cntl |= BUF_SWAP_32BIT;
1685 #endif
1686 		WREG32(cp_rb_cntl[i], rb_cntl);
1687 
1688 		/* set the wb address whether it's enabled or not */
1689 		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
1690 		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
1691 		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
1692 	}
1693 
1694 	/* set the rb base addr, this causes an internal reset of ALL rings */
1695 	for (i = 0; i < 3; ++i) {
1696 		ring = &rdev->ring[ridx[i]];
1697 		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
1698 	}
1699 
1700 	for (i = 0; i < 3; ++i) {
1701 		/* Initialize the ring buffer's read and write pointers */
1702 		ring = &rdev->ring[ridx[i]];
1703 		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
1704 
1705 		ring->wptr = 0;
1706 		WREG32(cp_rb_rptr[i], 0);
1707 		WREG32(cp_rb_wptr[i], ring->wptr);
1708 
1709 		mdelay(1);
1710 		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
1711 	}
1712 
1713 	/* start the rings */
1714 	cayman_cp_start(rdev);
1715 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1716 	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1717 	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1718 	/* this only tests cp0 */
1719 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1720 	if (r) {
1721 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1722 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1723 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1724 		return r;
1725 	}
1726 
1727 	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
1728 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1729 
1730 	return 0;
1731 }
1732 
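/**
 * cayman_gpu_check_soft_reset - check which blocks are hung
 *
 * @rdev: radeon_device pointer
 *
 * Read the various status registers and build a mask of the
 * blocks (GFX, CP, DMA, RLC, IH, etc.) that appear to be busy
 * or hung. An MC reset request is cleared again as the MC is
 * most likely just busy.
 * Returns the reset mask (0 if the GPU appears idle).
 */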
1733 u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1734 {
1735 	u32 reset_mask = 0;
1736 	u32 tmp;
1737 
1738 	/* GRBM_STATUS */
1739 	tmp = RREG32(GRBM_STATUS);
1740 	if (tmp & (PA_BUSY | SC_BUSY |
1741 		   SH_BUSY | SX_BUSY |
1742 		   TA_BUSY | VGT_BUSY |
1743 		   DB_BUSY | CB_BUSY |
1744 		   GDS_BUSY | SPI_BUSY |
1745 		   IA_BUSY | IA_BUSY_NO_DMA))
1746 		reset_mask |= RADEON_RESET_GFX;
1747 
1748 	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
1749 		   CP_BUSY | CP_COHERENCY_BUSY))
1750 		reset_mask |= RADEON_RESET_CP;
1751 
1752 	if (tmp & GRBM_EE_BUSY)
1753 		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1754 
1755 	/* DMA_STATUS_REG 0 */
1756 	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1757 	if (!(tmp & DMA_IDLE))
1758 		reset_mask |= RADEON_RESET_DMA;
1759 
1760 	/* DMA_STATUS_REG 1 */
1761 	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1762 	if (!(tmp & DMA_IDLE))
1763 		reset_mask |= RADEON_RESET_DMA1;
1764 
1765 	/* SRBM_STATUS2 */
1766 	tmp = RREG32(SRBM_STATUS2);
1767 	if (tmp & DMA_BUSY)
1768 		reset_mask |= RADEON_RESET_DMA;
1769 
1770 	if (tmp & DMA1_BUSY)
1771 		reset_mask |= RADEON_RESET_DMA1;
1772 
1773 	/* SRBM_STATUS */
1774 	tmp = RREG32(SRBM_STATUS);
1775 	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
1776 		reset_mask |= RADEON_RESET_RLC;
1777 
1778 	if (tmp & IH_BUSY)
1779 		reset_mask |= RADEON_RESET_IH;
1780 
1781 	if (tmp & SEM_BUSY)
1782 		reset_mask |= RADEON_RESET_SEM;
1783 
1784 	if (tmp & GRBM_RQ_PENDING)
1785 		reset_mask |= RADEON_RESET_GRBM;
1786 
1787 	if (tmp & VMC_BUSY)
1788 		reset_mask |= RADEON_RESET_VMC;
1789 
1790 	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
1791 		   MCC_BUSY | MCD_BUSY))
1792 		reset_mask |= RADEON_RESET_MC;
1793 
1794 	if (evergreen_is_display_hung(rdev))
1795 		reset_mask |= RADEON_RESET_DISPLAY;
1796 
1797 	/* VM_L2_STATUS */
1798 	tmp = RREG32(VM_L2_STATUS);
1799 	if (tmp & L2_BUSY)
1800 		reset_mask |= RADEON_RESET_VMC;
1801 
1802 	/* Skip MC reset as it's most likely not hung, just busy */
1803 	if (reset_mask & RADEON_RESET_MC) {
1804 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1805 		reset_mask &= ~RADEON_RESET_MC;
1806 	}
1807 
1808 	return reset_mask;
1809 }
1810 
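/**
 * cayman_gpu_soft_reset - soft reset the blocks in the reset mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of blocks to reset (RADEON_RESET_*)
 *
 * Halt the CP and DMA engines and stop the MC, then pulse the
 * GRBM/SRBM soft reset bits for the requested blocks and resume
 * the MC again.
 */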
1811 static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1812 {
1813 	struct evergreen_mc_save save;
1814 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1815 	u32 tmp;
1816 
1817 	if (reset_mask == 0)
1818 		return;
1819 
1820 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1821 
1822 	evergreen_print_gpu_status_regs(rdev);
1823 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
1824 		 RREG32(0x14F8));
1825 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1826 		 RREG32(0x14D8));
1827 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1828 		 RREG32(0x14FC));
1829 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1830 		 RREG32(0x14DC));
1831 
1832 	/* Disable CP parsing/prefetching */
1833 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1834 
1835 	if (reset_mask & RADEON_RESET_DMA) {
1836 		/* dma0 */
1837 		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1838 		tmp &= ~DMA_RB_ENABLE;
1839 		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
1840 	}
1841 
1842 	if (reset_mask & RADEON_RESET_DMA1) {
1843 		/* dma1 */
1844 		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1845 		tmp &= ~DMA_RB_ENABLE;
1846 		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1847 	}
1848 
1849 	udelay(50);
1850 
1851 	evergreen_mc_stop(rdev, &save);
1852 	if (evergreen_mc_wait_for_idle(rdev)) {
1853 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1854 	}
1855 
1856 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1857 		grbm_soft_reset = SOFT_RESET_CB |
1858 			SOFT_RESET_DB |
1859 			SOFT_RESET_GDS |
1860 			SOFT_RESET_PA |
1861 			SOFT_RESET_SC |
1862 			SOFT_RESET_SPI |
1863 			SOFT_RESET_SH |
1864 			SOFT_RESET_SX |
1865 			SOFT_RESET_TC |
1866 			SOFT_RESET_TA |
1867 			SOFT_RESET_VGT |
1868 			SOFT_RESET_IA;
1869 	}
1870 
1871 	if (reset_mask & RADEON_RESET_CP) {
1872 		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
1873 
1874 		srbm_soft_reset |= SOFT_RESET_GRBM;
1875 	}
1876 
1877 	if (reset_mask & RADEON_RESET_DMA)
1878 		srbm_soft_reset |= SOFT_RESET_DMA;
1879 
1880 	if (reset_mask & RADEON_RESET_DMA1)
1881 		srbm_soft_reset |= SOFT_RESET_DMA1;
1882 
1883 	if (reset_mask & RADEON_RESET_DISPLAY)
1884 		srbm_soft_reset |= SOFT_RESET_DC;
1885 
1886 	if (reset_mask & RADEON_RESET_RLC)
1887 		srbm_soft_reset |= SOFT_RESET_RLC;
1888 
1889 	if (reset_mask & RADEON_RESET_SEM)
1890 		srbm_soft_reset |= SOFT_RESET_SEM;
1891 
1892 	if (reset_mask & RADEON_RESET_IH)
1893 		srbm_soft_reset |= SOFT_RESET_IH;
1894 
1895 	if (reset_mask & RADEON_RESET_GRBM)
1896 		srbm_soft_reset |= SOFT_RESET_GRBM;
1897 
1898 	if (reset_mask & RADEON_RESET_VMC)
1899 		srbm_soft_reset |= SOFT_RESET_VMC;
1900 
1901 	if (!(rdev->flags & RADEON_IS_IGP)) {
1902 		if (reset_mask & RADEON_RESET_MC)
1903 			srbm_soft_reset |= SOFT_RESET_MC;
1904 	}
1905 
1906 	if (grbm_soft_reset) {
1907 		tmp = RREG32(GRBM_SOFT_RESET);
1908 		tmp |= grbm_soft_reset;
1909 		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
1910 		WREG32(GRBM_SOFT_RESET, tmp);
1911 		tmp = RREG32(GRBM_SOFT_RESET);
1912 
1913 		udelay(50);
1914 
1915 		tmp &= ~grbm_soft_reset;
1916 		WREG32(GRBM_SOFT_RESET, tmp);
1917 		tmp = RREG32(GRBM_SOFT_RESET);
1918 	}
1919 
1920 	if (srbm_soft_reset) {
1921 		tmp = RREG32(SRBM_SOFT_RESET);
1922 		tmp |= srbm_soft_reset;
1923 		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1924 		WREG32(SRBM_SOFT_RESET, tmp);
1925 		tmp = RREG32(SRBM_SOFT_RESET);
1926 
1927 		udelay(50);
1928 
1929 		tmp &= ~srbm_soft_reset;
1930 		WREG32(SRBM_SOFT_RESET, tmp);
1931 		tmp = RREG32(SRBM_SOFT_RESET);
1932 	}
1933 
1934 	/* Wait a little for things to settle down */
1935 	udelay(50);
1936 
1937 	evergreen_mc_resume(rdev, &save);
1938 	udelay(50);
1939 
1940 	evergreen_print_gpu_status_regs(rdev);
1941 }
1942 
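/**
 * cayman_asic_reset - reset the asic (cayman)
 *
 * @rdev: radeon_device pointer
 * @hard: force a PCI config reset instead of a soft reset
 *
 * Soft reset the blocks that appear hung; if the GPU still looks
 * hung afterwards, fall back to a PCI config reset.
 * Returns 0.
 */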
1943 int cayman_asic_reset(struct radeon_device *rdev, bool hard)
1944 {
1945 	u32 reset_mask;
1946 
1947 	if (hard) {
1948 		evergreen_gpu_pci_config_reset(rdev);
1949 		return 0;
1950 	}
1951 
1952 	reset_mask = cayman_gpu_check_soft_reset(rdev);
1953 
1954 	if (reset_mask)
1955 		r600_set_bios_scratch_engine_hung(rdev, true);
1956 
1957 	cayman_gpu_soft_reset(rdev, reset_mask);
1958 
1959 	reset_mask = cayman_gpu_check_soft_reset(rdev);
1960 
1961 	if (reset_mask)
1962 		evergreen_gpu_pci_config_reset(rdev);
1963 
1964 	r600_set_bios_scratch_engine_hung(rdev, false);
1965 
1966 	return 0;
1967 }
1968 
1969 /**
1970  * cayman_gfx_is_lockup - Check if the GFX engine is locked up
1971  *
1972  * @rdev: radeon_device pointer
1973  * @ring: radeon_ring structure holding ring information
1974  *
1975  * Check if the GFX engine is locked up.
1976  * Returns true if the engine appears to be locked up, false if not.
1977  */
1978 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1979 {
1980 	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1981 
1982 	if (!(reset_mask & (RADEON_RESET_GFX |
1983 			    RADEON_RESET_COMPUTE |
1984 			    RADEON_RESET_CP))) {
1985 		radeon_ring_lockup_update(rdev, ring);
1986 		return false;
1987 	}
1988 	return radeon_ring_test_lockup(rdev, ring);
1989 }
1990 
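/**
 * cayman_uvd_init - init the UVD block (cayman/TN)
 *
 * @rdev: radeon_device pointer
 *
 * Initialize UVD and set up its ring. On failure UVD is disabled
 * so the rest of the driver can keep working without it.
 */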
1991 static void cayman_uvd_init(struct radeon_device *rdev)
1992 {
1993 	int r;
1994 
1995 	if (!rdev->has_uvd)
1996 		return;
1997 
1998 	r = radeon_uvd_init(rdev);
1999 	if (r) {
2000 		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
2001 		/*
2002 		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
2003 		 * uvd_v2_2_resume() fail early so that nothing happens
2004 		 * there. It is therefore pointless to go through that
2005 		 * code, hence we disable UVD here.
2006 		 */
2007 		rdev->has_uvd = false;
2008 		return;
2009 	}
2010 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
2011 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
2012 }
2013 
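/**
 * cayman_uvd_start - resume UVD and start its fence driver
 *
 * @rdev: radeon_device pointer
 *
 * Resume the UVD block and start the fence driver on its ring.
 * On failure the UVD ring size is set to 0 so that
 * cayman_uvd_resume() skips it.
 */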
2014 static void cayman_uvd_start(struct radeon_device *rdev)
2015 {
2016 	int r;
2017 
2018 	if (!rdev->has_uvd)
2019 		return;
2020 
2021 	r = uvd_v2_2_resume(rdev);
2022 	if (r) {
2023 		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
2024 		goto error;
2025 	}
2026 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
2027 	if (r) {
2028 		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
2029 		goto error;
2030 	}
2031 	return;
2032 
2033 error:
2034 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
2035 }
2036 
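/**
 * cayman_uvd_resume - bring up the UVD ring
 *
 * @rdev: radeon_device pointer
 *
 * Initialize and start the UVD ring if UVD was started
 * successfully.
 */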
2037 static void cayman_uvd_resume(struct radeon_device *rdev)
2038 {
2039 	struct radeon_ring *ring;
2040 	int r;
2041 
2042 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
2043 		return;
2044 
2045 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2046 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
2047 	if (r) {
2048 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
2049 		return;
2050 	}
2051 	r = uvd_v1_0_init(rdev);
2052 	if (r) {
2053 		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
2054 		return;
2055 	}
2056 }
2057 
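/**
 * cayman_vce_init - init the VCE block (TN)
 *
 * @rdev: radeon_device pointer
 *
 * Initialize VCE and set up its two rings. On failure VCE is
 * disabled so the rest of the driver can keep working without it.
 */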
2058 static void cayman_vce_init(struct radeon_device *rdev)
2059 {
2060 	int r;
2061 
2062 	/* Only set for CHIP_ARUBA */
2063 	if (!rdev->has_vce)
2064 		return;
2065 
2066 	r = radeon_vce_init(rdev);
2067 	if (r) {
2068 		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
2069 		/*
2070 		 * At this point rdev->vce.vcpu_bo is NULL, which makes
2071 		 * cayman_vce_start() fail early so that nothing happens
2072 		 * there. It is therefore pointless to go through that
2073 		 * code, hence we disable VCE here.
2074 		 */
2075 		rdev->has_vce = false;
2076 		return;
2077 	}
2078 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
2079 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
2080 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
2081 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
2082 }
2083 
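/**
 * cayman_vce_start - resume VCE and start its fence drivers
 *
 * @rdev: radeon_device pointer
 *
 * Resume the VCE block and start the fence drivers on both VCE
 * rings. On failure the VCE ring sizes are set to 0 so that
 * cayman_vce_resume() skips them.
 */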
2084 static void cayman_vce_start(struct radeon_device *rdev)
2085 {
2086 	int r;
2087 
2088 	if (!rdev->has_vce)
2089 		return;
2090 
2091 	r = radeon_vce_resume(rdev);
2092 	if (r) {
2093 		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
2094 		goto error;
2095 	}
2096 	r = vce_v1_0_resume(rdev);
2097 	if (r) {
2098 		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
2099 		goto error;
2100 	}
2101 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
2102 	if (r) {
2103 		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
2104 		goto error;
2105 	}
2106 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
2107 	if (r) {
2108 		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
2109 		goto error;
2110 	}
2111 	return;
2112 
2113 error:
2114 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
2115 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
2116 }
2117 
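/**
 * cayman_vce_resume - bring up the VCE rings
 *
 * @rdev: radeon_device pointer
 *
 * Initialize and start both VCE rings if VCE was started
 * successfully.
 */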
2118 static void cayman_vce_resume(struct radeon_device *rdev)
2119 {
2120 	struct radeon_ring *ring;
2121 	int r;
2122 
2123 	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
2124 		return;
2125 
2126 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
2127 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2128 	if (r) {
2129 		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
2130 		return;
2131 	}
2132 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
2133 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2134 	if (r) {
2135 		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
2136 		return;
2137 	}
2138 	r = vce_v1_0_init(rdev);
2139 	if (r) {
2140 		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
2141 		return;
2142 	}
2143 }
2144 
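/**
 * cayman_startup - program the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Program the MC, GART and GPU, load the microcode, set up
 * interrupts and bring up all the rings (gfx, compute, DMA, UVD,
 * VCE), then initialize the IB pool, VM manager and audio.
 * Called at init and resume time.
 * Returns 0 on success, error on failure.
 */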
2145 static int cayman_startup(struct radeon_device *rdev)
2146 {
2147 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2148 	int r;
2149 
2150 	/* enable pcie gen2 link */
2151 	evergreen_pcie_gen2_enable(rdev);
2152 	/* enable aspm */
2153 	evergreen_program_aspm(rdev);
2154 
2155 	/* scratch needs to be initialized before MC */
2156 	r = r600_vram_scratch_init(rdev);
2157 	if (r)
2158 		return r;
2159 
2160 	evergreen_mc_program(rdev);
2161 
2162 	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
2163 		r = ni_mc_load_microcode(rdev);
2164 		if (r) {
2165 			DRM_ERROR("Failed to load MC firmware!\n");
2166 			return r;
2167 		}
2168 	}
2169 
2170 	r = cayman_pcie_gart_enable(rdev);
2171 	if (r)
2172 		return r;
2173 	cayman_gpu_init(rdev);
2174 
2175 	/* allocate rlc buffers */
2176 	if (rdev->flags & RADEON_IS_IGP) {
2177 		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
2178 		rdev->rlc.reg_list_size =
2179 			(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
2180 		rdev->rlc.cs_data = cayman_cs_data;
2181 		r = sumo_rlc_init(rdev);
2182 		if (r) {
2183 			DRM_ERROR("Failed to init rlc BOs!\n");
2184 			return r;
2185 		}
2186 	}
2187 
2188 	/* allocate wb buffer */
2189 	r = radeon_wb_init(rdev);
2190 	if (r)
2191 		return r;
2192 
2193 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2194 	if (r) {
2195 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2196 		return r;
2197 	}
2198 
2199 	cayman_uvd_start(rdev);
2200 	cayman_vce_start(rdev);
2201 
2202 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
2203 	if (r) {
2204 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2205 		return r;
2206 	}
2207 
2208 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
2209 	if (r) {
2210 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2211 		return r;
2212 	}
2213 
2214 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2215 	if (r) {
2216 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2217 		return r;
2218 	}
2219 
2220 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
2221 	if (r) {
2222 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2223 		return r;
2224 	}
2225 
2226 	/* Enable IRQ */
2227 	if (!rdev->irq.installed) {
2228 		r = radeon_irq_kms_init(rdev);
2229 		if (r)
2230 			return r;
2231 	}
2232 
2233 	r = r600_irq_init(rdev);
2234 	if (r) {
2235 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2236 		radeon_irq_kms_fini(rdev);
2237 		return r;
2238 	}
2239 	evergreen_irq_set(rdev);
2240 
2241 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2242 			     RADEON_CP_PACKET2);
2243 	if (r)
2244 		return r;
2245 
2246 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2247 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2248 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2249 	if (r)
2250 		return r;
2251 
2252 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
2253 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
2254 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2255 	if (r)
2256 		return r;
2257 
2258 	r = cayman_cp_load_microcode(rdev);
2259 	if (r)
2260 		return r;
2261 	r = cayman_cp_resume(rdev);
2262 	if (r)
2263 		return r;
2264 
2265 	r = cayman_dma_resume(rdev);
2266 	if (r)
2267 		return r;
2268 
2269 	cayman_uvd_resume(rdev);
2270 	cayman_vce_resume(rdev);
2271 
2272 	r = radeon_ib_pool_init(rdev);
2273 	if (r) {
2274 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2275 		return r;
2276 	}
2277 
2278 	r = radeon_vm_manager_init(rdev);
2279 	if (r) {
2280 		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
2281 		return r;
2282 	}
2283 
2284 	r = radeon_audio_init(rdev);
2285 	if (r)
2286 		return r;
2287 
2288 	return 0;
2289 }
2290 
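/**
 * cayman_resume - resume the asic after suspend
 *
 * @rdev: radeon_device pointer
 *
 * Re-post the card, restore the golden registers and run
 * cayman_startup() to bring the GPU back up.
 * Returns 0 on success, error on failure.
 */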
2291 int cayman_resume(struct radeon_device *rdev)
2292 {
2293 	int r;
2294 
2295 	/* Do not reset the GPU before posting; on rv770 hw, unlike on
2296 	 * r500 hw, posting will perform the necessary tasks to bring
2297 	 * the GPU back into good shape.
2298 	 */
2299 	/* post card */
2300 	atom_asic_init(rdev->mode_info.atom_context);
2301 
2302 	/* init golden registers */
2303 	ni_init_golden_registers(rdev);
2304 
2305 	if (rdev->pm.pm_method == PM_METHOD_DPM)
2306 		radeon_pm_resume(rdev);
2307 
2308 	rdev->accel_working = true;
2309 	r = cayman_startup(rdev);
2310 	if (r) {
2311 		DRM_ERROR("cayman startup failed on resume\n");
2312 		rdev->accel_working = false;
2313 		return r;
2314 	}
2315 	return r;
2316 }
2317 
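/**
 * cayman_suspend - quiesce the asic before suspend
 *
 * @rdev: radeon_device pointer
 *
 * Stop the CP, DMA and UVD engines and disable interrupts,
 * writeback and the GART in preparation for suspend.
 * Returns 0.
 */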
2318 int cayman_suspend(struct radeon_device *rdev)
2319 {
2320 	radeon_pm_suspend(rdev);
2321 	radeon_audio_fini(rdev);
2322 	radeon_vm_manager_fini(rdev);
2323 	cayman_cp_enable(rdev, false);
2324 	cayman_dma_stop(rdev);
2325 	if (rdev->has_uvd) {
2326 		radeon_uvd_suspend(rdev);
2327 		uvd_v1_0_fini(rdev);
2328 	}
2329 	evergreen_irq_suspend(rdev);
2330 	radeon_wb_disable(rdev);
2331 	cayman_pcie_gart_disable(rdev);
2332 	return 0;
2333 }
2334 
2335 /* The plan is to move initialization into this function and to use
2336  * helper functions so that radeon_device_init does pretty much
2337  * nothing more than call asic-specific functions. This should
2338  * also allow us to remove a bunch of callback functions
2339  * like vram_info.
2340  */
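/**
 * cayman_init - asic specific driver and hw init (cayman)
 *
 * @rdev: radeon_device pointer
 *
 * One time setup: read and post the BIOS, initialize the MC,
 * fence driver, rings and GART, then bring the asic up via
 * cayman_startup().
 * Returns 0 on success, error on failure.
 */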
2341 int cayman_init(struct radeon_device *rdev)
2342 {
2343 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2344 	int r;
2345 
2346 	/* Read BIOS */
2347 	if (!radeon_get_bios(rdev)) {
2348 		if (ASIC_IS_AVIVO(rdev))
2349 			return -EINVAL;
2350 	}
2351 	/* Must be an ATOMBIOS */
2352 	if (!rdev->is_atom_bios) {
2353 		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
2354 		return -EINVAL;
2355 	}
2356 	r = radeon_atombios_init(rdev);
2357 	if (r)
2358 		return r;
2359 
2360 	/* Post card if necessary */
2361 	if (!radeon_card_posted(rdev)) {
2362 		if (!rdev->bios) {
2363 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2364 			return -EINVAL;
2365 		}
2366 		DRM_INFO("GPU not posted. Posting now...\n");
2367 		atom_asic_init(rdev->mode_info.atom_context);
2368 	}
2369 	/* init golden registers */
2370 	ni_init_golden_registers(rdev);
2371 	/* Initialize scratch registers */
2372 	r600_scratch_init(rdev);
2373 	/* Initialize surface registers */
2374 	radeon_surface_init(rdev);
2375 	/* Initialize clocks */
2376 	radeon_get_clock_info(rdev->ddev);
2377 	/* Fence driver */
2378 	radeon_fence_driver_init(rdev);
2379 	/* initialize memory controller */
2380 	r = evergreen_mc_init(rdev);
2381 	if (r)
2382 		return r;
2383 	/* Memory manager */
2384 	r = radeon_bo_init(rdev);
2385 	if (r)
2386 		return r;
2387 
2388 	if (rdev->flags & RADEON_IS_IGP) {
2389 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2390 			r = ni_init_microcode(rdev);
2391 			if (r) {
2392 				DRM_ERROR("Failed to load firmware!\n");
2393 				return r;
2394 			}
2395 		}
2396 	} else {
2397 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
2398 			r = ni_init_microcode(rdev);
2399 			if (r) {
2400 				DRM_ERROR("Failed to load firmware!\n");
2401 				return r;
2402 			}
2403 		}
2404 	}
2405 
2406 	/* Initialize power management */
2407 	radeon_pm_init(rdev);
2408 
2409 	ring->ring_obj = NULL;
2410 	r600_ring_init(rdev, ring, 1024 * 1024);
2411 
2412 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2413 	ring->ring_obj = NULL;
2414 	r600_ring_init(rdev, ring, 64 * 1024);
2415 
2416 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
2417 	ring->ring_obj = NULL;
2418 	r600_ring_init(rdev, ring, 64 * 1024);
2419 
2420 	cayman_uvd_init(rdev);
2421 	cayman_vce_init(rdev);
2422 
2423 	rdev->ih.ring_obj = NULL;
2424 	r600_ih_ring_init(rdev, 64 * 1024);
2425 
2426 	r = r600_pcie_gart_init(rdev);
2427 	if (r)
2428 		return r;
2429 
2430 	rdev->accel_working = true;
2431 	r = cayman_startup(rdev);
2432 	if (r) {
2433 		dev_err(rdev->dev, "disabling GPU acceleration\n");
2434 		cayman_cp_fini(rdev);
2435 		cayman_dma_fini(rdev);
2436 		r600_irq_fini(rdev);
2437 		if (rdev->flags & RADEON_IS_IGP)
2438 			sumo_rlc_fini(rdev);
2439 		radeon_wb_fini(rdev);
2440 		radeon_ib_pool_fini(rdev);
2441 		radeon_vm_manager_fini(rdev);
2442 		radeon_irq_kms_fini(rdev);
2443 		cayman_pcie_gart_fini(rdev);
2444 		rdev->accel_working = false;
2445 	}
2446 
2447 	/* Don't start up if the MC ucode is missing.
2448 	 * The default clocks and voltages before the MC ucode
2449 	 * is loaded are not sufficient for advanced operations.
2450 	 *
2451 	 * We can skip this check for TN, because there is no MC
2452 	 * ucode.
2453 	 */
2454 	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
2455 		DRM_ERROR("radeon: MC ucode required for NI+.\n");
2456 		return -EINVAL;
2457 	}
2458 
2459 	return 0;
2460 }
2461 
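/**
 * cayman_fini - asic specific driver and hw teardown (cayman)
 *
 * @rdev: radeon_device pointer
 *
 * Tear down everything set up by cayman_init() in reverse order.
 */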
2462 void cayman_fini(struct radeon_device *rdev)
2463 {
2464 	radeon_pm_fini(rdev);
2465 	cayman_cp_fini(rdev);
2466 	cayman_dma_fini(rdev);
2467 	r600_irq_fini(rdev);
2468 	if (rdev->flags & RADEON_IS_IGP)
2469 		sumo_rlc_fini(rdev);
2470 	radeon_wb_fini(rdev);
2471 	radeon_vm_manager_fini(rdev);
2472 	radeon_ib_pool_fini(rdev);
2473 	radeon_irq_kms_fini(rdev);
2474 	uvd_v1_0_fini(rdev);
2475 	radeon_uvd_fini(rdev);
2476 	if (rdev->has_vce)
2477 		radeon_vce_fini(rdev);
2478 	cayman_pcie_gart_fini(rdev);
2479 	r600_vram_scratch_fini(rdev);
2480 	radeon_gem_fini(rdev);
2481 	radeon_fence_driver_fini(rdev);
2482 	radeon_bo_fini(rdev);
2483 	radeon_atombios_fini(rdev);
2484 	kfree(rdev->bios);
2485 	rdev->bios = NULL;
2486 }
2487 
2488 /*
2489  * vm
2490  */
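/**
 * cayman_vm_init - set up the VM manager parameters (cayman/TN)
 *
 * @rdev: radeon_device pointer
 *
 * Set the number of VMs and, on IGPs, the vram base offset read
 * from FUS_MC_VM_FB_OFFSET.
 * Returns 0.
 */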
2491 int cayman_vm_init(struct radeon_device *rdev)
2492 {
2493 	/* number of VMs */
2494 	rdev->vm_manager.nvm = 8;
2495 	/* base offset of vram pages */
2496 	if (rdev->flags & RADEON_IS_IGP) {
2497 		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
2498 		tmp <<= 22;
2499 		rdev->vm_manager.vram_base_offset = tmp;
2500 	} else
2501 		rdev->vm_manager.vram_base_offset = 0;
2502 	return 0;
2503 }
2504 
2505 void cayman_vm_fini(struct radeon_device *rdev)
2506 {
2507 }
2508 
2509 /**
2510  * cayman_vm_decode_fault - print human readable fault info
2511  *
2512  * @rdev: radeon_device pointer
2513  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
2514  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
2515  *
2516  * Print human readable fault information (cayman/TN).
2517  */
2518 void cayman_vm_decode_fault(struct radeon_device *rdev,
2519 			    u32 status, u32 addr)
2520 {
2521 	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
2522 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
2523 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
2524 	char *block;
2525 
2526 	switch (mc_id) {
2527 	case 32:
2528 	case 16:
2529 	case 96:
2530 	case 80:
2531 	case 160:
2532 	case 144:
2533 	case 224:
2534 	case 208:
2535 		block = "CB";
2536 		break;
2537 	case 33:
2538 	case 17:
2539 	case 97:
2540 	case 81:
2541 	case 161:
2542 	case 145:
2543 	case 225:
2544 	case 209:
2545 		block = "CB_FMASK";
2546 		break;
2547 	case 34:
2548 	case 18:
2549 	case 98:
2550 	case 82:
2551 	case 162:
2552 	case 146:
2553 	case 226:
2554 	case 210:
2555 		block = "CB_CMASK";
2556 		break;
2557 	case 35:
2558 	case 19:
2559 	case 99:
2560 	case 83:
2561 	case 163:
2562 	case 147:
2563 	case 227:
2564 	case 211:
2565 		block = "CB_IMMED";
2566 		break;
2567 	case 36:
2568 	case 20:
2569 	case 100:
2570 	case 84:
2571 	case 164:
2572 	case 148:
2573 	case 228:
2574 	case 212:
2575 		block = "DB";
2576 		break;
2577 	case 37:
2578 	case 21:
2579 	case 101:
2580 	case 85:
2581 	case 165:
2582 	case 149:
2583 	case 229:
2584 	case 213:
2585 		block = "DB_HTILE";
2586 		break;
2587 	case 38:
2588 	case 22:
2589 	case 102:
2590 	case 86:
2591 	case 166:
2592 	case 150:
2593 	case 230:
2594 	case 214:
2595 		block = "SX";
2596 		break;
2597 	case 39:
2598 	case 23:
2599 	case 103:
2600 	case 87:
2601 	case 167:
2602 	case 151:
2603 	case 231:
2604 	case 215:
2605 		block = "DB_STEN";
2606 		break;
2607 	case 40:
2608 	case 24:
2609 	case 104:
2610 	case 88:
2611 	case 232:
2612 	case 216:
2613 	case 168:
2614 	case 152:
2615 		block = "TC_TFETCH";
2616 		break;
2617 	case 41:
2618 	case 25:
2619 	case 105:
2620 	case 89:
2621 	case 233:
2622 	case 217:
2623 	case 169:
2624 	case 153:
2625 		block = "TC_VFETCH";
2626 		break;
2627 	case 42:
2628 	case 26:
2629 	case 106:
2630 	case 90:
2631 	case 234:
2632 	case 218:
2633 	case 170:
2634 	case 154:
2635 		block = "VC";
2636 		break;
2637 	case 112:
2638 		block = "CP";
2639 		break;
2640 	case 113:
2641 	case 114:
2642 		block = "SH";
2643 		break;
2644 	case 115:
2645 		block = "VGT";
2646 		break;
2647 	case 178:
2648 		block = "IH";
2649 		break;
2650 	case 51:
2651 		block = "RLC";
2652 		break;
2653 	case 55:
2654 		block = "DMA";
2655 		break;
2656 	case 56:
2657 		block = "HDP";
2658 		break;
2659 	default:
2660 		block = "unknown";
2661 		break;
2662 	}
2663 
2664 	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
2665 	       protections, vmid, addr,
2666 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
2667 	       block, mc_id);
2668 }
2669 
2670 /**
2671  * cayman_vm_flush - vm flush using the CP
2672  *
2673  * Update the page table base and flush the VM TLB
2674  * using the CP (cayman-si).
2675  */
2676 void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
2677 		     unsigned vm_id, uint64_t pd_addr)
2678 {
2679 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
2680 	radeon_ring_write(ring, pd_addr >> 12);
2681 
2682 	/* flush hdp cache */
2683 	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
2684 	radeon_ring_write(ring, 0x1);
2685 
2686 	/* bits 0-7 are the VM contexts0-7 */
2687 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
2688 	radeon_ring_write(ring, 1 << vm_id);
2689 
2690 	/* wait for the invalidate to complete */
2691 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2692 	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
2693 				 WAIT_REG_MEM_ENGINE(0))); /* me */
2694 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2695 	radeon_ring_write(ring, 0);
2696 	radeon_ring_write(ring, 0); /* ref */
2697 	radeon_ring_write(ring, 0); /* mask */
2698 	radeon_ring_write(ring, 0x20); /* poll interval */
2699 
2700 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
2701 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2702 	radeon_ring_write(ring, 0x0);
2703 }
2704 
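/**
 * tn_set_vce_clocks - set the VCE engine clock (TN)
 *
 * @rdev: radeon_device pointer
 * @evclk: requested evclk (not used by this function)
 * @ecclk: requested ecclk
 *
 * Look up the clock dividers for the requested ecclk via atombios
 * and program the ECLK post divider, waiting for the clock to
 * stabilize before and after the change.
 * Returns 0 on success, -ETIMEDOUT if the clock never stabilizes.
 */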
2705 int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
2706 {
2707 	struct atom_clock_dividers dividers;
2708 	int r, i;
2709 
2710 	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2711 					   ecclk, false, &dividers);
2712 	if (r)
2713 		return r;
2714 
2715 	for (i = 0; i < 100; i++) {
2716 		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2717 			break;
2718 		mdelay(10);
2719 	}
2720 	if (i == 100)
2721 		return -ETIMEDOUT;
2722 
2723 	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
2724 
2725 	for (i = 0; i < 100; i++) {
2726 		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2727 			break;
2728 		mdelay(10);
2729 	}
2730 	if (i == 100)
2731 		return -ETIMEDOUT;
2732 
2733 	return 0;
2734 }
2735