/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fourcc.h>

#include "atom.h"
#include "avivod.h"
#include "cik.h"
#include "ni.h"
#include "rv770.h"
#include "evergreen.h"
#include "evergreen_blit_shaders.h"
#include "evergreen_reg.h"
#include "evergreend.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_ucode.h"
#include "si.h"

#define DC_HPDx_CONTROL(x)        (DC_HPD1_CONTROL     + (x * 0xc))
#define DC_HPDx_INT_CONTROL(x)    (DC_HPD1_INT_CONTROL + (x * 0xc))
#define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS  + (x * 0xc))

/*
 * Indirect registers accessors
 */
u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_CG_IND_DATA);
	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
	return r;
}

void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
	WREG32(EVERGREEN_CG_IND_DATA, (v));
	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
}

u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
	return r;
}

void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}

u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
	return r;
}

void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}

static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

#include "clearstate_evergreen.h"

static const u32 sumo_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x9830,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c08,
	0x8c0c,
	0x8d8c,
	0x8c20,
	0x8c24,
	0x8c28,
	0x8c18,
	0x8c1c,
	0x8cf0,
	0x8e2c,
	0x8e38,
	0x8c30,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x88d4,
	0xa008,
	0x900c,
	0x9100,
	0x913c,
	0x98f8,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x9148,
	0x914c,
	0x3f90,
	0x3f94,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x9150,
	0x802c,
};

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_program_aspm(struct radeon_device *rdev);

static const u32 evergreen_golden_registers[] =
{
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8a14, 0xffffffff, 0x00000007,
	0x8b10, 0xffffffff, 0x00000000,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0xffffffff, 0x001000f0,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x10830, 0xffffffff, 0x00000011,
	0x11430, 0xffffffff, 0x00000011,
	0x12030, 0xffffffff, 0x00000011,
	0x12c30, 0xffffffff, 0x00000011,
	0xd02c, 0xffffffff, 0x08421000,
	0x240c, 0xffffffff, 0x00000380,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x28a4c, 0x06000000, 0x06000000,
	0x10c, 0x00000001, 0x00000001,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8c04, 0xffffffff, 0x40600060,
	0x8c08, 0xffffffff, 0x001c001c,
	0x8cf0, 0xffffffff, 0x08e00620,
	0x8c20, 0xffffffff, 0x00800080,
	0x8c24, 0xffffffff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0xffffffff, 0x00001010,
	0x28350, 0xffffffff, 0x00000000,
	0xa008, 0xffffffff, 0x00010000,
	0x5c4, 0xffffffff, 0x00000001,
	0x9508, 0xffffffff, 0x00000002,
	0x913c, 0x0000000f, 0x0000000a
};

static const u32 evergreen_golden_registers2[] =
{
	0x2f4c, 0xffffffff, 0x00000000,
	0x54f4, 0xffffffff, 0x00000000,
	0x54f0, 0xffffffff, 0x00000000,
	0x5498, 0xffffffff, 0x00000000,
	0x549c, 0xffffffff, 0x00000000,
	0x5494, 0xffffffff, 0x00000000,
	0x53cc, 0xffffffff, 0x00000000,
	0x53c8, 0xffffffff, 0x00000000,
	0x53c4, 0xffffffff, 0x00000000,
	0x53c0, 0xffffffff, 0x00000000,
	0x53bc, 0xffffffff, 0x00000000,
	0x53b8, 0xffffffff, 0x00000000,
	0x53b4, 0xffffffff, 0x00000000,
	0x53b0, 0xffffffff, 0x00000000
};

static const u32 cypress_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0x40010000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 redwood_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 cedar_golden_registers[] =
{
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8a14, 0xffffffff, 0x00000007,
	0x8b10, 0xffffffff, 0x00000000,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0xffffffff, 0x001000f0,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x10830, 0xffffffff, 0x00000011,
	0x11430, 0xffffffff, 0x00000011,
	0xd02c, 0xffffffff, 0x08421000,
	0x240c, 0xffffffff, 0x00000380,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x28a4c, 0x06000000, 0x06000000,
	0x10c, 0x00000001, 0x00000001,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8c04, 0xffffffff, 0x40600060,
	0x8c08, 0xffffffff, 0x001c001c,
	0x8cf0, 0xffffffff, 0x08e00410,
	0x8c20, 0xffffffff, 0x00800080,
	0x8c24, 0xffffffff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0xffffffff, 0x00001010,
	0x28350, 0xffffffff, 0x00000000,
	0xa008, 0xffffffff, 0x00010000,
	0x5c4, 0xffffffff, 0x00000001,
	0x9508, 0xffffffff, 0x00000002
};

static const u32 cedar_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9178, 0xffffffff, 0x00050000,
	0x917c, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00010004,
	0x9190, 0xffffffff, 0x00070006,
	0x9194, 0xffffffff, 0x00050000,
	0x9198, 0xffffffff, 0x00030002,
	0x91a8, 0xffffffff, 0x00010004,
	0x91ac, 0xffffffff, 0x00070006,
	0x91e8, 0xffffffff, 0x00000000,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 juniper_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 supersumo_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5c4, 0xffffffff, 0x00000001,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x8c04, 0xffffffff, 0x40600060,
	0x8c08, 0xffffffff, 0x001c001c,
	0x8c20, 0xffffffff, 0x00800080,
	0x8c24, 0xffffffff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0xffffffff, 0x00001010,
	0x918c, 0xffffffff, 0x00010006,
	0x91a8, 0xffffffff, 0x00010006,
	0x91c4, 0xffffffff, 0x00010006,
	0x91e0, 0xffffffff, 0x00010006,
	0x9200, 0xffffffff, 0x00010006,
	0x9150, 0xffffffff, 0x6e944040,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x3f90, 0xffff0000, 0xff000000,
	0x9178, 0xffffffff, 0x00070000,
	0x9194, 0xffffffff, 0x00070000,
	0x91b0, 0xffffffff, 0x00070000,
	0x91cc, 0xffffffff, 0x00070000,
	0x91ec, 0xffffffff, 0x00070000,
	0x9148, 0xffff0000, 0xff000000,
	0x9190, 0xffffffff, 0x00090008,
	0x91ac, 0xffffffff, 0x00090008,
	0x91c8, 0xffffffff, 0x00090008,
	0x91e4, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0xffffffff, 0x00000001,
	0x8a18, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8030, 0xffffffff, 0x0000100a,
	0x8a14, 0xffffffff, 0x00000007,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x8b10, 0xffffffff, 0x00000000,
	0x28a4c, 0x06000000, 0x06000000,
	0x4d8, 0xffffffff, 0x00000100,
	0x913c, 0xffff000f, 0x0100000a,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5e78, 0xffffffff, 0x001000f0,
	0xd02c, 0xffffffff, 0x08421000,
	0xa008, 0xffffffff, 0x00010000,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8cf0, 0x1fffffff, 0x08e00620,
	0x28350, 0xffffffff, 0x00000000,
	0x9508, 0xffffffff, 0x00000002
};

static const u32 sumo_golden_registers[] =
{
	0x900c, 0x00ffffff, 0x0017071f,
	0x8c18, 0xffffffff, 0x10101060,
	0x8c1c, 0xffffffff, 0x00001010,
	0x8c30, 0x0000000f, 0x00000005,
	0x9688, 0x0000000f, 0x00000007
};

static const u32 wrestler_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5c4, 0xffffffff, 0x00000001,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x918c, 0xffffffff, 0x00010006,
	0x91a8, 0xffffffff, 0x00010006,
	0x9150, 0xffffffff, 0x6e944040,
	0x917c, 0xffffffff, 0x00030002,
	0x9198, 0xffffffff, 0x00030002,
	0x915c, 0xffffffff, 0x00010000,
	0x3f90, 0xffff0000, 0xff000000,
	0x9178, 0xffffffff, 0x00070000,
	0x9194, 0xffffffff, 0x00070000,
	0x9148, 0xffff0000, 0xff000000,
	0x9190, 0xffffffff, 0x00090008,
	0x91ac, 0xffffffff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0xffffffff, 0x00000001,
	0x8a18, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8030, 0xffffffff, 0x0000100a,
	0x8a14, 0xffffffff, 0x00000001,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x8b10, 0xffffffff, 0x00000000,
	0x28a4c, 0x06000000, 0x06000000,
	0x4d8, 0xffffffff, 0x00000100,
	0x913c, 0xffff000f, 0x0100000a,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5e78, 0xffffffff, 0x001000f0,
	0xd02c, 0xffffffff, 0x08421000,
	0xa008, 0xffffffff, 0x00010000,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8cf0, 0x1fffffff, 0x08e00410,
	0x28350, 0xffffffff, 0x00000000,
	0x9508, 0xffffffff, 0x00000002,
	0x900c, 0xffffffff, 0x0017071f,
	0x8c18, 0xffffffff, 0x10101060,
	0x8c1c, 0xffffffff, 0x00001010
};

static const u32 barts_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00010001,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x03773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76543210,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x00000007, 0x02011003,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000380,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000000f, 0x0100000a,
	0x8d00, 0xffff7f7f, 0x100e4848,
	0x8d04, 0x00ffffff, 0x00164745,
	0x8c00, 0xfffc0003, 0xe4000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c08, 0x00ff00ff, 0x001c001c,
	0x8cf0, 0x1fff1fff, 0x08e00620,
	0x8c20, 0x0fff0fff, 0x00800080,
	0x8c24, 0x0fff0fff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0x0000ffff, 0x00001010,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x000000c2,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 turks_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x8c8, 0x00003000, 0x00001070,
	0x8cc, 0x000fffff, 0x00040035,
	0x3f90, 0xffff0000, 0xfff00000,
	0x9148, 0xffff0000, 0xfff00000,
	0x3f94, 0xffff0000, 0xfff00000,
	0x914c, 0xffff0000, 0xfff00000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x00073007, 0x00010002,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x03773777, 0x02010002,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x00010002,
	0x98fc, 0xffffffff, 0x33221100,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x00010002,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000380,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000000f, 0x0100000a,
	0x8d00, 0xffff7f7f, 0x100e4848,
	0x8d04, 0x00ffffff, 0x00164745,
	0x8c00, 0xfffc0003, 0xe4000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c08, 0x00ff00ff, 0x001c001c,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8c20, 0x0fff0fff, 0x00800080,
	0x8c24, 0x0fff0fff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0x0000ffff, 0x00001010,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x000000c2,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 caicos_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x8c8, 0x00003420, 0x00001450,
	0x8cc, 0x000fffff, 0x00040035,
	0x3f90, 0xffff0000, 0xfffc0000,
	0x9148, 0xffff0000, 0xfffc0000,
	0x3f94, 0xffff0000, 0xfffc0000,
	0x914c, 0xffff0000, 0xfffc0000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x00073007, 0x00010001,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x03773777, 0x02010001,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02010001,
	0x98fc, 0xffffffff, 0x33221100,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x02010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000380,
	0x8a14, 0xf000001f, 0x00000001,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000000f, 0x0100000a,
	0x8d00, 0xffff7f7f, 0x100e4848,
	0x8d04, 0x00ffffff, 0x00164745,
	0x8c00, 0xfffc0003, 0xe4000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c08, 0x00ff00ff, 0x001c001c,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8c20, 0x0fff0fff, 0x00800080,
	0x8c24, 0x0fff0fff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0x0000ffff, 0x00001010,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x000000c2,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

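/**
 * evergreen_init_golden_registers - program family-specific golden registers
 *
 * @rdev: radeon_device pointer
 *
 * Programs the per-ASIC "golden" register sequences above (recommended
 * register settings) for the detected chip family.
 */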
static void evergreen_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 cypress_mgcg_init,
						 (const u32)ARRAY_SIZE(cypress_mgcg_init));
		break;
	case CHIP_JUNIPER:
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 juniper_mgcg_init,
						 (const u32)ARRAY_SIZE(juniper_mgcg_init));
		break;
	case CHIP_REDWOOD:
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 redwood_mgcg_init,
						 (const u32)ARRAY_SIZE(redwood_mgcg_init));
		break;
	case CHIP_CEDAR:
		radeon_program_register_sequence(rdev,
						 cedar_golden_registers,
						 (const u32)ARRAY_SIZE(cedar_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 cedar_mgcg_init,
						 (const u32)ARRAY_SIZE(cedar_mgcg_init));
		break;
	case CHIP_PALM:
		radeon_program_register_sequence(rdev,
						 wrestler_golden_registers,
						 (const u32)ARRAY_SIZE(wrestler_golden_registers));
		break;
	case CHIP_SUMO:
		radeon_program_register_sequence(rdev,
						 supersumo_golden_registers,
						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
		break;
	case CHIP_SUMO2:
		radeon_program_register_sequence(rdev,
						 supersumo_golden_registers,
						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
		radeon_program_register_sequence(rdev,
						 sumo_golden_registers,
						 (const u32)ARRAY_SIZE(sumo_golden_registers));
		break;
	case CHIP_BARTS:
		radeon_program_register_sequence(rdev,
						 barts_golden_registers,
						 (const u32)ARRAY_SIZE(barts_golden_registers));
		break;
	case CHIP_TURKS:
		radeon_program_register_sequence(rdev,
						 turks_golden_registers,
						 (const u32)ARRAY_SIZE(turks_golden_registers));
		break;
	case CHIP_CAICOS:
		radeon_program_register_sequence(rdev,
						 caicos_golden_registers,
						 (const u32)ARRAY_SIZE(caicos_golden_registers));
		break;
	default:
		break;
	}
}

/**
 * evergreen_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int evergreen_get_allowed_info_register(struct radeon_device *rdev,
					u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case DMA_STATUS_REG:
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

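/**
 * evergreen_tiling_fields - decode tiling flags into surface parameters
 *
 * @tiling_flags: radeon tiling flags
 * @bankw: bank width output
 * @bankh: bank height output
 * @mtaspect: macro tile aspect output
 * @tile_split: tile split output
 *
 * Unpacks the bank width/height, macro tile aspect, and tile split
 * fields from the tiling flags and converts them to the register
 * encodings used by the hardware.
 */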
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}

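/**
 * sumo_set_uvd_clock - program a single UVD clock
 *
 * @rdev: radeon_device pointer
 * @clock: requested clock frequency
 * @cntl_reg: clock control register offset
 * @status_reg: clock status register offset
 *
 * Computes the dividers for the requested clock via the atom
 * interpreter, programs the post divider, and polls the status
 * register until the clock reports stable (or the poll times out).
 * Returns 0 on success or a negative error code on failure.
 */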
static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			      u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   clock, false, &dividers);
	if (r)
		return r;

	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));

	for (i = 0; i < 100; i++) {
		if (RREG32(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

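/**
 * sumo_set_uvd_clocks - set the UVD vclk and dclk (sumo)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock
 * @dclk: requested UVD decoder clock
 *
 * Programs both UVD clocks and records the resulting frequencies
 * (in MHz) in CG_SCRATCH1 for later reference.
 * Returns 0 on success or a negative error code on failure.
 */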
int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;
	u32 cg_scratch = RREG32(CG_SCRATCH1);

	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0xffff0000;
	cg_scratch |= vclk / 100; /* MHz */

	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0x0000ffff;
	cg_scratch |= (dclk / 100) << 16; /* MHz */

done:
	WREG32(CG_SCRATCH1, cg_scratch);

	return r;
}

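/**
 * evergreen_set_uvd_clocks - set the UVD vclk and dclk (evergreen)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock, 0 to put the PLL to sleep
 * @dclk: requested UVD decoder clock, 0 to put the PLL to sleep
 *
 * Bypasses the UVD PLL, reprograms its feedback and post dividers
 * for the requested clocks, and switches back to normal operation
 * once the PLL has settled.
 * Returns 0 on success or a negative error code on failure.
 */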
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	/* start off with something large */
	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* toggle UPLL_SLEEP to 1 then back to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	if (fb_div < 307200)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

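/**
 * evergreen_fix_pci_max_read_req_size - sanitize PCIe MAX_READ_REQUEST_SIZE
 *
 * @rdev: radeon_device pointer
 *
 * Forces MAX_READ_REQUEST_SIZE back to 512 bytes if the BIOS or OS
 * left it at a value the hardware does not handle well.
 */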
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	int readrq;
	u16 v;

	readrq = pcie_get_readrq(rdev->pdev);
	v = ffs(readrq) - 8;
	/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7))
		pcie_set_readrq(rdev->pdev, 512);
}

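/**
 * dce4_program_fmt - set up the FMT (dither/truncation) block
 *
 * @encoder: drm encoder
 *
 * Programs FMT_BIT_DEPTH_CONTROL for the crtc driving the encoder
 * based on the monitor's bit depth and the connector's dither setting
 * (evergreen+). LVDS/eDP is set up by atom and analog outputs do not
 * need it.
 */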
void dce4_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN);
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_RGB_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

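/**
 * dce4_is_in_vblank - check if the crtc is in the vertical blank period
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to check
 *
 * Returns true if the crtc's status register reports vertical blank.
 */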
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
		return true;
	else
		return false;
}

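/**
 * dce4_is_counter_moving - check if the scanout position is advancing
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to check
 *
 * Reads the crtc position register twice and returns true if the
 * value changed, i.e. the display timing is actually running.
 */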
static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce4_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce4_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce4_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!dce4_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce4_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

/**
 * evergreen_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address (evergreen+).
 */
void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base,
			 bool async)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = radeon_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
	       async ? EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
	/* update pitch */
	WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	/* post the write */
	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset);
}

/**
 * evergreen_page_flip_pending - check if page flip is still pending
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to check
 *
 * Returns the current update pending status.
 */
bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

	/* Return current update_pending status: */
	return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
		EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

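/**
 * sumo_get_temp - get the on-die temperature (sumo)
 *
 * @rdev: radeon_device pointer
 *
 * Reads the thermal status register and returns the temperature
 * in millidegrees Celsius.
 */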
int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

/**
 * sumo_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (sumo, trinity, SI).
 * Used for profile mode only.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

/**
 * btc_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (BTC, cayman).
 * Used for profile mode only.
 */
void btc_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
	/* starting with BTC, there is one state that is used for both
	 * MH and SH.  Difference is that we always use the high clock index for
	 * mclk.
	 */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}

/**
 * evergreen_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, etc.) (evergreen+).
 */
evergreen_pm_misc(struct radeon_device * rdev)1625 void evergreen_pm_misc(struct radeon_device *rdev)
1626 {
1627 int req_ps_idx = rdev->pm.requested_power_state_index;
1628 int req_cm_idx = rdev->pm.requested_clock_mode_index;
1629 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
1630 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
1631
1632 if (voltage->type == VOLTAGE_SW) {
1633 /* 0xff0x are flags rather then an actual voltage */
1634 if ((voltage->voltage & 0xff00) == 0xff00)
1635 return;
1636 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
1637 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
1638 rdev->pm.current_vddc = voltage->voltage;
1639 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
1640 }
1641
1642 /* starting with BTC, there is one state that is used for both
1643 * MH and SH. Difference is that we always use the high clock index for
1644 * mclk and vddci.
1645 */
1646 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
1647 (rdev->family >= CHIP_BARTS) &&
1648 rdev->pm.active_crtc_count &&
1649 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
1650 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
1651 voltage = &rdev->pm.power_state[req_ps_idx].
1652 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
1653
1654 /* 0xff0x are flags rather then an actual voltage */
1655 if ((voltage->vddci & 0xff00) == 0xff00)
1656 return;
1657 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
1658 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
1659 rdev->pm.current_vddci = voltage->vddci;
1660 DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
1661 }
1662 }
1663 }
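
/* A worked example of the flag convention above (illustrative, not taken
 * from this file): a table entry such as voltage->voltage == 0xff01 is a
 * virtual voltage id understood by the ATOM tables rather than a value in
 * millivolts, so it is skipped here, while a plain entry of 1100 means
 * 1.100 V and is programmed through radeon_atom_set_voltage().
 */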
1664
1665 /**
1666 * evergreen_pm_prepare - pre-power state change callback.
1667 *
1668 * @rdev: radeon_device pointer
1669 *
1670 * Prepare for a power state change (evergreen+).
1671 */
1672 void evergreen_pm_prepare(struct radeon_device *rdev)
1673 {
1674 struct drm_device *ddev = rdev->ddev;
1675 struct drm_crtc *crtc;
1676 struct radeon_crtc *radeon_crtc;
1677 u32 tmp;
1678
1679 /* disable any active CRTCs */
1680 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
1681 radeon_crtc = to_radeon_crtc(crtc);
1682 if (radeon_crtc->enabled) {
1683 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
1684 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1685 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
1686 }
1687 }
1688 }
1689
1690 /**
1691 * evergreen_pm_finish - post-power state change callback.
1692 *
1693 * @rdev: radeon_device pointer
1694 *
1695 * Clean up after a power state change (evergreen+).
1696 */
1697 void evergreen_pm_finish(struct radeon_device *rdev)
1698 {
1699 struct drm_device *ddev = rdev->ddev;
1700 struct drm_crtc *crtc;
1701 struct radeon_crtc *radeon_crtc;
1702 u32 tmp;
1703
1704 /* enable any active CRTCs */
1705 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
1706 radeon_crtc = to_radeon_crtc(crtc);
1707 if (radeon_crtc->enabled) {
1708 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
1709 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1710 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
1711 }
1712 }
1713 }
1714
1715 /**
1716 * evergreen_hpd_sense - hpd sense callback.
1717 *
1718 * @rdev: radeon_device pointer
1719 * @hpd: hpd (hotplug detect) pin
1720 *
1721 * Checks if a digital monitor is connected (evergreen+).
1722 * Returns true if connected, false if not connected.
1723 */
1724 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
1725 {
1726 if (hpd == RADEON_HPD_NONE)
1727 return false;
1728
1729 return !!(RREG32(DC_HPDx_INT_STATUS_REG(hpd)) & DC_HPDx_SENSE);
1730 }
1731
1732 /**
1733 * evergreen_hpd_set_polarity - hpd set polarity callback.
1734 *
1735 * @rdev: radeon_device pointer
1736 * @hpd: hpd (hotplug detect) pin
1737 *
1738 * Set the polarity of the hpd pin (evergreen+).
1739 */
1740 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
1741 enum radeon_hpd_id hpd)
1742 {
1743 bool connected = evergreen_hpd_sense(rdev, hpd);
1744
1745 if (hpd == RADEON_HPD_NONE)
1746 return;
1747
1748 if (connected)
1749 WREG32_AND(DC_HPDx_INT_CONTROL(hpd), ~DC_HPDx_INT_POLARITY);
1750 else
1751 WREG32_OR(DC_HPDx_INT_CONTROL(hpd), DC_HPDx_INT_POLARITY);
1752 }
1753
1754 /**
1755 * evergreen_hpd_init - hpd setup callback.
1756 *
1757 * @rdev: radeon_device pointer
1758 *
1759 * Setup the hpd pins used by the card (evergreen+).
1760 * Enable the pin, set the polarity, and enable the hpd interrupts.
1761 */
1762 void evergreen_hpd_init(struct radeon_device *rdev)
1763 {
1764 struct drm_device *dev = rdev->ddev;
1765 struct drm_connector *connector;
1766 unsigned enabled = 0;
1767 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
1768 DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
1769
1770 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1771 enum radeon_hpd_id hpd =
1772 to_radeon_connector(connector)->hpd.hpd;
1773
1774 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
1775 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
1776 /* don't try to enable hpd on eDP or LVDS, to avoid breaking the
1777 * aux dp channel on iMacs; this helps (but does not completely fix)
1778 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
1779 * and also avoids interrupt storms during dpms.
1780 */
1781 continue;
1782 }
1783
1784 if (hpd == RADEON_HPD_NONE)
1785 continue;
1786
1787 WREG32(DC_HPDx_CONTROL(hpd), tmp);
1788 enabled |= 1 << hpd;
1789
1790 radeon_hpd_set_polarity(rdev, hpd);
1791 }
1792 radeon_irq_kms_enable_hpd(rdev, enabled);
1793 }
1794
1795 /**
1796 * evergreen_hpd_fini - hpd tear down callback.
1797 *
1798 * @rdev: radeon_device pointer
1799 *
1800 * Tear down the hpd pins used by the card (evergreen+).
1801 * Disable the hpd interrupts.
1802 */
1803 void evergreen_hpd_fini(struct radeon_device *rdev)
1804 {
1805 struct drm_device *dev = rdev->ddev;
1806 struct drm_connector *connector;
1807 unsigned disabled = 0;
1808
1809 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1810 enum radeon_hpd_id hpd =
1811 to_radeon_connector(connector)->hpd.hpd;
1812
1813 if (hpd == RADEON_HPD_NONE)
1814 continue;
1815
1816 WREG32(DC_HPDx_CONTROL(hpd), 0);
1817 disabled |= 1 << hpd;
1818 }
1819 radeon_irq_kms_disable_hpd(rdev, disabled);
1820 }
1821
1822 /* watermark setup */
1823
1824 static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
1825 struct radeon_crtc *radeon_crtc,
1826 struct drm_display_mode *mode,
1827 struct drm_display_mode *other_mode)
1828 {
1829 u32 tmp, buffer_alloc, i;
1830 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1831 /*
1832 * Line Buffer Setup
1833 * There are 3 line buffers, each one shared by 2 display controllers.
1834 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1835 * the display controllers. The partitioning is done via one of four
1836 * preset allocations specified in bits 2:0:
1837 * first display controller
1838 * 0 - first half of lb (3840 * 2)
1839 * 1 - first 3/4 of lb (5760 * 2)
1840 * 2 - whole lb (7680 * 2), other crtc must be disabled
1841 * 3 - first 1/4 of lb (1920 * 2)
1842 * second display controller
1843 * 4 - second half of lb (3840 * 2)
1844 * 5 - second 3/4 of lb (5760 * 2)
1845 * 6 - whole lb (7680 * 2), other crtc must be disabled
1846 * 7 - last 1/4 of lb (1920 * 2)
1847 */
1848 /* this can get tricky if we have two large displays on a paired group
1849 * of crtcs. Ideally for multiple large displays we'd assign them to
1850 * non-linked crtcs for maximum line buffer allocation.
1851 */
1852 if (radeon_crtc->base.enabled && mode) {
1853 if (other_mode) {
1854 tmp = 0; /* 1/2 */
1855 buffer_alloc = 1;
1856 } else {
1857 tmp = 2; /* whole */
1858 buffer_alloc = 2;
1859 }
1860 } else {
1861 tmp = 0;
1862 buffer_alloc = 0;
1863 }
1864
1865 /* second controller of the pair uses second half of the lb */
1866 if (radeon_crtc->crtc_id % 2)
1867 tmp += 4;
1868 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
1869
1870 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1871 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1872 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1873 for (i = 0; i < rdev->usec_timeout; i++) {
1874 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1875 DMIF_BUFFERS_ALLOCATED_COMPLETED)
1876 break;
1877 udelay(1);
1878 }
1879 }
1880
1881 if (radeon_crtc->base.enabled && mode) {
1882 switch (tmp) {
1883 case 0:
1884 case 4:
1885 default:
1886 if (ASIC_IS_DCE5(rdev))
1887 return 4096 * 2;
1888 else
1889 return 3840 * 2;
1890 case 1:
1891 case 5:
1892 if (ASIC_IS_DCE5(rdev))
1893 return 6144 * 2;
1894 else
1895 return 5760 * 2;
1896 case 2:
1897 case 6:
1898 if (ASIC_IS_DCE5(rdev))
1899 return 8192 * 2;
1900 else
1901 return 7680 * 2;
1902 case 3:
1903 case 7:
1904 if (ASIC_IS_DCE5(rdev))
1905 return 2048 * 2;
1906 else
1907 return 1920 * 2;
1908 }
1909 }
1910
1911 /* controller not enabled, so no lb used */
1912 return 0;
1913 }
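
/* Worked example for the split above (illustrative): two enabled
 * 1920-wide displays on a paired crtc group take the other_mode branch,
 * so each head gets half of the line buffer and the function returns
 * 3840 * 2 entries (4096 * 2 on DCE5); a single display on the pair gets
 * the whole buffer, 7680 * 2 (8192 * 2 on DCE5).
 */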
1914
1915 u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
1916 {
1917 u32 tmp = RREG32(MC_SHARED_CHMAP);
1918
1919 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1920 case 0:
1921 default:
1922 return 1;
1923 case 1:
1924 return 2;
1925 case 2:
1926 return 4;
1927 case 3:
1928 return 8;
1929 }
1930 }
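
/* A minimal sketch of the same decode, assuming only the register layout
 * used above (channels = 1 << NOOFCHAN field); the helper name is
 * hypothetical:
 *
 *	static u32 noofchan_to_channels(u32 chmap)
 *	{
 *		u32 field = (chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT;
 *
 *		return (field <= 3) ? (1 << field) : 1;
 *	}
 */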
1931
1932 struct evergreen_wm_params {
1933 u32 dram_channels; /* number of dram channels */
1934 u32 yclk; /* bandwidth per dram data pin in kHz */
1935 u32 sclk; /* engine clock in kHz */
1936 u32 disp_clk; /* display clock in kHz */
1937 u32 src_width; /* viewport width */
1938 u32 active_time; /* active display time in ns */
1939 u32 blank_time; /* blank time in ns */
1940 bool interlaced; /* mode is interlaced */
1941 fixed20_12 vsc; /* vertical scale ratio */
1942 u32 num_heads; /* number of active crtcs */
1943 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
1944 u32 lb_size; /* line buffer allocated to pipe */
1945 u32 vtaps; /* vertical scaler taps */
1946 };
1947
1948 static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
1949 {
1950 /* Calculate DRAM Bandwidth and the part allocated to display. */
1951 fixed20_12 dram_efficiency; /* 0.7 */
1952 fixed20_12 yclk, dram_channels, bandwidth;
1953 fixed20_12 a;
1954
1955 a.full = dfixed_const(1000);
1956 yclk.full = dfixed_const(wm->yclk);
1957 yclk.full = dfixed_div(yclk, a);
1958 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1959 a.full = dfixed_const(10);
1960 dram_efficiency.full = dfixed_const(7);
1961 dram_efficiency.full = dfixed_div(dram_efficiency, a);
1962 bandwidth.full = dfixed_mul(dram_channels, yclk);
1963 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
1964
1965 return dfixed_trunc(bandwidth);
1966 }
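
/* Worked example of the 20.12 fixed-point math above (illustrative):
 * with wm->yclk = 1000000 (a 1 GHz effective dram clock) and 2 channels,
 * yclk / 1000 = 1000 MHz, dram_channels * 4 = 8 bytes per clock, and
 * bandwidth = 8 * 1000 * 0.7 = 5600 MB/s for the whole chip.
 */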
1967
1968 static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
1969 {
1970 /* Calculate DRAM Bandwidth and the part allocated to display. */
1971 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
1972 fixed20_12 yclk, dram_channels, bandwidth;
1973 fixed20_12 a;
1974
1975 a.full = dfixed_const(1000);
1976 yclk.full = dfixed_const(wm->yclk);
1977 yclk.full = dfixed_div(yclk, a);
1978 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1979 a.full = dfixed_const(10);
1980 disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
1981 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
1982 bandwidth.full = dfixed_mul(dram_channels, yclk);
1983 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
1984
1985 return dfixed_trunc(bandwidth);
1986 }
1987
1988 static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
1989 {
1990 /* Calculate the display Data return Bandwidth */
1991 fixed20_12 return_efficiency; /* 0.8 */
1992 fixed20_12 sclk, bandwidth;
1993 fixed20_12 a;
1994
1995 a.full = dfixed_const(1000);
1996 sclk.full = dfixed_const(wm->sclk);
1997 sclk.full = dfixed_div(sclk, a);
1998 a.full = dfixed_const(10);
1999 return_efficiency.full = dfixed_const(8);
2000 return_efficiency.full = dfixed_div(return_efficiency, a);
2001 a.full = dfixed_const(32);
2002 bandwidth.full = dfixed_mul(a, sclk);
2003 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
2004
2005 return dfixed_trunc(bandwidth);
2006 }
2007
2008 static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
2009 {
2010 /* Calculate the DMIF Request Bandwidth */
2011 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
2012 fixed20_12 disp_clk, bandwidth;
2013 fixed20_12 a;
2014
2015 a.full = dfixed_const(1000);
2016 disp_clk.full = dfixed_const(wm->disp_clk);
2017 disp_clk.full = dfixed_div(disp_clk, a);
2018 a.full = dfixed_const(10);
2019 disp_clk_request_efficiency.full = dfixed_const(8);
2020 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
2021 a.full = dfixed_const(32);
2022 bandwidth.full = dfixed_mul(a, disp_clk);
2023 bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
2024
2025 return dfixed_trunc(bandwidth);
2026 }
2027
2028 static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
2029 {
2030 /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
2031 u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
2032 u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
2033 u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
2034
2035 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
2036 }
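
/* Illustrative numbers: with a dram bandwidth of 5600 MB/s, a data
 * return bandwidth of 32 * 700 * 0.8 = 17920 MB/s (700 MHz sclk) and a
 * dmif request bandwidth of 32 * 400 * 0.8 = 10240 MB/s (400 MHz disp
 * clock), the display can only count on min(5600, 17920, 10240) =
 * 5600 MB/s.
 */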
2037
2038 static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
2039 {
2040 /* Calculate the display mode Average Bandwidth
2041 * DisplayMode should contain the source and destination dimensions,
2042 * timing, etc.
2043 */
2044 fixed20_12 bpp;
2045 fixed20_12 line_time;
2046 fixed20_12 src_width;
2047 fixed20_12 bandwidth;
2048 fixed20_12 a;
2049
2050 a.full = dfixed_const(1000);
2051 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
2052 line_time.full = dfixed_div(line_time, a);
2053 bpp.full = dfixed_const(wm->bytes_per_pixel);
2054 src_width.full = dfixed_const(wm->src_width);
2055 bandwidth.full = dfixed_mul(src_width, bpp);
2056 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
2057 bandwidth.full = dfixed_div(bandwidth, line_time);
2058
2059 return dfixed_trunc(bandwidth);
2060 }
2061
2062 static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
2063 {
2064 /* First calculate the latency in ns */
2065 u32 mc_latency = 2000; /* 2000 ns. */
2066 u32 available_bandwidth = evergreen_available_bandwidth(wm);
2067 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
2068 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
2069 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
2070 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
2071 (wm->num_heads * cursor_line_pair_return_time);
2072 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
2073 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
2074 fixed20_12 a, b, c;
2075
2076 if (wm->num_heads == 0)
2077 return 0;
2078
2079 a.full = dfixed_const(2);
2080 b.full = dfixed_const(1);
2081 if ((wm->vsc.full > a.full) ||
2082 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
2083 (wm->vtaps >= 5) ||
2084 ((wm->vsc.full >= a.full) && wm->interlaced))
2085 max_src_lines_per_dst_line = 4;
2086 else
2087 max_src_lines_per_dst_line = 2;
2088
2089 a.full = dfixed_const(available_bandwidth);
2090 b.full = dfixed_const(wm->num_heads);
2091 a.full = dfixed_div(a, b);
2092
2093 lb_fill_bw = min(dfixed_trunc(a), wm->disp_clk * wm->bytes_per_pixel / 1000);
2094
2095 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
2096 b.full = dfixed_const(1000);
2097 c.full = dfixed_const(lb_fill_bw);
2098 b.full = dfixed_div(c, b);
2099 a.full = dfixed_div(a, b);
2100 line_fill_time = dfixed_trunc(a);
2101
2102 if (line_fill_time < wm->active_time)
2103 return latency;
2104 else
2105 return latency + (line_fill_time - wm->active_time);
2106
2107 }
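
/* In closed form (illustrative): watermark = mc_latency + dc_latency +
 * (num_heads + 1) * worst_chunk_return + num_heads * cursor_return, and
 * if the worst-case line cannot be filled within the active period the
 * shortfall (line_fill_time - active_time) is added on top.
 */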
2108
2109 static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
2110 {
2111 if (evergreen_average_bandwidth(wm) <=
2112 (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
2113 return true;
2114 else
2115 return false;
2116 }
2117
2118 static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
2119 {
2120 if (evergreen_average_bandwidth(wm) <=
2121 (evergreen_available_bandwidth(wm) / wm->num_heads))
2122 return true;
2123 else
2124 return false;
2125 }
2126
2127 static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
2128 {
2129 u32 lb_partitions = wm->lb_size / wm->src_width;
2130 u32 line_time = wm->active_time + wm->blank_time;
2131 u32 latency_tolerant_lines;
2132 u32 latency_hiding;
2133 fixed20_12 a;
2134
2135 a.full = dfixed_const(1);
2136 if (wm->vsc.full > a.full)
2137 latency_tolerant_lines = 1;
2138 else {
2139 if (lb_partitions <= (wm->vtaps + 1))
2140 latency_tolerant_lines = 1;
2141 else
2142 latency_tolerant_lines = 2;
2143 }
2144
2145 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2146
2147 if (evergreen_latency_watermark(wm) <= latency_hiding)
2148 return true;
2149 else
2150 return false;
2151 }
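
/* Illustrative check: a 1920-wide source with lb_size = 3840 * 2 gives
 * lb_partitions = 4; with vtaps = 1 and no vertical downscale that
 * allows latency_tolerant_lines = 2, so the watermark computed above
 * must fit within two line times plus the blank time.
 */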
2152
2153 static void evergreen_program_watermarks(struct radeon_device *rdev,
2154 struct radeon_crtc *radeon_crtc,
2155 u32 lb_size, u32 num_heads)
2156 {
2157 struct drm_display_mode *mode = &radeon_crtc->base.mode;
2158 struct evergreen_wm_params wm_low, wm_high;
2159 u32 dram_channels;
2160 u32 active_time;
2161 u32 line_time = 0;
2162 u32 latency_watermark_a = 0, latency_watermark_b = 0;
2163 u32 priority_a_mark = 0, priority_b_mark = 0;
2164 u32 priority_a_cnt = PRIORITY_OFF;
2165 u32 priority_b_cnt = PRIORITY_OFF;
2166 u32 pipe_offset = radeon_crtc->crtc_id * 16;
2167 u32 tmp, arb_control3;
2168 fixed20_12 a, b, c;
2169
2170 if (radeon_crtc->base.enabled && num_heads && mode) {
2171 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
2172 (u32)mode->clock);
2173 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
2174 (u32)mode->clock);
2175 line_time = min(line_time, (u32)65535);
2176 priority_a_cnt = 0;
2177 priority_b_cnt = 0;
2178 dram_channels = evergreen_get_number_of_dram_channels(rdev);
2179
2180 /* watermark for high clocks */
2181 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2182 wm_high.yclk =
2183 radeon_dpm_get_mclk(rdev, false) * 10;
2184 wm_high.sclk =
2185 radeon_dpm_get_sclk(rdev, false) * 10;
2186 } else {
2187 wm_high.yclk = rdev->pm.current_mclk * 10;
2188 wm_high.sclk = rdev->pm.current_sclk * 10;
2189 }
2190
2191 wm_high.disp_clk = mode->clock;
2192 wm_high.src_width = mode->crtc_hdisplay;
2193 wm_high.active_time = active_time;
2194 wm_high.blank_time = line_time - wm_high.active_time;
2195 wm_high.interlaced = false;
2196 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2197 wm_high.interlaced = true;
2198 wm_high.vsc = radeon_crtc->vsc;
2199 wm_high.vtaps = 1;
2200 if (radeon_crtc->rmx_type != RMX_OFF)
2201 wm_high.vtaps = 2;
2202 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2203 wm_high.lb_size = lb_size;
2204 wm_high.dram_channels = dram_channels;
2205 wm_high.num_heads = num_heads;
2206
2207 /* watermark for low clocks */
2208 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2209 wm_low.yclk =
2210 radeon_dpm_get_mclk(rdev, true) * 10;
2211 wm_low.sclk =
2212 radeon_dpm_get_sclk(rdev, true) * 10;
2213 } else {
2214 wm_low.yclk = rdev->pm.current_mclk * 10;
2215 wm_low.sclk = rdev->pm.current_sclk * 10;
2216 }
2217
2218 wm_low.disp_clk = mode->clock;
2219 wm_low.src_width = mode->crtc_hdisplay;
2220 wm_low.active_time = active_time;
2221 wm_low.blank_time = line_time - wm_low.active_time;
2222 wm_low.interlaced = false;
2223 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2224 wm_low.interlaced = true;
2225 wm_low.vsc = radeon_crtc->vsc;
2226 wm_low.vtaps = 1;
2227 if (radeon_crtc->rmx_type != RMX_OFF)
2228 wm_low.vtaps = 2;
2229 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2230 wm_low.lb_size = lb_size;
2231 wm_low.dram_channels = dram_channels;
2232 wm_low.num_heads = num_heads;
2233
2234 /* set for high clocks */
2235 latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
2236 /* set for low clocks */
2237 latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
2238
2239 /* possibly force display priority to high */
2240 /* should really do this at mode validation time... */
2241 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
2242 !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
2243 !evergreen_check_latency_hiding(&wm_high) ||
2244 (rdev->disp_priority == 2)) {
2245 DRM_DEBUG_KMS("force priority a to high\n");
2246 priority_a_cnt |= PRIORITY_ALWAYS_ON;
2247 }
2248 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2249 !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2250 !evergreen_check_latency_hiding(&wm_low) ||
2251 (rdev->disp_priority == 2)) {
2252 DRM_DEBUG_KMS("force priority b to high\n");
2253 priority_b_cnt |= PRIORITY_ALWAYS_ON;
2254 }
2255
2256 a.full = dfixed_const(1000);
2257 b.full = dfixed_const(mode->clock);
2258 b.full = dfixed_div(b, a);
2259 c.full = dfixed_const(latency_watermark_a);
2260 c.full = dfixed_mul(c, b);
2261 c.full = dfixed_mul(c, radeon_crtc->hsc);
2262 c.full = dfixed_div(c, a);
2263 a.full = dfixed_const(16);
2264 c.full = dfixed_div(c, a);
2265 priority_a_mark = dfixed_trunc(c);
2266 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
2267
2268 a.full = dfixed_const(1000);
2269 b.full = dfixed_const(mode->clock);
2270 b.full = dfixed_div(b, a);
2271 c.full = dfixed_const(latency_watermark_b);
2272 c.full = dfixed_mul(c, b);
2273 c.full = dfixed_mul(c, radeon_crtc->hsc);
2274 c.full = dfixed_div(c, a);
2275 a.full = dfixed_const(16);
2276 c.full = dfixed_div(c, a);
2277 priority_b_mark = dfixed_trunc(c);
2278 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
2279
2280 /* Save number of lines the linebuffer leads before the scanout */
2281 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2282 }
2283
2284 /* select wm A */
2285 arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
2286 tmp = arb_control3;
2287 tmp &= ~LATENCY_WATERMARK_MASK(3);
2288 tmp |= LATENCY_WATERMARK_MASK(1);
2289 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
2290 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
2291 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
2292 LATENCY_HIGH_WATERMARK(line_time)));
2293 /* select wm B */
2294 tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
2295 tmp &= ~LATENCY_WATERMARK_MASK(3);
2296 tmp |= LATENCY_WATERMARK_MASK(2);
2297 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
2298 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
2299 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
2300 LATENCY_HIGH_WATERMARK(line_time)));
2301 /* restore original selection */
2302 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
2303
2304 /* write the priority marks */
2305 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
2306 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
2307
2308 /* save values for DPM */
2309 radeon_crtc->line_time = line_time;
2310 radeon_crtc->wm_high = latency_watermark_a;
2311 radeon_crtc->wm_low = latency_watermark_b;
2312 }
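
/* The priority mark computed above reduces to (illustrative):
 * mark = watermark_ns * pixel_clock_Hz * 1e-9 * hsc / 16, i.e. the
 * number of 16-pixel groups fetched during the latency window; e.g.
 * a 100000 ns watermark at 148.5 MHz with hsc = 1 gives
 * 14850 / 16 ~= 928.
 */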
2313
2314 /**
2315 * evergreen_bandwidth_update - update display watermarks callback.
2316 *
2317 * @rdev: radeon_device pointer
2318 *
2319 * Update the display watermarks based on the requested mode(s)
2320 * (evergreen+).
2321 */
2322 void evergreen_bandwidth_update(struct radeon_device *rdev)
2323 {
2324 struct drm_display_mode *mode0 = NULL;
2325 struct drm_display_mode *mode1 = NULL;
2326 u32 num_heads = 0, lb_size;
2327 int i;
2328
2329 if (!rdev->mode_info.mode_config_initialized)
2330 return;
2331
2332 radeon_update_display_priority(rdev);
2333
2334 for (i = 0; i < rdev->num_crtc; i++) {
2335 if (rdev->mode_info.crtcs[i]->base.enabled)
2336 num_heads++;
2337 }
2338 for (i = 0; i < rdev->num_crtc; i += 2) {
2339 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2340 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2341 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2342 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2343 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2344 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2345 }
2346 }
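
/* crtcs are walked in pairs (i, i + 1) because each pair shares one line
 * buffer; evergreen_line_buffer_adjust() is therefore given both modes
 * so the split accounts for what the partner crtc needs.
 */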
2347
2348 /**
2349 * evergreen_mc_wait_for_idle - wait for MC idle callback.
2350 *
2351 * @rdev: radeon_device pointer
2352 *
2353 * Wait for the MC (memory controller) to be idle.
2354 * (evergreen+).
2355 * Returns 0 if the MC is idle, -1 if not.
2356 */
2357 int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
2358 {
2359 unsigned i;
2360 u32 tmp;
2361
2362 for (i = 0; i < rdev->usec_timeout; i++) {
2363 /* read the MC busy bits from SRBM_STATUS */
2364 tmp = RREG32(SRBM_STATUS) & 0x1F00;
2365 if (!tmp)
2366 return 0;
2367 udelay(1);
2368 }
2369 return -1;
2370 }
2371
2372 /*
2373 * GART
2374 */
2375 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
2376 {
2377 unsigned i;
2378 u32 tmp;
2379
2380 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2381
2382 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
2383 for (i = 0; i < rdev->usec_timeout; i++) {
2384 /* read the VM context0 flush response */
2385 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
2386 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
2387 if (tmp == 2) {
2388 pr_warn("[drm] r600 flush TLB failed\n");
2389 return;
2390 }
2391 if (tmp) {
2392 return;
2393 }
2394 udelay(1);
2395 }
2396 }
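
/* Illustrative reading of the response field polled above: 0 means the
 * flush request is still pending (keep polling), a value of 2 means the
 * flush failed (warned about), and any other non-zero value is treated
 * as completion.
 */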
2397
2398 static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
2399 {
2400 u32 tmp;
2401 int r;
2402
2403 if (rdev->gart.robj == NULL) {
2404 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
2405 return -EINVAL;
2406 }
2407 r = radeon_gart_table_vram_pin(rdev);
2408 if (r)
2409 return r;
2410 /* Setup L2 cache */
2411 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
2412 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
2413 EFFECTIVE_L2_QUEUE_SIZE(7));
2414 WREG32(VM_L2_CNTL2, 0);
2415 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
2416 /* Setup TLB control */
2417 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
2418 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
2419 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
2420 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
2421 if (rdev->flags & RADEON_IS_IGP) {
2422 WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
2423 WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
2424 WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
2425 } else {
2426 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
2427 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
2428 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
2429 if ((rdev->family == CHIP_JUNIPER) ||
2430 (rdev->family == CHIP_CYPRESS) ||
2431 (rdev->family == CHIP_HEMLOCK) ||
2432 (rdev->family == CHIP_BARTS))
2433 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
2434 }
2435 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
2436 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
2437 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
2438 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
2439 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
2440 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
2441 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
2442 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
2443 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2444 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
2445 (u32)(rdev->dummy_page.addr >> 12));
2446 WREG32(VM_CONTEXT1_CNTL, 0);
2447
2448 evergreen_pcie_gart_tlb_flush(rdev);
2449 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
2450 (unsigned)(rdev->mc.gtt_size >> 20),
2451 (unsigned long long)rdev->gart.table_addr);
2452 rdev->gart.ready = true;
2453 return 0;
2454 }
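
/* Illustrative address setup, assuming a GART aperture at gtt_start =
 * 0x20000000: VM_CONTEXT0_PAGE_TABLE_START_ADDR is written as
 * 0x20000000 >> 12 = 0x20000, i.e. every GART range above is programmed
 * in 4 KB page units, with faults redirected to the dummy page.
 */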
2455
2456 static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
2457 {
2458 u32 tmp;
2459
2460 /* Disable all tables */
2461 WREG32(VM_CONTEXT0_CNTL, 0);
2462 WREG32(VM_CONTEXT1_CNTL, 0);
2463
2464 /* Setup L2 cache */
2465 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
2466 EFFECTIVE_L2_QUEUE_SIZE(7));
2467 WREG32(VM_L2_CNTL2, 0);
2468 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
2469 /* Setup TLB control */
2470 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
2471 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
2472 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
2473 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
2474 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
2475 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
2476 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
2477 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
2478 radeon_gart_table_vram_unpin(rdev);
2479 }
2480
2481 static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
2482 {
2483 evergreen_pcie_gart_disable(rdev);
2484 radeon_gart_table_vram_free(rdev);
2485 radeon_gart_fini(rdev);
2486 }
2487
2488
2489 static void evergreen_agp_enable(struct radeon_device *rdev)
2490 {
2491 u32 tmp;
2492
2493 /* Setup L2 cache */
2494 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
2495 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
2496 EFFECTIVE_L2_QUEUE_SIZE(7));
2497 WREG32(VM_L2_CNTL2, 0);
2498 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
2499 /* Setup TLB control */
2500 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
2501 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
2502 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
2503 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
2504 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
2505 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
2506 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
2507 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
2508 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
2509 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
2510 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
2511 WREG32(VM_CONTEXT0_CNTL, 0);
2512 WREG32(VM_CONTEXT1_CNTL, 0);
2513 }
2514
2515 static const unsigned ni_dig_offsets[] =
2516 {
2517 NI_DIG0_REGISTER_OFFSET,
2518 NI_DIG1_REGISTER_OFFSET,
2519 NI_DIG2_REGISTER_OFFSET,
2520 NI_DIG3_REGISTER_OFFSET,
2521 NI_DIG4_REGISTER_OFFSET,
2522 NI_DIG5_REGISTER_OFFSET
2523 };
2524
2525 static const unsigned ni_tx_offsets[] =
2526 {
2527 NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2528 NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2529 NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2530 NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2531 NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2532 NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2533 };
2534
2535 static const unsigned evergreen_dp_offsets[] =
2536 {
2537 EVERGREEN_DP0_REGISTER_OFFSET,
2538 EVERGREEN_DP1_REGISTER_OFFSET,
2539 EVERGREEN_DP2_REGISTER_OFFSET,
2540 EVERGREEN_DP3_REGISTER_OFFSET,
2541 EVERGREEN_DP4_REGISTER_OFFSET,
2542 EVERGREEN_DP5_REGISTER_OFFSET
2543 };
2544
2545 static const unsigned evergreen_disp_int_status[] =
2546 {
2547 DISP_INTERRUPT_STATUS,
2548 DISP_INTERRUPT_STATUS_CONTINUE,
2549 DISP_INTERRUPT_STATUS_CONTINUE2,
2550 DISP_INTERRUPT_STATUS_CONTINUE3,
2551 DISP_INTERRUPT_STATUS_CONTINUE4,
2552 DISP_INTERRUPT_STATUS_CONTINUE5
2553 };
2554
2555 /*
2556 * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
2557 * We go from crtc to connector, which is not reliable since it
2558 * should really be the opposite direction. If the crtc is enabled, then
2559 * find the dig_fe which selects this crtc and ensure that it is enabled.
2560 * If such a dig_fe is found, then find the dig_be which selects the found
2561 * dig_fe and ensure that it is enabled and in DP_SST mode.
2562 * If UNIPHY_PLL_CONTROL1 is enabled, then we should disconnect the timing
2563 * from the dp symbol clocks.
2564 */
2565 static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2566 unsigned crtc_id, unsigned *ret_dig_fe)
2567 {
2568 unsigned i;
2569 unsigned dig_fe;
2570 unsigned dig_be;
2571 unsigned dig_en_be;
2572 unsigned uniphy_pll;
2573 unsigned digs_fe_selected;
2574 unsigned dig_be_mode;
2575 unsigned dig_fe_mask;
2576 bool is_enabled = false;
2577 bool found_crtc = false;
2578
2579 /* loop through all running dig_fe to find selected crtc */
2580 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2581 dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2582 if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2583 crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2584 /* found running pipe */
2585 found_crtc = true;
2586 dig_fe_mask = 1 << i;
2587 dig_fe = i;
2588 break;
2589 }
2590 }
2591
2592 if (found_crtc) {
2593 /* loop through all running dig_be to find selected dig_fe */
2594 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2595 dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2596 /* is this dig_fe selected by the dig_be? */
2597 digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2598 dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2599 if (dig_fe_mask & digs_fe_selected &&
2600 /* is the dig_be in SST mode? */
2601 dig_be_mode == NI_DIG_BE_DPSST) {
2602 dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2603 ni_dig_offsets[i]);
2604 uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2605 ni_tx_offsets[i]);
2606 /* dig_be is enabled and the tx is running */
2607 if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2608 dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2609 uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2610 is_enabled = true;
2611 *ret_dig_fe = dig_fe;
2612 break;
2613 }
2614 }
2615 }
2616 }
2617
2618 return is_enabled;
2619 }
2620
2621 /*
2622 * Blank the dig when in dp sst mode;
2623 * the dig ignores the crtc timing.
2624 */
2625 static void evergreen_blank_dp_output(struct radeon_device *rdev,
2626 unsigned dig_fe)
2627 {
2628 unsigned stream_ctrl;
2629 unsigned fifo_ctrl;
2630 unsigned counter = 0;
2631
2632 if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2633 DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2634 return;
2635 }
2636
2637 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2638 evergreen_dp_offsets[dig_fe]);
2639 if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2640 DRM_ERROR("dig %d , should be enable\n", dig_fe);
2641 return;
2642 }
2643
2644 stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2645 WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2646 evergreen_dp_offsets[dig_fe], stream_ctrl);
2647
2648 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2649 evergreen_dp_offsets[dig_fe]);
2650 while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2651 msleep(1);
2652 counter++;
2653 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2654 evergreen_dp_offsets[dig_fe]);
2655 }
2656 if (counter >= 32)
2657 DRM_ERROR("DP video stream not disabled after %d ms\n", counter);
2658
2659 fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2660 fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2661 WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2662
2663 }
2664
2665 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2666 {
2667 u32 crtc_enabled, tmp, frame_count, blackout;
2668 int i, j;
2669 unsigned dig_fe;
2670
2671 if (!ASIC_IS_NODCE(rdev)) {
2672 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
2673 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
2674
2675 /* disable VGA render */
2676 WREG32(VGA_RENDER_CONTROL, 0);
2677 }
2678 /* blank the display controllers */
2679 for (i = 0; i < rdev->num_crtc; i++) {
2680 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
2681 if (crtc_enabled) {
2682 save->crtc_enabled[i] = true;
2683 if (ASIC_IS_DCE6(rdev)) {
2684 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
2685 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
2686 radeon_wait_for_vblank(rdev, i);
2687 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2688 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
2689 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
2690 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2691 }
2692 } else {
2693 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2694 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
2695 radeon_wait_for_vblank(rdev, i);
2696 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2697 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
2698 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2699 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2700 }
2701 }
2702 /* wait for the next frame */
2703 frame_count = radeon_get_vblank_counter(rdev, i);
2704 for (j = 0; j < rdev->usec_timeout; j++) {
2705 if (radeon_get_vblank_counter(rdev, i) != frame_count)
2706 break;
2707 udelay(1);
2708 }
2709 /* We should disable the dig if it drives dp sst,
2710 * but we are in radeon_device_init and the topology is unknown;
2711 * it only becomes available after radeon_modeset_init.
2712 * The method radeon_atom_encoder_dpms_dig
2713 * would do the job if we initialized it properly;
2714 * for now we do it manually.
2715 */
2716 if (ASIC_IS_DCE5(rdev) &&
2717 evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
2718 evergreen_blank_dp_output(rdev, dig_fe);
2719 /* we could remove the 6 lines below */
2720 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2721 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2722 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2723 tmp &= ~EVERGREEN_CRTC_MASTER_EN;
2724 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2725 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2726 save->crtc_enabled[i] = false;
2727 /* ***** */
2728 } else {
2729 save->crtc_enabled[i] = false;
2730 }
2731 }
2732
2733 radeon_mc_wait_for_idle(rdev);
2734
2735 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
2736 if ((blackout & BLACKOUT_MODE_MASK) != 1) {
2737 /* Block CPU access */
2738 WREG32(BIF_FB_EN, 0);
2739 /* blackout the MC */
2740 blackout &= ~BLACKOUT_MODE_MASK;
2741 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
2742 }
2743 /* wait for the MC to settle */
2744 udelay(100);
2745
2746 /* lock double buffered regs */
2747 for (i = 0; i < rdev->num_crtc; i++) {
2748 if (save->crtc_enabled[i]) {
2749 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2750 if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
2751 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
2752 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2753 }
2754 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2755 if (!(tmp & 1)) {
2756 tmp |= 1;
2757 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2758 }
2759 }
2760 }
2761 }
2762
2763 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
2764 {
2765 u32 tmp, frame_count;
2766 int i, j;
2767
2768 /* update crtc base addresses */
2769 for (i = 0; i < rdev->num_crtc; i++) {
2770 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
2771 upper_32_bits(rdev->mc.vram_start));
2772 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
2773 upper_32_bits(rdev->mc.vram_start));
2774 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
2775 (u32)rdev->mc.vram_start);
2776 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
2777 (u32)rdev->mc.vram_start);
2778 }
2779
2780 if (!ASIC_IS_NODCE(rdev)) {
2781 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
2782 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
2783 }
2784
2785 /* unlock regs and wait for update */
2786 for (i = 0; i < rdev->num_crtc; i++) {
2787 if (save->crtc_enabled[i]) {
2788 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2789 if ((tmp & 0x7) != 0) {
2790 tmp &= ~0x7;
2791 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2792 }
2793 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2794 if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
2795 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
2796 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2797 }
2798 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2799 if (tmp & 1) {
2800 tmp &= ~1;
2801 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2802 }
2803 for (j = 0; j < rdev->usec_timeout; j++) {
2804 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2805 if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
2806 break;
2807 udelay(1);
2808 }
2809 }
2810 }
2811
2812 /* unblackout the MC */
2813 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
2814 tmp &= ~BLACKOUT_MODE_MASK;
2815 WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
2816 /* allow CPU access */
2817 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
2818
2819 for (i = 0; i < rdev->num_crtc; i++) {
2820 if (save->crtc_enabled[i]) {
2821 if (ASIC_IS_DCE6(rdev)) {
2822 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
2823 tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
2824 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2825 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
2826 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2827 } else {
2828 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2829 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
2830 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2831 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2832 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2833 }
2834 /* wait for the next frame */
2835 frame_count = radeon_get_vblank_counter(rdev, i);
2836 for (j = 0; j < rdev->usec_timeout; j++) {
2837 if (radeon_get_vblank_counter(rdev, i) != frame_count)
2838 break;
2839 udelay(1);
2840 }
2841 }
2842 }
2843 if (!ASIC_IS_NODCE(rdev)) {
2844 /* Unlock vga access */
2845 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
2846 mdelay(1);
2847 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
2848 }
2849 }
2850
2851 void evergreen_mc_program(struct radeon_device *rdev)
2852 {
2853 struct evergreen_mc_save save;
2854 u32 tmp;
2855 int i, j;
2856
2857 /* Initialize HDP */
2858 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2859 WREG32((0x2c14 + j), 0x00000000);
2860 WREG32((0x2c18 + j), 0x00000000);
2861 WREG32((0x2c1c + j), 0x00000000);
2862 WREG32((0x2c20 + j), 0x00000000);
2863 WREG32((0x2c24 + j), 0x00000000);
2864 }
2865 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
2866
2867 evergreen_mc_stop(rdev, &save);
2868 if (evergreen_mc_wait_for_idle(rdev)) {
2869 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2870 }
2871 /* Lockout access through VGA aperture*/
2872 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
2873 /* Update configuration */
2874 if (rdev->flags & RADEON_IS_AGP) {
2875 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
2876 /* VRAM before AGP */
2877 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2878 rdev->mc.vram_start >> 12);
2879 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2880 rdev->mc.gtt_end >> 12);
2881 } else {
2882 /* VRAM after AGP */
2883 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2884 rdev->mc.gtt_start >> 12);
2885 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2886 rdev->mc.vram_end >> 12);
2887 }
2888 } else {
2889 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2890 rdev->mc.vram_start >> 12);
2891 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2892 rdev->mc.vram_end >> 12);
2893 }
2894 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
2895 /* llano/ontario only */
2896 if ((rdev->family == CHIP_PALM) ||
2897 (rdev->family == CHIP_SUMO) ||
2898 (rdev->family == CHIP_SUMO2)) {
2899 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
2900 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
2901 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
2902 WREG32(MC_FUS_VM_FB_OFFSET, tmp);
2903 }
2904 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
2905 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
2906 WREG32(MC_VM_FB_LOCATION, tmp);
2907 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
2908 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
2909 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
2910 if (rdev->flags & RADEON_IS_AGP) {
2911 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
2912 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
2913 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
2914 } else {
2915 WREG32(MC_VM_AGP_BASE, 0);
2916 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
2917 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
2918 }
2919 if (evergreen_mc_wait_for_idle(rdev)) {
2920 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2921 }
2922 evergreen_mc_resume(rdev, &save);
2923 /* we need to own VRAM, so turn off the VGA renderer here
2924 * to stop it overwriting our objects */
2925 rv515_vga_render_disable(rdev);
2926 }
2927
2928 /*
2929 * CP.
2930 */
2931 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2932 {
2933 struct radeon_ring *ring = &rdev->ring[ib->ring];
2934 u32 next_rptr;
2935
2936 /* set to DX10/11 mode */
2937 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
2938 radeon_ring_write(ring, 1);
2939
2940 if (ring->rptr_save_reg) {
2941 next_rptr = ring->wptr + 3 + 4;
2942 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2943 radeon_ring_write(ring, ((ring->rptr_save_reg -
2944 PACKET3_SET_CONFIG_REG_START) >> 2));
2945 radeon_ring_write(ring, next_rptr);
2946 } else if (rdev->wb.enabled) {
2947 next_rptr = ring->wptr + 5 + 4;
2948 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
2949 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2950 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
2951 radeon_ring_write(ring, next_rptr);
2952 radeon_ring_write(ring, 0);
2953 }
2954
2955 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2956 radeon_ring_write(ring,
2957 #ifdef __BIG_ENDIAN
2958 (2 << 0) |
2959 #endif
2960 (ib->gpu_addr & 0xFFFFFFFC));
2961 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
2962 radeon_ring_write(ring, ib->length_dw);
2963 }
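
/* Illustrative dword layout of the INDIRECT_BUFFER packet emitted above:
 *	dw0: PACKET3(PACKET3_INDIRECT_BUFFER, 2)
 *	dw1: ib->gpu_addr & 0xFFFFFFFC (low bits, 4-byte aligned,
 *	     OR'd with the swap mode on big-endian)
 *	dw2: upper_32_bits(ib->gpu_addr) & 0xFF
 *	dw3: ib->length_dw (IB size in dwords)
 */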
2964
2965
2966 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
2967 {
2968 const __be32 *fw_data;
2969 int i;
2970
2971 if (!rdev->me_fw || !rdev->pfp_fw)
2972 return -EINVAL;
2973
2974 r700_cp_stop(rdev);
2975 WREG32(CP_RB_CNTL,
2976 #ifdef __BIG_ENDIAN
2977 BUF_SWAP_32BIT |
2978 #endif
2979 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2980
2981 fw_data = (const __be32 *)rdev->pfp_fw->data;
2982 WREG32(CP_PFP_UCODE_ADDR, 0);
2983 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
2984 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
2985 WREG32(CP_PFP_UCODE_ADDR, 0);
2986
2987 fw_data = (const __be32 *)rdev->me_fw->data;
2988 WREG32(CP_ME_RAM_WADDR, 0);
2989 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
2990 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
2991
2992 WREG32(CP_PFP_UCODE_ADDR, 0);
2993 WREG32(CP_ME_RAM_WADDR, 0);
2994 WREG32(CP_ME_RAM_RADDR, 0);
2995 return 0;
2996 }
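
/* Note (illustrative): both ucode images are stored big-endian, so each
 * word is converted with be32_to_cpup() while being streamed through the
 * fixed DATA registers; rewinding the ADDR/WADDR registers afterwards
 * leaves the engines fetching from word 0.
 */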
2997
2998 static int evergreen_cp_start(struct radeon_device *rdev)
2999 {
3000 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3001 int r, i;
3002 uint32_t cp_me;
3003
3004 r = radeon_ring_lock(rdev, ring, 7);
3005 if (r) {
3006 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3007 return r;
3008 }
3009 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3010 radeon_ring_write(ring, 0x1);
3011 radeon_ring_write(ring, 0x0);
3012 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
3013 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3014 radeon_ring_write(ring, 0);
3015 radeon_ring_write(ring, 0);
3016 radeon_ring_unlock_commit(rdev, ring, false);
3017
3018 cp_me = 0xff;
3019 WREG32(CP_ME_CNTL, cp_me);
3020
3021 r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
3022 if (r) {
3023 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3024 return r;
3025 }
3026
3027 /* setup clear context state */
3028 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3029 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3030
3031 for (i = 0; i < evergreen_default_size; i++)
3032 radeon_ring_write(ring, evergreen_default_state[i]);
3033
3034 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3035 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3036
3037 /* set clear context state */
3038 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3039 radeon_ring_write(ring, 0);
3040
3041 /* SQ_VTX_BASE_VTX_LOC */
3042 radeon_ring_write(ring, 0xc0026f00);
3043 radeon_ring_write(ring, 0x00000000);
3044 radeon_ring_write(ring, 0x00000000);
3045 radeon_ring_write(ring, 0x00000000);
3046
3047 /* Clear consts */
3048 radeon_ring_write(ring, 0xc0036f00);
3049 radeon_ring_write(ring, 0x00000bc4);
3050 radeon_ring_write(ring, 0xffffffff);
3051 radeon_ring_write(ring, 0xffffffff);
3052 radeon_ring_write(ring, 0xffffffff);
3053
3054 radeon_ring_write(ring, 0xc0026900);
3055 radeon_ring_write(ring, 0x00000316);
3056 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3057 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3058
3059 radeon_ring_unlock_commit(rdev, ring, false);
3060
3061 return 0;
3062 }
3063
3064 static int evergreen_cp_resume(struct radeon_device *rdev)
3065 {
3066 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3067 u32 tmp;
3068 u32 rb_bufsz;
3069 int r;
3070
3071 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
3072 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
3073 SOFT_RESET_PA |
3074 SOFT_RESET_SH |
3075 SOFT_RESET_VGT |
3076 SOFT_RESET_SPI |
3077 SOFT_RESET_SX));
3078 RREG32(GRBM_SOFT_RESET);
3079 mdelay(15);
3080 WREG32(GRBM_SOFT_RESET, 0);
3081 RREG32(GRBM_SOFT_RESET);
3082
3083 /* Set ring buffer size */
3084 rb_bufsz = order_base_2(ring->ring_size / 8);
3085 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3086 #ifdef __BIG_ENDIAN
3087 tmp |= BUF_SWAP_32BIT;
3088 #endif
3089 WREG32(CP_RB_CNTL, tmp);
3090 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3091 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3092
3093 /* Set the write pointer delay */
3094 WREG32(CP_RB_WPTR_DELAY, 0);
3095
3096 /* Initialize the ring buffer's read and write pointers */
3097 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
3098 WREG32(CP_RB_RPTR_WR, 0);
3099 ring->wptr = 0;
3100 WREG32(CP_RB_WPTR, ring->wptr);
3101
3102 /* set the wb address whether it's enabled or not */
3103 WREG32(CP_RB_RPTR_ADDR,
3104 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
3105 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3106 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3107
3108 if (rdev->wb.enabled)
3109 WREG32(SCRATCH_UMSK, 0xff);
3110 else {
3111 tmp |= RB_NO_UPDATE;
3112 WREG32(SCRATCH_UMSK, 0);
3113 }
3114
3115 mdelay(1);
3116 WREG32(CP_RB_CNTL, tmp);
3117
3118 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
3119 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
3120
3121 evergreen_cp_start(rdev);
3122 ring->ready = true;
3123 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
3124 if (r) {
3125 ring->ready = false;
3126 return r;
3127 }
3128 return 0;
3129 }
3130
3131 /*
3132 * Core functions
3133 */
3134 static void evergreen_gpu_init(struct radeon_device *rdev)
3135 {
3136 u32 gb_addr_config;
3137 u32 mc_arb_ramcfg;
3138 u32 sx_debug_1;
3139 u32 smx_dc_ctl0;
3140 u32 sq_config;
3141 u32 sq_lds_resource_mgmt;
3142 u32 sq_gpr_resource_mgmt_1;
3143 u32 sq_gpr_resource_mgmt_2;
3144 u32 sq_gpr_resource_mgmt_3;
3145 u32 sq_thread_resource_mgmt;
3146 u32 sq_thread_resource_mgmt_2;
3147 u32 sq_stack_resource_mgmt_1;
3148 u32 sq_stack_resource_mgmt_2;
3149 u32 sq_stack_resource_mgmt_3;
3150 u32 vgt_cache_invalidation;
3151 u32 hdp_host_path_cntl, tmp;
3152 u32 disabled_rb_mask;
3153 int i, j, ps_thread_count;
3154
3155 switch (rdev->family) {
3156 case CHIP_CYPRESS:
3157 case CHIP_HEMLOCK:
3158 rdev->config.evergreen.num_ses = 2;
3159 rdev->config.evergreen.max_pipes = 4;
3160 rdev->config.evergreen.max_tile_pipes = 8;
3161 rdev->config.evergreen.max_simds = 10;
3162 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
3163 rdev->config.evergreen.max_gprs = 256;
3164 rdev->config.evergreen.max_threads = 248;
3165 rdev->config.evergreen.max_gs_threads = 32;
3166 rdev->config.evergreen.max_stack_entries = 512;
3167 rdev->config.evergreen.sx_num_of_sets = 4;
3168 rdev->config.evergreen.sx_max_export_size = 256;
3169 rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 0x1);
	WREG32(SRBM_INT_ACK, 0x1);

	evergreen_fix_pci_max_read_req_size(rdev);

	RREG32(MC_SHARED_CHMAP);
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.evergreen.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.evergreen.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.evergreen.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.evergreen.tile_config |= 0 << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;

	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;

		efuse_straps_4 = RREG32_RCU(0x204);
		efuse_straps_3 = RREG32_RCU(0x203);
		tmp = (((efuse_straps_4 & 0xf) << 4) |
		       ((efuse_straps_3 & 0xf0000000) >> 28));
	} else {
		tmp = 0;
		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
			u32 rb_disable_bitmap;

			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
			tmp <<= 4;
			tmp |= rb_disable_bitmap;
		}
	}
	/* enabled RBs are just the ones not disabled :) */
	disabled_rb_mask = tmp;
	tmp = 0;
	for (i = 0; i < rdev->config.evergreen.max_backends; i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < rdev->config.evergreen.max_backends; i++)
			disabled_rb_mask &= ~(1 << i);
	}

	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 simd_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
		simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
		tmp <<= 16;
		tmp |= simd_disable_bitmap;
	}
	rdev->config.evergreen.active_simds = hweight32(~tmp);

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	if ((rdev->config.evergreen.max_backends == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 1) {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		} else {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
	}
	rdev->config.evergreen.backend_map = tmp;
	WREG32(GB_BACKEND_MAP, tmp);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family <= CHIP_SUMO2)
		WREG32(SMX_SAR_CTL0, 0x00010000);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);

	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

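/**
 * evergreen_mc_init - initialize the memory controller parameters
 *
 * @rdev: radeon_device pointer
 *
 * Look up the amount of vram and the vram width, and decide how to
 * place vram and gart within the GPU's address space.
 * Returns 0 for success.
 */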
int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		tmp = RREG32(FUS_MC_ARB_RAMCFG);
	else
		tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen/cayman/tn */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

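/**
 * evergreen_print_gpu_status_regs - print GPU status registers
 *
 * @rdev: radeon_device pointer
 *
 * Dump the GRBM, SRBM, CP, and DMA status registers to the kernel
 * log to help diagnose GPU hangs.
 */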
void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
		 RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
		 RREG32(SRBM_STATUS2));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
	if (rdev->family >= CHIP_CAYMAN) {
		dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
			 RREG32(DMA_STATUS_REG + 0x800));
	}
}

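/**
 * evergreen_is_display_hung - check if the display block is hung
 *
 * @rdev: radeon_device pointer
 *
 * Sample the HV counters on all enabled CRTCs several times; if none
 * of them advance, consider the display block hung.
 * Returns true if the display appears hung, false if not.
 */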
bool evergreen_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
			crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

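/**
 * evergreen_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks are busy and return a mask of RADEON_RESET_*
 * flags for the blocks that need to be reset.
 * Returns 0 if the GPU appears idle.
 */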
u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   SH_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   SPI_BUSY | VGT_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

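/**
 * evergreen_gpu_soft_reset - soft reset the GPU
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask via the GRBM and
 * SRBM soft reset registers, saving and restoring the MC state
 * around the reset.
 */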
static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	udelay(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		grbm_soft_reset |= SOFT_RESET_DB |
			SOFT_RESET_CB |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_SPI |
			SOFT_RESET_SX |
			SOFT_RESET_SH |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VC |
			SOFT_RESET_VGT;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP |
			SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}

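/**
 * evergreen_gpu_pci_config_reset - reset the GPU via the PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Halt the engines, disable bus mastering and MC access, then reset
 * the whole GPU through the PCI config space and wait for the asic
 * to come back out of reset.
 */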
void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
	udelay(50);
	/* Disable DMA */
	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);
	/* XXX other engines? */

	/* halt the rlc */
	r600_rlc_stop(rdev);

	udelay(50);

	/* set mclk/sclk to bypass */
	rv770_set_clk_bypass_mode(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* disable mem access */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* reset */
	radeon_pci_config_reset(rdev);
	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

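/**
 * evergreen_asic_reset - reset the asic
 *
 * @rdev: radeon_device pointer
 * @hard: force hard reset
 *
 * Try a soft reset of the busy blocks first; if that is not enough,
 * fall back to a PCI config reset (if enabled).
 * Returns 0 for success.
 */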
int evergreen_asic_reset(struct radeon_device *rdev, bool hard)
{
	u32 reset_mask;

	if (hard) {
		evergreen_gpu_pci_config_reset(rdev);
		return 0;
	}

	reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	/* try soft reset */
	evergreen_gpu_soft_reset(rdev, reset_mask);

	reset_mask = evergreen_gpu_check_soft_reset(rdev);

	/* try pci config reset */
	if (reset_mask && radeon_hard_reset)
		evergreen_gpu_pci_config_reset(rdev);

	reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/*
 * RLC
 */
#define RLC_SAVE_RESTORE_LIST_END_MARKER	0x00000000
#define RLC_CLEAR_STATE_END_MARKER		0x00000001

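/**
 * sumo_rlc_fini - tear down the RLC buffer objects
 *
 * @rdev: radeon_device pointer
 *
 * Unpin and free the RLC save/restore, clear state, and CP table
 * buffer objects.
 */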
void sumo_rlc_fini(struct radeon_device *rdev)
{
	int r;

	/* save restore block */
	if (rdev->rlc.save_restore_obj) {
		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
		if (unlikely(r != 0))
			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
		radeon_bo_unpin(rdev->rlc.save_restore_obj);
		radeon_bo_unreserve(rdev->rlc.save_restore_obj);

		radeon_bo_unref(&rdev->rlc.save_restore_obj);
		rdev->rlc.save_restore_obj = NULL;
	}

	/* clear state block */
	if (rdev->rlc.clear_state_obj) {
		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
		if (unlikely(r != 0))
			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
		radeon_bo_unpin(rdev->rlc.clear_state_obj);
		radeon_bo_unreserve(rdev->rlc.clear_state_obj);

		radeon_bo_unref(&rdev->rlc.clear_state_obj);
		rdev->rlc.clear_state_obj = NULL;
	}

	/* cp table block */
	if (rdev->rlc.cp_table_obj) {
		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
		if (unlikely(r != 0))
			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
		radeon_bo_unpin(rdev->rlc.cp_table_obj);
		radeon_bo_unreserve(rdev->rlc.cp_table_obj);

		radeon_bo_unref(&rdev->rlc.cp_table_obj);
		rdev->rlc.cp_table_obj = NULL;
	}
}

#define CP_ME_TABLE_SIZE	96

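/**
 * sumo_rlc_init - allocate and set up the RLC buffer objects
 *
 * @rdev: radeon_device pointer
 *
 * Allocate, pin, and fill the save/restore buffer, the clear state
 * buffer, and (if used) the CP table buffer for the RLC.
 * Returns 0 for success, negative error code on failure.
 */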
int sumo_rlc_init(struct radeon_device *rdev)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 dws, data, i, j, k, reg_num;
	u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
	u64 reg_list_mc_addr;
	const struct cs_section_def *cs_data;
	int r;

	src_ptr = rdev->rlc.reg_list;
	dws = rdev->rlc.reg_list_size;
	if (rdev->family >= CHIP_BONAIRE) {
		dws += (5 * 16) + 48 + 48 + 64;
	}
	cs_data = rdev->rlc.cs_data;

	if (src_ptr) {
		/* save restore block */
		if (rdev->rlc.save_restore_obj == NULL) {
			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
					     NULL, &rdev->rlc.save_restore_obj);
			if (r) {
				dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
				return r;
			}
		}

		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
		if (unlikely(r != 0)) {
			sumo_rlc_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->rlc.save_restore_gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->rlc.save_restore_obj);
			dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
			sumo_rlc_fini(rdev);
			return r;
		}

		r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
		if (r) {
			dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
			sumo_rlc_fini(rdev);
			return r;
		}
		/* write the sr buffer */
		dst_ptr = rdev->rlc.sr_ptr;
		if (rdev->family >= CHIP_TAHITI) {
			/* SI */
			for (i = 0; i < rdev->rlc.reg_list_size; i++)
				dst_ptr[i] = cpu_to_le32(src_ptr[i]);
		} else {
			/* ON/LN/TN */
			/* format:
			 * dw0: (reg2 << 16) | reg1
			 * dw1: reg1 save space
			 * dw2: reg2 save space
			 */
			for (i = 0; i < dws; i++) {
				data = src_ptr[i] >> 2;
				i++;
				if (i < dws)
					data |= (src_ptr[i] >> 2) << 16;
				j = (((i - 1) * 3) / 2);
				dst_ptr[j] = cpu_to_le32(data);
			}
			j = ((i * 3) / 2);
			dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
		}
		radeon_bo_kunmap(rdev->rlc.save_restore_obj);
		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
	}

	if (cs_data) {
		/* clear state block */
		if (rdev->family >= CHIP_BONAIRE) {
			rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
		} else if (rdev->family >= CHIP_TAHITI) {
			rdev->rlc.clear_state_size = si_get_csb_size(rdev);
			dws = rdev->rlc.clear_state_size + (256 / 4);
		} else {
			reg_list_num = 0;
			dws = 0;
			for (i = 0; cs_data[i].section != NULL; i++) {
				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
					reg_list_num++;
					dws += cs_data[i].section[j].reg_count;
				}
			}
			reg_list_blk_index = (3 * reg_list_num + 2);
			dws += reg_list_blk_index;
			rdev->rlc.clear_state_size = dws;
		}

		if (rdev->rlc.clear_state_obj == NULL) {
			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
					     NULL, &rdev->rlc.clear_state_obj);
			if (r) {
				dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
				sumo_rlc_fini(rdev);
				return r;
			}
		}
		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
		if (unlikely(r != 0)) {
			sumo_rlc_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->rlc.clear_state_gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->rlc.clear_state_obj);
			dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
			sumo_rlc_fini(rdev);
			return r;
		}

		r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
		if (r) {
			dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
			sumo_rlc_fini(rdev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = rdev->rlc.cs_ptr;
		if (rdev->family >= CHIP_BONAIRE) {
			cik_get_csb_buffer(rdev, dst_ptr);
		} else if (rdev->family >= CHIP_TAHITI) {
			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
			dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
			dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
			dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
			si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
		} else {
			reg_list_hdr_blk_index = 0;
			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
			data = upper_32_bits(reg_list_mc_addr);
			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
			reg_list_hdr_blk_index++;
			for (i = 0; cs_data[i].section != NULL; i++) {
				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
					reg_num = cs_data[i].section[j].reg_count;
					data = reg_list_mc_addr & 0xffffffff;
					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
					reg_list_hdr_blk_index++;

					data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
					reg_list_hdr_blk_index++;

					data = 0x08000000 | (reg_num * 4);
					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
					reg_list_hdr_blk_index++;

					for (k = 0; k < reg_num; k++) {
						data = cs_data[i].section[j].extent[k];
						dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
					}
					reg_list_mc_addr += reg_num * 4;
					reg_list_blk_index += reg_num;
				}
			}
			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
		}
		radeon_bo_kunmap(rdev->rlc.clear_state_obj);
		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
	}

	if (rdev->rlc.cp_table_size) {
		if (rdev->rlc.cp_table_obj == NULL) {
			r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
					     PAGE_SIZE, true,
					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
					     NULL, &rdev->rlc.cp_table_obj);
			if (r) {
				dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
				sumo_rlc_fini(rdev);
				return r;
			}
		}

		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
		if (unlikely(r != 0)) {
			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
			sumo_rlc_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->rlc.cp_table_gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->rlc.cp_table_obj);
			dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
			sumo_rlc_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
		if (r) {
			dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
			sumo_rlc_fini(rdev);
			return r;
		}

		cik_init_cp_pg_table(rdev);

		radeon_bo_kunmap(rdev->rlc.cp_table_obj);
		radeon_bo_unreserve(rdev->rlc.cp_table_obj);
	}

	return 0;
}

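/**
 * evergreen_rlc_start - enable the RLC
 *
 * @rdev: radeon_device pointer
 *
 * Enable the RLC, with graphics power gating on IGP parts.
 */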
static void evergreen_rlc_start(struct radeon_device *rdev)
{
	u32 mask = RLC_ENABLE;

	if (rdev->flags & RADEON_IS_IGP) {
		mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
	}

	WREG32(RLC_CNTL, mask);
}

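/**
 * evergreen_rlc_resume - load the RLC ucode and start the RLC
 *
 * @rdev: radeon_device pointer
 *
 * Stop the RLC, program the RLC registers, load the RLC ucode, and
 * start the RLC again.
 * Returns 0 for success, -EINVAL if no RLC firmware is loaded.
 */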
int evergreen_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_ARUBA) {
			u32 always_on_bitmap =
				3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
			/* find out the number of active simds */
			u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
			tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
			tmp = hweight32(~tmp);
			if (tmp == rdev->config.cayman.max_simds_per_se) {
				WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
				WREG32(TN_RLC_LB_PARAMS, 0x00601004);
				WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
				WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
				WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
			}
		} else {
			WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
			WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
		}
		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
	} else {
		WREG32(RLC_HB_BASE, 0);
		WREG32(RLC_HB_RPTR, 0);
		WREG32(RLC_HB_WPTR, 0);
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_ARUBA) {
		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	evergreen_rlc_start(rdev);

	return 0;
}

/* Interrupts */

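/**
 * evergreen_get_vblank_counter - get the frame count of a crtc
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to get the frame count from
 *
 * Returns the hardware frame count on the requested crtc, or 0 if
 * the crtc index is out of range.
 */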
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc >= rdev->num_crtc)
		return 0;
	else
		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

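/**
 * evergreen_disable_interrupt_state - force all interrupt sources off
 *
 * @rdev: radeon_device pointer
 *
 * Clear all of the interrupt enable bits (CP, DMA, vblank, pflip,
 * DAC, HPD, etc.) so that no interrupt sources remain armed.
 */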
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	int i;
	u32 tmp;

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0,
					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
		cayman_cp_int_cntl_setup(rdev, 1, 0);
		cayman_cp_int_cntl_setup(rdev, 2, 0);
		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
		WREG32(CAYMAN_DMA1_CNTL, tmp);
	} else
		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(SRBM_INT_CNTL, 0);
	for (i = 0; i < rdev->num_crtc; i++)
		WREG32(INT_MASK + crtc_offsets[i], 0);
	for (i = 0; i < rdev->num_crtc; i++)
		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], 0);

	/* only one DAC on DCE5 */
	if (!ASIC_IS_DCE5(rdev))
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

	for (i = 0; i < 6; i++)
		WREG32_AND(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_POLARITY);
}

/* Note that the order we write back regs here is important */
int evergreen_irq_set(struct radeon_device *rdev)
{
	int i;
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 grbm_int_cntl = 0;
	u32 dma_cntl, dma_cntl1 = 0;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

	if (rdev->family == CHIP_ARUBA)
		thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	else
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if (rdev->family >= CHIP_CAYMAN) {
		/* enable CP interrupts on all rings */
		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
		}
		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
		}
	} else {
		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= RB_INT_ENABLE;
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("evergreen_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->family >= CHIP_CAYMAN) {
		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int dma1\n");
			dma_cntl1 |= TRAP_ENABLE;
		}
	}

	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
	} else
		WREG32(CP_INT_CNTL, cp_int_cntl);

	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_CAYMAN)
		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);

	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_irq_kms_set_irq_n_enabled(
			rdev, INT_MASK + crtc_offsets[i],
			VBLANK_INT_MASK,
			rdev->irq.crtc_vblank_int[i] ||
			atomic_read(&rdev->irq.pflip[i]), "vblank", i);
	}

	for (i = 0; i < rdev->num_crtc; i++)
		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], GRPH_PFLIP_INT_MASK);

	for (i = 0; i < 6; i++) {
		radeon_irq_kms_set_irq_n_enabled(
			rdev, DC_HPDx_INT_CONTROL(i),
			DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN,
			rdev->irq.hpd[i], "HPD", i);
	}

	if (rdev->family == CHIP_ARUBA)
		WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
	else
		WREG32(CG_THERMAL_INT, thermal_int);

	for (i = 0; i < 6; i++) {
		radeon_irq_kms_set_irq_n_enabled(
			rdev, AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
			AFMT_AZ_FORMAT_WTRIG_MASK,
			rdev->irq.afmt[i], "HDMI", i);
	}

	/* posting read */
	RREG32(SRBM_STATUS);

	return 0;
}

/* Note that the order we write back regs here is important */
static void evergreen_irq_ack(struct radeon_device *rdev)
{
	int i, j;
	u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;

	for (i = 0; i < 6; i++) {
		disp_int[i] = RREG32(evergreen_disp_int_status[i]);
		afmt_status[i] = RREG32(AFMT_STATUS + crtc_offsets[i]);
		if (i < rdev->num_crtc)
			grph_int[i] = RREG32(GRPH_INT_STATUS + crtc_offsets[i]);
	}

	/* We write back the interrupt registers in pairs */
	for (i = 0; i < rdev->num_crtc; i += 2) {
		for (j = i; j < (i + 2); j++) {
			if (grph_int[j] & GRPH_PFLIP_INT_OCCURRED)
				WREG32(GRPH_INT_STATUS + crtc_offsets[j],
				       GRPH_PFLIP_INT_CLEAR);
		}

		for (j = i; j < (i + 2); j++) {
			if (disp_int[j] & LB_D1_VBLANK_INTERRUPT)
				WREG32(VBLANK_STATUS + crtc_offsets[j],
				       VBLANK_ACK);
			if (disp_int[j] & LB_D1_VLINE_INTERRUPT)
				WREG32(VLINE_STATUS + crtc_offsets[j],
				       VLINE_ACK);
		}
	}

	for (i = 0; i < 6; i++) {
		if (disp_int[i] & DC_HPD1_INTERRUPT)
			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_ACK);
	}

	for (i = 0; i < 6; i++) {
		if (disp_int[i] & DC_HPD1_RX_INTERRUPT)
			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_RX_INT_ACK);
	}

	for (i = 0; i < 6; i++) {
		if (afmt_status[i] & AFMT_AZ_FORMAT_WTRIG)
			WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
				  AFMT_AZ_FORMAT_WTRIG_ACK);
	}
}

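/**
 * evergreen_irq_disable - disable interrupt handling
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts on the IH ring, then acknowledge and disable
 * all of the individual interrupt sources.
 */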
static void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}

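/**
 * evergreen_irq_suspend - disable interrupts for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts and stop the RLC.
 */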
void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

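/**
 * evergreen_get_ih_wptr - get the current write pointer of the IH ring
 *
 * @rdev: radeon_device pointer
 *
 * Fetch the write pointer from the writeback buffer (or the register
 * if writeback is disabled) and handle ring buffer overflows.
 * Returns the masked write pointer.
 */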
static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

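/**
 * evergreen_irq_process - process the interrupt ring
 *
 * @rdev: radeon_device pointer
 *
 * Walk the IH ring between the read and write pointers and dispatch
 * each vector (vblank, page flip, hotplug, HDMI, fences, thermal,
 * etc.) to the appropriate handler or work queue.
 * Returns IRQ_HANDLED after processing, or IRQ_NONE if interrupts
 * are disabled or another thread is already processing the ring.
 */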
int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;
	u32 crtc_idx, hpd_idx, afmt_idx;
	u32 mask;
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_dp = false;
	bool queue_thermal = false;
	u32 status, addr;
	const char *event_name;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
		case 2: /* D2 vblank/vline */
		case 3: /* D3 vblank/vline */
		case 4: /* D4 vblank/vline */
		case 5: /* D5 vblank/vline */
		case 6: /* D6 vblank/vline */
			crtc_idx = src_id - 1;

			if (src_data == 0) { /* vblank */
				mask = LB_D1_VBLANK_INTERRUPT;
				event_name = "vblank";

				if (rdev->irq.crtc_vblank_int[crtc_idx]) {
					drm_handle_vblank(rdev->ddev, crtc_idx);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
					radeon_crtc_handle_vblank(rdev,
								  crtc_idx);
				}

			} else if (src_data == 1) { /* vline */
				mask = LB_D1_VLINE_INTERRUPT;
				event_name = "vline";
			} else {
				DRM_DEBUG("Unhandled interrupt: %d %d\n",
					  src_id, src_data);
				break;
			}

			if (!(disp_int[crtc_idx] & mask)) {
				DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
					  crtc_idx + 1, event_name);
			}

			disp_int[crtc_idx] &= ~mask;
			DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);

			break;
		case 8: /* D1 page flip */
		case 10: /* D2 page flip */
		case 12: /* D3 page flip */
		case 14: /* D4 page flip */
		case 16: /* D5 page flip */
		case 18: /* D6 page flip */
			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
			break;
		case 42: /* HPD hotplug */
			if (src_data <= 5) {
				hpd_idx = src_data;
				mask = DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				event_name = "HPD";

			} else if (src_data <= 11) {
				hpd_idx = src_data - 6;
				mask = DC_HPD1_RX_INTERRUPT;
				queue_dp = true;
				event_name = "HPD_RX";

			} else {
				DRM_DEBUG("Unhandled interrupt: %d %d\n",
					  src_id, src_data);
				break;
			}

			if (!(disp_int[hpd_idx] & mask))
				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

			disp_int[hpd_idx] &= ~mask;
			DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);

			break;
		case 44: /* hdmi */
			afmt_idx = src_data;
			if (afmt_idx > 5) {
				DRM_ERROR("Unhandled interrupt: %d %d\n",
					  src_id, src_data);
				break;
			}
			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

			afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
			queue_hdmi = true;
			DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
			break;
		case 96:
			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
			WREG32(SRBM_INT_ACK, 0x1);
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 146:
		case 147:
			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
			/* reset addr and status */
			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
			if (addr == 0x0 && status == 0x0)
				break;
			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
				addr);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
				status);
			cayman_vm_decode_fault(rdev, status, addr);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			if (rdev->family >= CHIP_CAYMAN) {
				switch (src_data) {
				case 0:
					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
					break;
				case 1:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
					break;
				case 2:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
					break;
				}
			} else
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		case 244: /* DMA trap event */
			if (rdev->family >= CHIP_CAYMAN) {
				DRM_DEBUG("IH: DMA1 trap\n");
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
			}
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	if (queue_dp)
		schedule_work(&rdev->dp_work);
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

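/**
 * evergreen_uvd_init - init the UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the UVD block and the UVD ring; on failure, disable
 * UVD support for this device.
 */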
static void evergreen_uvd_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = radeon_uvd_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
		/*
		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
		 * uvd_v2_2_resume() fail early, so nothing happens there.
		 * It is therefore pointless to try to go through that code;
		 * just disable UVD here instead.
		 */
		rdev->has_uvd = false;
		return;
	}
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
}

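/**
 * evergreen_uvd_start - resume UVD and start its fence driver
 *
 * @rdev: radeon_device pointer
 *
 * Resume the UVD block and start the fence driver on the UVD ring;
 * on failure, mark the UVD ring as unusable.
 */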
evergreen_uvd_start(struct radeon_device * rdev)4956 static void evergreen_uvd_start(struct radeon_device *rdev)
4957 {
4958 int r;
4959
4960 if (!rdev->has_uvd)
4961 return;
4962
4963 r = uvd_v2_2_resume(rdev);
4964 if (r) {
4965 dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
4966 goto error;
4967 }
4968 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
4969 if (r) {
4970 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
4971 goto error;
4972 }
4973 return;
4974
4975 error:
4976 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
4977 }
4978
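/**
 * evergreen_uvd_resume - bring up the UVD ring
 *
 * @rdev: radeon_device pointer
 *
 * Initialize and start the UVD ring, provided UVD is present and
 * its fence ring was started successfully (evergreen+).
 */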
4979 static void evergreen_uvd_resume(struct radeon_device *rdev)
4980 {
4981 struct radeon_ring *ring;
4982 int r;
4983
4984 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
4985 return;
4986
4987 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
4988 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
4989 if (r) {
4990 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
4991 return;
4992 }
4993 r = uvd_v1_0_init(rdev);
4994 if (r) {
4995 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
4996 return;
4997 }
4998 }
4999
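/**
 * evergreen_startup - program the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Program the asic to a functional state: bring up the MC, GART,
 * RLC, writeback, rings, interrupts, UVD and the IB pool
 * (evergreen+). Called at init and resume time.
 * Returns 0 for success, error for failure.
 */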
5000 static int evergreen_startup(struct radeon_device *rdev)
5001 {
5002 struct radeon_ring *ring;
5003 int r;
5004
5005 /* enable pcie gen2 link */
5006 evergreen_pcie_gen2_enable(rdev);
5007 /* enable aspm */
5008 evergreen_program_aspm(rdev);
5009
5010 /* scratch needs to be initialized before MC */
5011 r = r600_vram_scratch_init(rdev);
5012 if (r)
5013 return r;
5014
5015 evergreen_mc_program(rdev);
5016
5017 if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
5018 r = ni_mc_load_microcode(rdev);
5019 if (r) {
5020 DRM_ERROR("Failed to load MC firmware!\n");
5021 return r;
5022 }
5023 }
5024
5025 if (rdev->flags & RADEON_IS_AGP) {
5026 evergreen_agp_enable(rdev);
5027 } else {
5028 r = evergreen_pcie_gart_enable(rdev);
5029 if (r)
5030 return r;
5031 }
5032 evergreen_gpu_init(rdev);
5033
5034 /* allocate rlc buffers */
5035 if (rdev->flags & RADEON_IS_IGP) {
5036 rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
5037 rdev->rlc.reg_list_size =
5038 (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
5039 rdev->rlc.cs_data = evergreen_cs_data;
5040 r = sumo_rlc_init(rdev);
5041 if (r) {
5042 DRM_ERROR("Failed to init rlc BOs!\n");
5043 return r;
5044 }
5045 }
5046
5047 /* allocate wb buffer */
5048 r = radeon_wb_init(rdev);
5049 if (r)
5050 return r;
5051
5052 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
5053 if (r) {
5054 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
5055 return r;
5056 }
5057
5058 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
5059 if (r) {
5060 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
5061 return r;
5062 }
5063
5064 evergreen_uvd_start(rdev);
5065
5066 /* Enable IRQ */
5067 if (!rdev->irq.installed) {
5068 r = radeon_irq_kms_init(rdev);
5069 if (r)
5070 return r;
5071 }
5072
5073 r = r600_irq_init(rdev);
5074 if (r) {
5075 DRM_ERROR("radeon: IH init failed (%d).\n", r);
5076 radeon_irq_kms_fini(rdev);
5077 return r;
5078 }
5079 evergreen_irq_set(rdev);
5080
5081 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
5082 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
5083 RADEON_CP_PACKET2);
5084 if (r)
5085 return r;
5086
5087 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
5088 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
5089 DMA_PACKET(DMA_PACKET_NOP, 0, 0));
5090 if (r)
5091 return r;
5092
5093 r = evergreen_cp_load_microcode(rdev);
5094 if (r)
5095 return r;
5096 r = evergreen_cp_resume(rdev);
5097 if (r)
5098 return r;
5099 r = r600_dma_resume(rdev);
5100 if (r)
5101 return r;
5102
5103 evergreen_uvd_resume(rdev);
5104
5105 r = radeon_ib_pool_init(rdev);
5106 if (r) {
5107 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
5108 return r;
5109 }
5110
5111 r = radeon_audio_init(rdev);
5112 if (r) {
5113 DRM_ERROR("radeon: audio init failed\n");
5114 return r;
5115 }
5116
5117 return 0;
5118 }
5119
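/**
 * evergreen_resume - resume the asic
 *
 * @rdev: radeon_device pointer
 *
 * Bring the chip back up after suspend: reset and re-post the
 * asic, restore the golden registers, then run the common startup
 * sequence (evergreen+).
 * Returns 0 for success, error for failure.
 */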
5120 int evergreen_resume(struct radeon_device *rdev)
5121 {
5122 int r;
5123
5124 /* reset the asic, the gfx blocks are often in a bad state
5125 * after the driver is unloaded or after a resume
5126 */
5127 if (radeon_asic_reset(rdev))
5128 		dev_warn(rdev->dev, "GPU reset failed!\n");
5129 	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
5130 	 * posting will perform the tasks necessary to bring the GPU back
5131 	 * into good shape.
5132 	 */
5133 /* post card */
5134 atom_asic_init(rdev->mode_info.atom_context);
5135
5136 /* init golden registers */
5137 evergreen_init_golden_registers(rdev);
5138
5139 if (rdev->pm.pm_method == PM_METHOD_DPM)
5140 radeon_pm_resume(rdev);
5141
5142 rdev->accel_working = true;
5143 r = evergreen_startup(rdev);
5144 if (r) {
5145 DRM_ERROR("evergreen startup failed on resume\n");
5146 rdev->accel_working = false;
5147 return r;
5148 }
5149
5150 	return r;
5152 }
5153
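/**
 * evergreen_suspend - suspend the asic
 *
 * @rdev: radeon_device pointer
 *
 * Quiesce the hw for suspend: stop the CP and DMA rings, suspend
 * UVD and audio, disable interrupts, writeback and the GART
 * (evergreen+). Returns 0 for success.
 */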
5154 int evergreen_suspend(struct radeon_device *rdev)
5155 {
5156 radeon_pm_suspend(rdev);
5157 radeon_audio_fini(rdev);
5158 if (rdev->has_uvd) {
5159 radeon_uvd_suspend(rdev);
5160 uvd_v1_0_fini(rdev);
5161 }
5162 r700_cp_stop(rdev);
5163 r600_dma_stop(rdev);
5164 evergreen_irq_suspend(rdev);
5165 radeon_wb_disable(rdev);
5166 evergreen_pcie_gart_disable(rdev);
5167
5168 return 0;
5169 }
5170
5171 /* The plan is to move initialization into this function and to
5172  * use helper functions so that radeon_device_init does little
5173  * more than call asic specific functions. This should also
5174  * allow us to remove a bunch of callback functions like
5175  * vram_info.
5176  */
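/**
 * evergreen_init - asic specific driver and hw init
 *
 * @rdev: radeon_device pointer
 *
 * Set up the asic specific portions of the driver (BIOS, clocks,
 * MC, rings, etc.) and bring the hw to a functional state
 * (evergreen+). Returns 0 for success, error for failure.
 */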
5177 int evergreen_init(struct radeon_device *rdev)
5178 {
5179 int r;
5180
5181 /* Read BIOS */
5182 if (!radeon_get_bios(rdev)) {
5183 if (ASIC_IS_AVIVO(rdev))
5184 return -EINVAL;
5185 }
5186 /* Must be an ATOMBIOS */
5187 if (!rdev->is_atom_bios) {
5188 dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
5189 return -EINVAL;
5190 }
5191 r = radeon_atombios_init(rdev);
5192 if (r)
5193 return r;
5194 /* reset the asic, the gfx blocks are often in a bad state
5195 * after the driver is unloaded or after a resume
5196 */
5197 if (radeon_asic_reset(rdev))
5198 		dev_warn(rdev->dev, "GPU reset failed!\n");
5199 /* Post card if necessary */
5200 if (!radeon_card_posted(rdev)) {
5201 if (!rdev->bios) {
5202 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
5203 return -EINVAL;
5204 }
5205 DRM_INFO("GPU not posted. posting now...\n");
5206 atom_asic_init(rdev->mode_info.atom_context);
5207 }
5208 /* init golden registers */
5209 evergreen_init_golden_registers(rdev);
5210 /* Initialize scratch registers */
5211 r600_scratch_init(rdev);
5212 /* Initialize surface registers */
5213 radeon_surface_init(rdev);
5214 /* Initialize clocks */
5215 radeon_get_clock_info(rdev->ddev);
5216 /* Fence driver */
5217 radeon_fence_driver_init(rdev);
5218 /* initialize AGP */
5219 if (rdev->flags & RADEON_IS_AGP) {
5220 r = radeon_agp_init(rdev);
5221 if (r)
5222 radeon_agp_disable(rdev);
5223 }
5224 /* initialize memory controller */
5225 r = evergreen_mc_init(rdev);
5226 if (r)
5227 return r;
5228 /* Memory manager */
5229 r = radeon_bo_init(rdev);
5230 if (r)
5231 return r;
5232
5233 if (ASIC_IS_DCE5(rdev)) {
5234 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
5235 r = ni_init_microcode(rdev);
5236 if (r) {
5237 DRM_ERROR("Failed to load firmware!\n");
5238 return r;
5239 }
5240 }
5241 } else {
5242 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
5243 r = r600_init_microcode(rdev);
5244 if (r) {
5245 DRM_ERROR("Failed to load firmware!\n");
5246 return r;
5247 }
5248 }
5249 }
5250
5251 /* Initialize power management */
5252 radeon_pm_init(rdev);
5253
5254 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
5255 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
5256
5257 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
5258 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
5259
5260 evergreen_uvd_init(rdev);
5261
5262 rdev->ih.ring_obj = NULL;
5263 r600_ih_ring_init(rdev, 64 * 1024);
5264
5265 r = r600_pcie_gart_init(rdev);
5266 if (r)
5267 return r;
5268
5269 rdev->accel_working = true;
5270 r = evergreen_startup(rdev);
5271 if (r) {
5272 dev_err(rdev->dev, "disabling GPU acceleration\n");
5273 r700_cp_fini(rdev);
5274 r600_dma_fini(rdev);
5275 r600_irq_fini(rdev);
5276 if (rdev->flags & RADEON_IS_IGP)
5277 sumo_rlc_fini(rdev);
5278 radeon_wb_fini(rdev);
5279 radeon_ib_pool_fini(rdev);
5280 radeon_irq_kms_fini(rdev);
5281 evergreen_pcie_gart_fini(rdev);
5282 rdev->accel_working = false;
5283 }
5284
5285 /* Don't start up if the MC ucode is missing on BTC parts.
5286 * The default clocks and voltages before the MC ucode
5287 	 * is loaded are not sufficient for advanced operations.
5288 */
5289 if (ASIC_IS_DCE5(rdev)) {
5290 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
5291 DRM_ERROR("radeon: MC ucode required for NI+.\n");
5292 return -EINVAL;
5293 }
5294 }
5295
5296 return 0;
5297 }
5298
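/**
 * evergreen_fini - asic specific driver and hw teardown
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the asic specific portions of the driver, roughly in
 * the reverse order of evergreen_init() (evergreen+).
 */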
5299 void evergreen_fini(struct radeon_device *rdev)
5300 {
5301 radeon_pm_fini(rdev);
5302 radeon_audio_fini(rdev);
5303 r700_cp_fini(rdev);
5304 r600_dma_fini(rdev);
5305 r600_irq_fini(rdev);
5306 if (rdev->flags & RADEON_IS_IGP)
5307 sumo_rlc_fini(rdev);
5308 radeon_wb_fini(rdev);
5309 radeon_ib_pool_fini(rdev);
5310 radeon_irq_kms_fini(rdev);
5311 uvd_v1_0_fini(rdev);
5312 radeon_uvd_fini(rdev);
5313 evergreen_pcie_gart_fini(rdev);
5314 r600_vram_scratch_fini(rdev);
5315 radeon_gem_fini(rdev);
5316 radeon_fence_driver_fini(rdev);
5317 radeon_agp_fini(rdev);
5318 radeon_bo_fini(rdev);
5319 radeon_atombios_fini(rdev);
5320 kfree(rdev->bios);
5321 rdev->bios = NULL;
5322 }
5323
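/**
 * evergreen_pcie_gen2_enable - enable pcie gen2 link speeds
 *
 * @rdev: radeon_device pointer
 *
 * Switch the pcie link to gen2 speeds when the chip, the slot and
 * the upstream bridge support it (evergreen+). Can be disabled
 * with the radeon.pcie_gen2=0 module parameter.
 */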
5324 void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
5325 {
5326 u32 link_width_cntl, speed_cntl;
5327
5328 if (radeon_pcie_gen2 == 0)
5329 return;
5330
5331 if (rdev->flags & RADEON_IS_IGP)
5332 return;
5333
5334 if (!(rdev->flags & RADEON_IS_PCIE))
5335 return;
5336
5337 /* x2 cards have a special sequence */
5338 if (ASIC_IS_X2(rdev))
5339 return;
5340
5341 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
5342 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
5343 return;
5344
5345 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
5346 if (speed_cntl & LC_CURRENT_DATA_RATE) {
5347 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
5348 return;
5349 }
5350
5351 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
5352
5353 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
5354 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
5355
5356 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
5357 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
5358 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
5359
5360 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
5361 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
5362 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
5363
5364 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
5365 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
5366 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
5367
5368 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
5369 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
5370 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
5371
5372 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
5373 speed_cntl |= LC_GEN2_EN_STRAP;
5374 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
5375
5376 } else {
5377 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
5378 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
5379 if (1)
5380 link_width_cntl |= LC_UPCONFIGURE_DIS;
5381 else
5382 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
5383 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
5384 }
5385 }
5386
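/**
 * evergreen_program_aspm - program pcie ASPM (L0s/L1) behaviour
 *
 * @rdev: radeon_device pointer
 *
 * Configure pcie Active State Power Management: L0s/L1 inactivity
 * timeouts, PLL power down in L1 and dynamic lane power states
 * (evergreen+). Can be disabled with the radeon.aspm=0 module
 * parameter.
 */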
5387 void evergreen_program_aspm(struct radeon_device *rdev)
5388 {
5389 u32 data, orig;
5390 u32 pcie_lc_cntl, pcie_lc_cntl_old;
5391 bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
5392 	/* fusion_platform should be true if the system is a fusion
5393 	 * system (an APU, or a dGPU in a fusion system).
5394 	 * todo: actually check whether this is a fusion platform;
5395 	 * for now only IGPs are treated as such below.
5396 	 */
5397 bool fusion_platform = false;
5398
5399 if (radeon_aspm == 0)
5400 return;
5401
5402 if (!(rdev->flags & RADEON_IS_PCIE))
5403 return;
5404
5405 switch (rdev->family) {
5406 case CHIP_CYPRESS:
5407 case CHIP_HEMLOCK:
5408 case CHIP_JUNIPER:
5409 case CHIP_REDWOOD:
5410 case CHIP_CEDAR:
5411 case CHIP_SUMO:
5412 case CHIP_SUMO2:
5413 case CHIP_PALM:
5414 case CHIP_ARUBA:
5415 disable_l0s = true;
5416 break;
5417 default:
5418 disable_l0s = false;
5419 break;
5420 }
5421
5422 if (rdev->flags & RADEON_IS_IGP)
5423 fusion_platform = true; /* XXX also dGPUs in a fusion system */
5424
5425 data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
5426 if (fusion_platform)
5427 data &= ~MULTI_PIF;
5428 else
5429 data |= MULTI_PIF;
5430 if (data != orig)
5431 WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
5432
5433 data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
5434 if (fusion_platform)
5435 data &= ~MULTI_PIF;
5436 else
5437 data |= MULTI_PIF;
5438 if (data != orig)
5439 WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
5440
5441 pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
5442 pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
5443 if (!disable_l0s) {
5444 if (rdev->family >= CHIP_BARTS)
5445 pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
5446 else
5447 pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
5448 }
5449
5450 if (!disable_l1) {
5451 if (rdev->family >= CHIP_BARTS)
5452 pcie_lc_cntl |= LC_L1_INACTIVITY(7);
5453 else
5454 pcie_lc_cntl |= LC_L1_INACTIVITY(8);
5455
5456 if (!disable_plloff_in_l1) {
5457 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
5458 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
5459 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
5460 if (data != orig)
5461 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
5462
5463 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
5464 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
5465 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
5466 if (data != orig)
5467 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
5468
5469 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
5470 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
5471 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
5472 if (data != orig)
5473 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
5474
5475 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
5476 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
5477 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
5478 if (data != orig)
5479 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
5480
5481 if (rdev->family >= CHIP_BARTS) {
5482 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
5483 data &= ~PLL_RAMP_UP_TIME_0_MASK;
5484 data |= PLL_RAMP_UP_TIME_0(4);
5485 if (data != orig)
5486 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
5487
5488 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
5489 data &= ~PLL_RAMP_UP_TIME_1_MASK;
5490 data |= PLL_RAMP_UP_TIME_1(4);
5491 if (data != orig)
5492 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
5493
5494 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
5495 data &= ~PLL_RAMP_UP_TIME_0_MASK;
5496 data |= PLL_RAMP_UP_TIME_0(4);
5497 if (data != orig)
5498 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
5499
5500 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
5501 data &= ~PLL_RAMP_UP_TIME_1_MASK;
5502 data |= PLL_RAMP_UP_TIME_1(4);
5503 if (data != orig)
5504 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
5505 }
5506
5507 data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
5508 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
5509 data |= LC_DYN_LANES_PWR_STATE(3);
5510 if (data != orig)
5511 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
5512
5513 if (rdev->family >= CHIP_BARTS) {
5514 data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
5515 data &= ~LS2_EXIT_TIME_MASK;
5516 data |= LS2_EXIT_TIME(1);
5517 if (data != orig)
5518 WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
5519
5520 data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
5521 data &= ~LS2_EXIT_TIME_MASK;
5522 data |= LS2_EXIT_TIME(1);
5523 if (data != orig)
5524 WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
5525 }
5526 }
5527 }
5528
5529 /* evergreen parts only */
5530 if (rdev->family < CHIP_BARTS)
5531 pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
5532
5533 if (pcie_lc_cntl != pcie_lc_cntl_old)
5534 WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
5535 }
5536