/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};
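
/*
 * The atomfirmware data tables are versioned.  A table is located with
 * get_index_into_master_table() and amdgpu_atom_parse_data_header(),
 * which also reports the table's format revision (frev) and content
 * revision (crev); the matching member of a union such as firmware_info
 * is then used to access it.  Fields shared by all v3.x layouts, such
 * as firmware_capability, are read through the oldest member (v31).
 */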

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on
 * success, or 0 if the table cannot be parsed.
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}

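/*
 * The capability bits are typically queried once at init time and
 * cached in adev->mode_info.firmware_flags, e.g. (illustrative sketch,
 * not part of this file):
 *
 *	adev->mode_info.firmware_flags =
 *		amdgpu_atomfirmware_query_firmware_capability(adev);
 *
 * so that the ATOM_FIRMWARE_CAP_* helpers below can test the cached
 * flags without re-parsing the vbios tables.
 */
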
/*
 * Helper function to query gpu virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if gpu virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}

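/*
 * Helper function to initialize the bios scratch register offset
 *
 * @adev: amdgpu_device pointer
 *
 * Caches the start address of the bios scratch registers, as reported
 * by the firmwareinfo table, in adev->bios_scratch_reg_offset.
 */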
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

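/*
 * Helper function to allocate scratch memory for atombios execution
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the vram_usagebyfirmware table to size the atom context
 * scratch buffer, and records any SR-IOV VRAM reservation the firmware
 * requests.  Return 0 on success, -ENOMEM if the allocation fails.
 */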
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
		    (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests a VRAM reservation for SR-IOV */
			adev->mman.fw_vram_usage_start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->mman.fw_vram_usage_size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

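/*
 * Helper function to translate an atomfirmware memory type into the
 * corresponding AMDGPU_VRAM_TYPE_* value
 *
 * @adev: amdgpu_device pointer
 * @atom_mem_type: memory type as reported by the vbios
 *
 * APUs and dGPUs use different memory type encodings, so AMD_IS_APU
 * selects which mapping applies.  Unrecognized types map to
 * AMDGPU_VRAM_TYPE_UNKNOWN.
 */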
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

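/*
 * Helper function to query vram width, type and vendor
 *
 * @adev: amdgpu_device pointer
 * @vram_width: returned vram width in bits, may be NULL
 * @vram_type: returned AMDGPU_VRAM_TYPE_* value, may be NULL
 * @vram_vendor: returned vram vendor id, may be NULL
 *
 * Reads the integratedsysteminfo table on APUs and the vram_info table
 * on dGPUs.  Return 0 on success, -EINVAL if the table revision is not
 * supported.
 */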
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v11.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v21.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			if (frev == 3) {
				switch (crev) {
				/* v30 */
				case 0:
					vram_module = (union vram_module *)vram_info->v30.vram_module;
					mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					mem_type = vram_info->v30.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_info->v30.channel_num;
					mem_channel_width = vram_info->v30.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					break;
				default:
					return -EINVAL;
				}
			} else if (frev == 2) {
				switch (crev) {
				/* v23 */
				case 3:
					if (module_id > vram_info->v23.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v23.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v24 */
				case 4:
					if (module_id > vram_info->v24.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v24.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v10.vram_module_size);
						i++;
					}
					mem_type = vram_module->v10.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v10.channel_num;
					mem_channel_width = vram_module->v10.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v25 */
				case 5:
					if (module_id > vram_info->v25.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v25.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v11.vram_module_size);
						i++;
					}
					mem_type = vram_module->v11.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v11.channel_num;
					mem_channel_width = vram_module->v11.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v26 */
				case 6:
					if (module_id > vram_info->v26.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v26.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				default:
					return -EINVAL;
				}
			} else {
				/* invalid frev */
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Return true if the vbios enabled ecc by default, provided the umc
 * info table is available, or false if ecc is not enabled by default
 * or the umc info table is not available
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;
	u32 umc_config;
	u32 umc_config1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		if (frev == 3) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				ecc_default_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		}
	}

	return ecc_default_enabled;
}

/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};

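/*
 * Helper function to query the bootup clocks and pll parameters
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the bootup sclk/mclk from the firmwareinfo table and the
 * reference clocks from the smu_info, umc_info and (on Navi and newer)
 * gfx_info tables.  Return 0 if at least one table was parsed
 * successfully, -EINVAL otherwise.
 */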
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/* If the asic is Navi+, the rlc reference clock from the vbios
	 * gfx_info table is used for the system clock.
	 */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
						  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}

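/*
 * Helper function to populate the gfx configuration from the gfx_info
 * table
 *
 * @adev: amdgpu_device pointer
 *
 * Fills in adev->gfx.config (and, for v2.x tables, adev->gfx.cu_info)
 * from the vbios gfx_info table.  Return 0 on success, -EINVAL if the
 * table is missing or its revision is not supported.
 */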
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}

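/*
 * Helper function to query the firmware-reserved framebuffer size
 *
 * @adev: amdgpu_device pointer
 *
 * Return the reserved size in bytes as reported by firmware_info v3.4,
 * 0 if the table is unavailable or does not report one, or -EINVAL if
 * the table revision is not 3.x.
 */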
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
					   &frev, &crev, &data_offset))
		/* failed to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}

/*
 * Helper function to execute the asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 on success, a negative error code on failure
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
}
