Lines Matching refs:adev

149 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_device_get_pcie_replay_count() local
150 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); in amdgpu_device_get_pcie_replay_count()
158 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
174 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_device_get_product_name() local
176 return sysfs_emit(buf, "%s\n", adev->product_name); in amdgpu_device_get_product_name()
196 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_device_get_product_number() local
198 return sysfs_emit(buf, "%s\n", adev->product_number); in amdgpu_device_get_product_number()
218 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_device_get_serial_number() local
220 return sysfs_emit(buf, "%s\n", adev->serial); in amdgpu_device_get_serial_number()
236 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_supports_px() local
238 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid()) in amdgpu_device_supports_px()
253 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_supports_boco() local
255 if (adev->has_pr3 || in amdgpu_device_supports_boco()
256 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid())) in amdgpu_device_supports_boco()
271 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_supports_baco() local
273 return amdgpu_asic_supports_baco(adev); in amdgpu_device_supports_baco()
304 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, in amdgpu_device_mm_access() argument
313 if (!drm_dev_enter(adev_to_drm(adev), &idx)) in amdgpu_device_mm_access()
318 spin_lock_irqsave(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
333 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
348 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, in amdgpu_device_aper_access() argument
356 if (!adev->mman.aper_base_kaddr) in amdgpu_device_aper_access()
359 last = min(pos + size, adev->gmc.visible_vram_size); in amdgpu_device_aper_access()
361 addr = adev->mman.aper_base_kaddr + pos; in amdgpu_device_aper_access()
367 amdgpu_device_flush_hdp(adev, NULL); in amdgpu_device_aper_access()
369 amdgpu_device_invalidate_hdp(adev, NULL); in amdgpu_device_aper_access()
391 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, in amdgpu_device_vram_access() argument
397 count = amdgpu_device_aper_access(adev, pos, buf, size, write); in amdgpu_device_vram_access()
403 amdgpu_device_mm_access(adev, pos, buf, size, write); in amdgpu_device_vram_access()
412 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev) in amdgpu_device_skip_hw_access() argument
414 if (adev->no_hw_access) in amdgpu_device_skip_hw_access()
430 if (down_read_trylock(&adev->reset_domain->sem)) in amdgpu_device_skip_hw_access()
431 up_read(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
433 lockdep_assert_held(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
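
The guard above bails out when register access is forbidden outright and, on debug kernels, flags callers that touch registers mid-reset without holding the reset semaphore. A loose user-space analogue, with a pthread rwlock standing in for reset_domain->sem (the kernel version uses lockdep so the reset thread itself passes the check; this sketch can only flag the condition):

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct dev {
            bool no_hw_access;          /* set while HW must not be touched */
            pthread_rwlock_t reset_sem; /* write-held while a reset runs */
    };

    static bool skip_hw_access(struct dev *d)
    {
            if (d->no_hw_access)
                    return true;

            /* Trylock succeeding means no reset is in flight; drop it again.
             * Trylock failing means a reset owns the lock and this caller
             * should not be touching registers. */
            if (pthread_rwlock_tryrdlock(&d->reset_sem) == 0)
                    pthread_rwlock_unlock(&d->reset_sem);
            else
                    assert(!"register access while a reset is in progress");

            return false;
    }
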
448 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, in amdgpu_device_rreg() argument
453 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_device_rreg()
456 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_rreg()
458 amdgpu_sriov_runtime(adev) && in amdgpu_device_rreg()
459 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_rreg()
460 ret = amdgpu_kiq_rreg(adev, reg); in amdgpu_device_rreg()
461 up_read(&adev->reset_domain->sem); in amdgpu_device_rreg()
463 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_rreg()
466 ret = adev->pcie_rreg(adev, reg * 4); in amdgpu_device_rreg()
469 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret); in amdgpu_device_rreg()
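
Reads dispatch on the register offset: inside the mapped window it is a plain readl(), under SR-IOV at runtime the access is routed through the KIQ ring, and past the window it falls back to the indirect PCIe path. A compilable sketch of that dispatch with the KIQ branch omitted (types and names are illustrative, not the driver's):

    #include <stdint.h>

    struct dev {
            volatile uint32_t *rmmio;  /* mapped register window */
            uint32_t rmmio_size;       /* window size in bytes */
            uint32_t (*pcie_rreg)(struct dev *d, uint32_t byte_off);
    };

    static uint32_t dev_rreg(struct dev *d, uint32_t reg) /* reg is a dword index */
    {
            if ((reg * 4) < d->rmmio_size)
                    return d->rmmio[reg];        /* direct MMIO read */
            return d->pcie_rreg(d, reg * 4);     /* indirect fallback */
    }
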
488 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) in amdgpu_mm_rreg8() argument
490 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_mm_rreg8()
493 if (offset < adev->rmmio_size) in amdgpu_mm_rreg8()
494 return (readb(adev->rmmio + offset)); in amdgpu_mm_rreg8()
513 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) in amdgpu_mm_wreg8() argument
515 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_mm_wreg8()
518 if (offset < adev->rmmio_size) in amdgpu_mm_wreg8()
519 writeb(value, adev->rmmio + offset); in amdgpu_mm_wreg8()
534 void amdgpu_device_wreg(struct amdgpu_device *adev, in amdgpu_device_wreg() argument
538 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_device_wreg()
541 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_wreg()
543 amdgpu_sriov_runtime(adev) && in amdgpu_device_wreg()
544 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_wreg()
545 amdgpu_kiq_wreg(adev, reg, v); in amdgpu_device_wreg()
546 up_read(&adev->reset_domain->sem); in amdgpu_device_wreg()
548 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_wreg()
551 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_device_wreg()
554 trace_amdgpu_device_wreg(adev->pdev->device, reg, v); in amdgpu_device_wreg()
566 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, in amdgpu_mm_wreg_mmio_rlc() argument
569 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_mm_wreg_mmio_rlc()
572 if (amdgpu_sriov_fullaccess(adev) && in amdgpu_mm_wreg_mmio_rlc()
573 adev->gfx.rlc.funcs && in amdgpu_mm_wreg_mmio_rlc()
574 adev->gfx.rlc.funcs->is_rlcg_access_range) { in amdgpu_mm_wreg_mmio_rlc()
575 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) in amdgpu_mm_wreg_mmio_rlc()
576 return amdgpu_sriov_wreg(adev, reg, v, 0, 0); in amdgpu_mm_wreg_mmio_rlc()
577 } else if ((reg * 4) >= adev->rmmio_size) { in amdgpu_mm_wreg_mmio_rlc()
578 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_mm_wreg_mmio_rlc()
580 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_mm_wreg_mmio_rlc()
593 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) in amdgpu_mm_rdoorbell() argument
595 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_mm_rdoorbell()
598 if (index < adev->doorbell.num_doorbells) { in amdgpu_mm_rdoorbell()
599 return readl(adev->doorbell.ptr + index); in amdgpu_mm_rdoorbell()
616 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) in amdgpu_mm_wdoorbell() argument
618 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_mm_wdoorbell()
621 if (index < adev->doorbell.num_doorbells) { in amdgpu_mm_wdoorbell()
622 writel(v, adev->doorbell.ptr + index); in amdgpu_mm_wdoorbell()
637 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) in amdgpu_mm_rdoorbell64() argument
639 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_mm_rdoorbell64()
642 if (index < adev->doorbell.num_doorbells) { in amdgpu_mm_rdoorbell64()
643 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); in amdgpu_mm_rdoorbell64()
660 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) in amdgpu_mm_wdoorbell64() argument
662 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_mm_wdoorbell64()
665 if (index < adev->doorbell.num_doorbells) { in amdgpu_mm_wdoorbell64()
666 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); in amdgpu_mm_wdoorbell64()
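
All four doorbell helpers follow the same shape: bounds-check the index against the mapped aperture, then do a plain or atomic access; out-of-range accesses warn and read back 0. A sketch of a 32/64-bit pair, with the 64-bit write mirroring the driver's atomic64_set() via the GCC/Clang __atomic builtins (structure illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct doorbells {
            uint32_t *ptr;          /* mapped doorbell aperture */
            uint32_t num_doorbells; /* slots actually mapped */
    };

    static uint32_t rdoorbell(struct doorbells *db, uint32_t index)
    {
            if (index < db->num_doorbells)
                    return db->ptr[index];
            fprintf(stderr, "reading beyond doorbell aperture: 0x%x\n", index);
            return 0;
    }

    static void wdoorbell64(struct doorbells *db, uint32_t index, uint64_t v)
    {
            /* 64-bit doorbells are written atomically; the index is assumed
             * even so the target dword pair is naturally aligned. */
            if (index < db->num_doorbells)
                    __atomic_store_n((uint64_t *)(db->ptr + index), v,
                                     __ATOMIC_SEQ_CST);
    }
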
682 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, in amdgpu_device_indirect_rreg() argument
691 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
692 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg()
693 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg()
698 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
713 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, in amdgpu_device_indirect_rreg64() argument
722 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
723 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg64()
724 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg64()
734 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
749 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, in amdgpu_device_indirect_wreg() argument
757 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
758 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg()
759 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg()
765 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
778 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, in amdgpu_device_indirect_wreg64() argument
786 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
787 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg64()
788 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg64()
800 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
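
The indirect helpers implement a classic index/data pair: take pcie_idx_lock, write the target offset into the index register, read it back to post the write, then access the data register (the 64-bit variants repeat this for the low and high halves). A sketch with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdint.h>

    struct dev {
            volatile uint32_t *rmmio;
            uint32_t pcie_index;           /* dword offset of the index register */
            uint32_t pcie_data;            /* dword offset of the data register */
            pthread_mutex_t pcie_idx_lock;
    };

    static uint32_t indirect_rreg(struct dev *d, uint32_t reg_addr)
    {
            uint32_t val;

            pthread_mutex_lock(&d->pcie_idx_lock);
            d->rmmio[d->pcie_index] = reg_addr;  /* select the target register */
            (void)d->rmmio[d->pcie_index];       /* read back to post the write */
            val = d->rmmio[d->pcie_data];        /* fetch its contents */
            pthread_mutex_unlock(&d->pcie_idx_lock);
            return val;
    }
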
813 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) in amdgpu_invalid_rreg() argument
830 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) in amdgpu_invalid_wreg() argument
847 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) in amdgpu_invalid_rreg64() argument
864 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v) in amdgpu_invalid_wreg64() argument
882 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, in amdgpu_block_invalid_rreg() argument
902 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, in amdgpu_block_invalid_wreg() argument
918 static int amdgpu_device_asic_init(struct amdgpu_device *adev) in amdgpu_device_asic_init() argument
920 amdgpu_asic_pre_asic_init(adev); in amdgpu_device_asic_init()
922 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) in amdgpu_device_asic_init()
923 return amdgpu_atomfirmware_asic_init(adev, true); in amdgpu_device_asic_init()
925 return amdgpu_atom_asic_init(adev->mode_info.atom_context); in amdgpu_device_asic_init()
936 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) in amdgpu_device_vram_scratch_init() argument
938 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, in amdgpu_device_vram_scratch_init()
940 &adev->vram_scratch.robj, in amdgpu_device_vram_scratch_init()
941 &adev->vram_scratch.gpu_addr, in amdgpu_device_vram_scratch_init()
942 (void **)&adev->vram_scratch.ptr); in amdgpu_device_vram_scratch_init()
952 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) in amdgpu_device_vram_scratch_fini() argument
954 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); in amdgpu_device_vram_scratch_fini()
967 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, in amdgpu_device_program_register_sequence() argument
987 if (adev->family >= AMDGPU_FAMILY_AI) in amdgpu_device_program_register_sequence()
1004 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) in amdgpu_device_pci_config_reset() argument
1006 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); in amdgpu_device_pci_config_reset()
1016 int amdgpu_device_pci_reset(struct amdgpu_device *adev) in amdgpu_device_pci_reset() argument
1018 return pci_reset_function(adev->pdev); in amdgpu_device_pci_reset()
1032 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) in amdgpu_device_doorbell_init() argument
1036 if (adev->asic_type < CHIP_BONAIRE) { in amdgpu_device_doorbell_init()
1037 adev->doorbell.base = 0; in amdgpu_device_doorbell_init()
1038 adev->doorbell.size = 0; in amdgpu_device_doorbell_init()
1039 adev->doorbell.num_doorbells = 0; in amdgpu_device_doorbell_init()
1040 adev->doorbell.ptr = NULL; in amdgpu_device_doorbell_init()
1044 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) in amdgpu_device_doorbell_init()
1047 amdgpu_asic_init_doorbell_index(adev); in amdgpu_device_doorbell_init()
1050 adev->doorbell.base = pci_resource_start(adev->pdev, 2); in amdgpu_device_doorbell_init()
1051 adev->doorbell.size = pci_resource_len(adev->pdev, 2); in amdgpu_device_doorbell_init()
1053 if (adev->enable_mes) { in amdgpu_device_doorbell_init()
1054 adev->doorbell.num_doorbells = in amdgpu_device_doorbell_init()
1055 adev->doorbell.size / sizeof(u32); in amdgpu_device_doorbell_init()
1057 adev->doorbell.num_doorbells = in amdgpu_device_doorbell_init()
1058 min_t(u32, adev->doorbell.size / sizeof(u32), in amdgpu_device_doorbell_init()
1059 adev->doorbell_index.max_assignment+1); in amdgpu_device_doorbell_init()
1060 if (adev->doorbell.num_doorbells == 0) in amdgpu_device_doorbell_init()
1069 if (adev->asic_type >= CHIP_VEGA10) in amdgpu_device_doorbell_init()
1070 adev->doorbell.num_doorbells += 0x400; in amdgpu_device_doorbell_init()
1073 adev->doorbell.ptr = ioremap(adev->doorbell.base, in amdgpu_device_doorbell_init()
1074 adev->doorbell.num_doorbells * in amdgpu_device_doorbell_init()
1076 if (adev->doorbell.ptr == NULL) in amdgpu_device_doorbell_init()
1089 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) in amdgpu_device_doorbell_fini() argument
1091 iounmap(adev->doorbell.ptr); in amdgpu_device_doorbell_fini()
1092 adev->doorbell.ptr = NULL; in amdgpu_device_doorbell_fini()
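
Doorbell setup maps BAR 2 and sizes the mapping: every dword when MES is enabled, otherwise just enough to cover the highest assigned index, plus an extra 0x400 entries on Vega10 and later. A sketch of the count computation (max_assignment is the per-ASIC value from doorbell_index):

    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static int64_t doorbell_count(uint64_t bar_bytes, uint32_t max_assignment,
                                  int enable_mes, int vega10_or_later)
    {
            uint64_t n;

            if (enable_mes)     /* MES manages the whole aperture */
                    return bar_bytes / sizeof(uint32_t);

            n = MIN(bar_bytes / sizeof(uint32_t), (uint64_t)max_assignment + 1);
            if (n == 0)
                    return -1;  /* -EINVAL in the driver */
            if (vega10_or_later)
                    n += 0x400; /* extra dwords reserved on Vega10+ */
            return n;
    }
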
1111 static void amdgpu_device_wb_fini(struct amdgpu_device *adev) in amdgpu_device_wb_fini() argument
1113 if (adev->wb.wb_obj) { in amdgpu_device_wb_fini()
1114 amdgpu_bo_free_kernel(&adev->wb.wb_obj, in amdgpu_device_wb_fini()
1115 &adev->wb.gpu_addr, in amdgpu_device_wb_fini()
1116 (void **)&adev->wb.wb); in amdgpu_device_wb_fini()
1117 adev->wb.wb_obj = NULL; in amdgpu_device_wb_fini()
1130 static int amdgpu_device_wb_init(struct amdgpu_device *adev) in amdgpu_device_wb_init() argument
1134 if (adev->wb.wb_obj == NULL) { in amdgpu_device_wb_init()
1136 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, in amdgpu_device_wb_init()
1138 &adev->wb.wb_obj, &adev->wb.gpu_addr, in amdgpu_device_wb_init()
1139 (void **)&adev->wb.wb); in amdgpu_device_wb_init()
1141 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); in amdgpu_device_wb_init()
1145 adev->wb.num_wb = AMDGPU_MAX_WB; in amdgpu_device_wb_init()
1146 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); in amdgpu_device_wb_init()
1149 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); in amdgpu_device_wb_init()
1164 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) in amdgpu_device_wb_get() argument
1166 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); in amdgpu_device_wb_get()
1168 if (offset < adev->wb.num_wb) { in amdgpu_device_wb_get()
1169 __set_bit(offset, adev->wb.used); in amdgpu_device_wb_get()
1185 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) in amdgpu_device_wb_free() argument
1188 if (wb < adev->wb.num_wb) in amdgpu_device_wb_free()
1189 __clear_bit(wb, adev->wb.used); in amdgpu_device_wb_free()
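
Writeback slots are handed out first-fit from a bitmap: wb_get() claims the first clear bit in adev->wb.used, wb_free() clears it again. The same allocator in standalone form (pool size illustrative; the driver's AMDGPU_MAX_WB pool lives in a GTT buffer object):

    #include <stdint.h>

    #define MAX_WB 64  /* illustrative; AMDGPU_MAX_WB in the driver */

    static uint64_t wb_used;  /* one bit per writeback slot */

    static int wb_get(uint32_t *wb)
    {
            for (uint32_t i = 0; i < MAX_WB; i++) {
                    if (!(wb_used & (1ULL << i))) {
                            wb_used |= 1ULL << i;  /* claim the first free slot */
                            *wb = i;
                            return 0;
                    }
            }
            return -1;  /* pool exhausted; -EINVAL in the driver */
    }

    static void wb_free(uint32_t wb)
    {
            if (wb < MAX_WB)  /* same bounds check as the driver */
                    wb_used &= ~(1ULL << wb);
    }
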
1201 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) in amdgpu_device_resize_fb_bar() argument
1203 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); in amdgpu_device_resize_fb_bar()
1211 if (amdgpu_sriov_vf(adev)) in amdgpu_device_resize_fb_bar()
1215 if (adev->gmc.real_vram_size && in amdgpu_device_resize_fb_bar()
1216 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) in amdgpu_device_resize_fb_bar()
1220 root = adev->pdev->bus; in amdgpu_device_resize_fb_bar()
1235 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, in amdgpu_device_resize_fb_bar()
1239 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); in amdgpu_device_resize_fb_bar()
1240 pci_write_config_word(adev->pdev, PCI_COMMAND, in amdgpu_device_resize_fb_bar()
1244 amdgpu_device_doorbell_fini(adev); in amdgpu_device_resize_fb_bar()
1245 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_resize_fb_bar()
1246 pci_release_resource(adev->pdev, 2); in amdgpu_device_resize_fb_bar()
1248 pci_release_resource(adev->pdev, 0); in amdgpu_device_resize_fb_bar()
1250 r = pci_resize_resource(adev->pdev, 0, rbar_size); in amdgpu_device_resize_fb_bar()
1256 pci_assign_unassigned_bus_resources(adev->pdev->bus); in amdgpu_device_resize_fb_bar()
1261 r = amdgpu_device_doorbell_init(adev); in amdgpu_device_resize_fb_bar()
1262 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) in amdgpu_device_resize_fb_bar()
1265 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); in amdgpu_device_resize_fb_bar()
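
pci_rebar_bytes_to_size() converts the VRAM size into the PCIe Resizable-BAR encoding, which is log2 of the size in megabytes (0 = 1 MB, 1 = 2 MB, and so on); the function then clamps that against what the BAR advertises via pci_rebar_get_possible_sizes(). A sketch of the encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* round up to the next power-of-two megabyte and return its log2 */
    static int rebar_bytes_to_size(uint64_t bytes)
    {
            int size = 0;

            while ((1ULL << (size + 20)) < bytes)
                    size++;
            return size;
    }

    int main(void)
    {
            /* an 8 GiB VRAM BAR needs encoding 13 (8192 MB = 2^13 MB) */
            printf("%d\n", rebar_bytes_to_size(8ULL << 30));
            return 0;
    }
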
1282 bool amdgpu_device_need_post(struct amdgpu_device *adev) in amdgpu_device_need_post() argument
1286 if (amdgpu_sriov_vf(adev)) in amdgpu_device_need_post()
1289 if (amdgpu_passthrough(adev)) { in amdgpu_device_need_post()
1295 if (adev->asic_type == CHIP_FIJI) { in amdgpu_device_need_post()
1298 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); in amdgpu_device_need_post()
1303 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); in amdgpu_device_need_post()
1310 if (adev->gmc.xgmi.pending_reset) in amdgpu_device_need_post()
1313 if (adev->has_hw_reset) { in amdgpu_device_need_post()
1314 adev->has_hw_reset = false; in amdgpu_device_need_post()
1319 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_need_post()
1320 return amdgpu_atombios_scratch_need_asic_init(adev); in amdgpu_device_need_post()
1323 reg = amdgpu_asic_get_config_memsize(adev); in amdgpu_device_need_post()
1341 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev) in amdgpu_device_should_use_aspm() argument
1353 return pcie_aspm_enabled(adev->pdev); in amdgpu_device_should_use_aspm()
1369 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev)); in amdgpu_device_vga_set_decode() local
1370 amdgpu_asic_set_vga_state(adev, state); in amdgpu_device_vga_set_decode()
1388 static void amdgpu_device_check_block_size(struct amdgpu_device *adev) in amdgpu_device_check_block_size() argument
1397 dev_warn(adev->dev, "VM page table size (%d) too small\n", in amdgpu_device_check_block_size()
1411 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) in amdgpu_device_check_vm_size() argument
1418 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", in amdgpu_device_check_vm_size()
1424 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) in amdgpu_device_check_smu_prv_buffer_size() argument
1454 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; in amdgpu_device_check_smu_prv_buffer_size()
1461 adev->pm.smu_prv_buffer_size = 0; in amdgpu_device_check_smu_prv_buffer_size()
1464 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev) in amdgpu_device_init_apu_flags() argument
1466 if (!(adev->flags & AMD_IS_APU) || in amdgpu_device_init_apu_flags()
1467 adev->asic_type < CHIP_RAVEN) in amdgpu_device_init_apu_flags()
1470 switch (adev->asic_type) { in amdgpu_device_init_apu_flags()
1472 if (adev->pdev->device == 0x15dd) in amdgpu_device_init_apu_flags()
1473 adev->apu_flags |= AMD_APU_IS_RAVEN; in amdgpu_device_init_apu_flags()
1474 if (adev->pdev->device == 0x15d8) in amdgpu_device_init_apu_flags()
1475 adev->apu_flags |= AMD_APU_IS_PICASSO; in amdgpu_device_init_apu_flags()
1478 if ((adev->pdev->device == 0x1636) || in amdgpu_device_init_apu_flags()
1479 (adev->pdev->device == 0x164c)) in amdgpu_device_init_apu_flags()
1480 adev->apu_flags |= AMD_APU_IS_RENOIR; in amdgpu_device_init_apu_flags()
1482 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; in amdgpu_device_init_apu_flags()
1485 adev->apu_flags |= AMD_APU_IS_VANGOGH; in amdgpu_device_init_apu_flags()
1490 if ((adev->pdev->device == 0x13FE) || in amdgpu_device_init_apu_flags()
1491 (adev->pdev->device == 0x143F)) in amdgpu_device_init_apu_flags()
1492 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; in amdgpu_device_init_apu_flags()
1509 static int amdgpu_device_check_arguments(struct amdgpu_device *adev) in amdgpu_device_check_arguments() argument
1512 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", in amdgpu_device_check_arguments()
1516 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
1523 dev_warn(adev->dev, "gart size (%d) too small\n", in amdgpu_device_check_arguments()
1530 dev_warn(adev->dev, "gtt size (%d) too small\n", in amdgpu_device_check_arguments()
1538 dev_warn(adev->dev, "valid range is between 4 and 9\n"); in amdgpu_device_check_arguments()
1543 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n", in amdgpu_device_check_arguments()
1547 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
1553 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n"); in amdgpu_device_check_arguments()
1557 amdgpu_device_check_smu_prv_buffer_size(adev); in amdgpu_device_check_arguments()
1559 amdgpu_device_check_vm_size(adev); in amdgpu_device_check_arguments()
1561 amdgpu_device_check_block_size(adev); in amdgpu_device_check_arguments()
1563 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); in amdgpu_device_check_arguments()
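
amdgpu_device_check_arguments() never fails the probe over a bad module parameter; it warns and coerces the value instead, e.g. rounding sched_jobs and sched_hw_submission up to a power of two. A sketch of that sanitizing pattern (limits illustrative):

    #include <stdio.h>

    static int sanitize_pow2(int val, int min)
    {
            int v = val < min ? min : val;

            while (v & (v - 1))            /* not a power of two yet */
                    v = (v | (v - 1)) + 1; /* round up to the next one */
            if (v != val)
                    fprintf(stderr, "adjusted %d -> %d\n", val, v);
            return v;
    }
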
1653 struct amdgpu_device *adev = dev; in amdgpu_device_ip_set_clockgating_state() local
1656 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_clockgating_state()
1657 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_clockgating_state()
1659 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_clockgating_state()
1661 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) in amdgpu_device_ip_set_clockgating_state()
1663 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( in amdgpu_device_ip_set_clockgating_state()
1664 (void *)adev, state); in amdgpu_device_ip_set_clockgating_state()
1667 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_clockgating_state()
1687 struct amdgpu_device *adev = dev; in amdgpu_device_ip_set_powergating_state() local
1690 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_powergating_state()
1691 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_powergating_state()
1693 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_powergating_state()
1695 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) in amdgpu_device_ip_set_powergating_state()
1697 r = adev->ip_blocks[i].version->funcs->set_powergating_state( in amdgpu_device_ip_set_powergating_state()
1698 (void *)adev, state); in amdgpu_device_ip_set_powergating_state()
1701 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_powergating_state()
1717 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, in amdgpu_device_ip_get_clockgating_state() argument
1722 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_get_clockgating_state()
1723 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_get_clockgating_state()
1725 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) in amdgpu_device_ip_get_clockgating_state()
1726 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); in amdgpu_device_ip_get_clockgating_state()
1739 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, in amdgpu_device_ip_wait_for_idle() argument
1744 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_wait_for_idle()
1745 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_wait_for_idle()
1747 if (adev->ip_blocks[i].version->type == block_type) { in amdgpu_device_ip_wait_for_idle()
1748 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); in amdgpu_device_ip_wait_for_idle()
1767 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, in amdgpu_device_ip_is_idle() argument
1772 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_is_idle()
1773 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_is_idle()
1775 if (adev->ip_blocks[i].version->type == block_type) in amdgpu_device_ip_is_idle()
1776 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); in amdgpu_device_ip_is_idle()
1792 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, in amdgpu_device_ip_get_ip_block() argument
1797 for (i = 0; i < adev->num_ip_blocks; i++) in amdgpu_device_ip_get_ip_block()
1798 if (adev->ip_blocks[i].version->type == type) in amdgpu_device_ip_get_ip_block()
1799 return &adev->ip_blocks[i]; in amdgpu_device_ip_get_ip_block()
1815 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, in amdgpu_device_ip_block_version_cmp() argument
1819 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type); in amdgpu_device_ip_block_version_cmp()
1838 int amdgpu_device_ip_block_add(struct amdgpu_device *adev, in amdgpu_device_ip_block_add() argument
1846 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK) in amdgpu_device_ip_block_add()
1850 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK) in amdgpu_device_ip_block_add()
1857 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks, in amdgpu_device_ip_block_add()
1860 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; in amdgpu_device_ip_block_add()
1877 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) in amdgpu_device_enable_virtual_display() argument
1879 adev->enable_virtual_display = false; in amdgpu_device_enable_virtual_display()
1882 const char *pci_address_name = pci_name(adev->pdev); in amdgpu_device_enable_virtual_display()
1894 adev->enable_virtual_display = true; in amdgpu_device_enable_virtual_display()
1905 adev->mode_info.num_crtc = num_crtc; in amdgpu_device_enable_virtual_display()
1907 adev->mode_info.num_crtc = 1; in amdgpu_device_enable_virtual_display()
1915 adev->enable_virtual_display, adev->mode_info.num_crtc); in amdgpu_device_enable_virtual_display()
1931 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) in amdgpu_device_parse_gpu_info_fw() argument
1938 adev->firmware.gpu_info_fw = NULL; in amdgpu_device_parse_gpu_info_fw()
1940 if (adev->mman.discovery_bin) { in amdgpu_device_parse_gpu_info_fw()
1946 if (adev->asic_type != CHIP_NAVI12) in amdgpu_device_parse_gpu_info_fw()
1950 switch (adev->asic_type) { in amdgpu_device_parse_gpu_info_fw()
1960 if (adev->apu_flags & AMD_APU_IS_RAVEN2) in amdgpu_device_parse_gpu_info_fw()
1962 else if (adev->apu_flags & AMD_APU_IS_PICASSO) in amdgpu_device_parse_gpu_info_fw()
1976 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); in amdgpu_device_parse_gpu_info_fw()
1978 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
1983 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); in amdgpu_device_parse_gpu_info_fw()
1985 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
1991 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; in amdgpu_device_parse_gpu_info_fw()
1998 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2004 if (adev->asic_type == CHIP_NAVI12) in amdgpu_device_parse_gpu_info_fw()
2007 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); in amdgpu_device_parse_gpu_info_fw()
2008 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); in amdgpu_device_parse_gpu_info_fw()
2009 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); in amdgpu_device_parse_gpu_info_fw()
2010 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); in amdgpu_device_parse_gpu_info_fw()
2011 adev->gfx.config.max_texture_channel_caches = in amdgpu_device_parse_gpu_info_fw()
2013 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); in amdgpu_device_parse_gpu_info_fw()
2014 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); in amdgpu_device_parse_gpu_info_fw()
2015 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); in amdgpu_device_parse_gpu_info_fw()
2016 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); in amdgpu_device_parse_gpu_info_fw()
2017 adev->gfx.config.double_offchip_lds_buf = in amdgpu_device_parse_gpu_info_fw()
2019 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); in amdgpu_device_parse_gpu_info_fw()
2020 adev->gfx.cu_info.max_waves_per_simd = in amdgpu_device_parse_gpu_info_fw()
2022 adev->gfx.cu_info.max_scratch_slots_per_cu = in amdgpu_device_parse_gpu_info_fw()
2024 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); in amdgpu_device_parse_gpu_info_fw()
2027 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2029 adev->gfx.config.num_sc_per_sh = in amdgpu_device_parse_gpu_info_fw()
2031 adev->gfx.config.num_packer_per_sc = in amdgpu_device_parse_gpu_info_fw()
2042 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2044 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; in amdgpu_device_parse_gpu_info_fw()
2049 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
2068 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) in amdgpu_device_ip_early_init() argument
2070 struct drm_device *dev = adev_to_drm(adev); in amdgpu_device_ip_early_init()
2074 amdgpu_device_enable_virtual_display(adev); in amdgpu_device_ip_early_init()
2076 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_ip_early_init()
2077 r = amdgpu_virt_request_full_gpu(adev, true); in amdgpu_device_ip_early_init()
2082 switch (adev->asic_type) { in amdgpu_device_ip_early_init()
2089 adev->family = AMDGPU_FAMILY_SI; in amdgpu_device_ip_early_init()
2090 r = si_set_ip_blocks(adev); in amdgpu_device_ip_early_init()
2101 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2102 adev->family = AMDGPU_FAMILY_KV; in amdgpu_device_ip_early_init()
2104 adev->family = AMDGPU_FAMILY_CI; in amdgpu_device_ip_early_init()
2106 r = cik_set_ip_blocks(adev); in amdgpu_device_ip_early_init()
2120 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2121 adev->family = AMDGPU_FAMILY_CZ; in amdgpu_device_ip_early_init()
2123 adev->family = AMDGPU_FAMILY_VI; in amdgpu_device_ip_early_init()
2125 r = vi_set_ip_blocks(adev); in amdgpu_device_ip_early_init()
2130 r = amdgpu_discovery_set_ip_blocks(adev); in amdgpu_device_ip_early_init()
2139 ((adev->flags & AMD_IS_APU) == 0) && in amdgpu_device_ip_early_init()
2141 adev->flags |= AMD_IS_PX; in amdgpu_device_ip_early_init()
2143 if (!(adev->flags & AMD_IS_APU)) { in amdgpu_device_ip_early_init()
2144 parent = pci_upstream_bridge(adev->pdev); in amdgpu_device_ip_early_init()
2145 adev->has_pr3 = parent ? pci_pr3_present(parent) : false; in amdgpu_device_ip_early_init()
2148 amdgpu_amdkfd_device_probe(adev); in amdgpu_device_ip_early_init()
2150 adev->pm.pp_feature = amdgpu_pp_feature_mask; in amdgpu_device_ip_early_init()
2151 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) in amdgpu_device_ip_early_init()
2152 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; in amdgpu_device_ip_early_init()
2153 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) in amdgpu_device_ip_early_init()
2154 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; in amdgpu_device_ip_early_init()
2156 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_early_init()
2159 i, adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_early_init()
2160 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2162 if (adev->ip_blocks[i].version->funcs->early_init) { in amdgpu_device_ip_early_init()
2163 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); in amdgpu_device_ip_early_init()
2165 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2168 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_early_init()
2171 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2174 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2178 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_early_init()
2179 r = amdgpu_device_parse_gpu_info_fw(adev); in amdgpu_device_ip_early_init()
2184 if (!amdgpu_get_bios(adev)) in amdgpu_device_ip_early_init()
2187 r = amdgpu_atombios_init(adev); in amdgpu_device_ip_early_init()
2189 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); in amdgpu_device_ip_early_init()
2190 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); in amdgpu_device_ip_early_init()
2195 if (amdgpu_sriov_vf(adev)) in amdgpu_device_ip_early_init()
2196 amdgpu_virt_init_data_exchange(adev); in amdgpu_device_ip_early_init()
2201 adev->cg_flags &= amdgpu_cg_mask; in amdgpu_device_ip_early_init()
2202 adev->pg_flags &= amdgpu_pg_mask; in amdgpu_device_ip_early_init()
2207 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) in amdgpu_device_ip_hw_init_phase1() argument
2211 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase1()
2212 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase1()
2214 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase1()
2216 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_hw_init_phase1()
2217 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || in amdgpu_device_ip_hw_init_phase1()
2218 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { in amdgpu_device_ip_hw_init_phase1()
2219 r = adev->ip_blocks[i].version->funcs->hw_init(adev); in amdgpu_device_ip_hw_init_phase1()
2222 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_hw_init_phase1()
2225 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase1()
2232 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) in amdgpu_device_ip_hw_init_phase2() argument
2236 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase2()
2237 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase2()
2239 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase2()
2241 r = adev->ip_blocks[i].version->funcs->hw_init(adev); in amdgpu_device_ip_hw_init_phase2()
2244 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_hw_init_phase2()
2247 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase2()
2253 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) in amdgpu_device_fw_loading() argument
2259 if (adev->asic_type >= CHIP_VEGA10) { in amdgpu_device_fw_loading()
2260 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_fw_loading()
2261 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_fw_loading()
2264 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_fw_loading()
2268 if (adev->ip_blocks[i].status.hw == true) in amdgpu_device_fw_loading()
2271 if (amdgpu_in_reset(adev) || adev->in_suspend) { in amdgpu_device_fw_loading()
2272 r = adev->ip_blocks[i].version->funcs->resume(adev); in amdgpu_device_fw_loading()
2275 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_fw_loading()
2279 r = adev->ip_blocks[i].version->funcs->hw_init(adev); in amdgpu_device_fw_loading()
2282 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_fw_loading()
2287 adev->ip_blocks[i].status.hw = true; in amdgpu_device_fw_loading()
2292 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) in amdgpu_device_fw_loading()
2293 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); in amdgpu_device_fw_loading()
2298 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) in amdgpu_device_init_schedulers() argument
2304 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_init_schedulers()
2312 timeout = adev->gfx_timeout; in amdgpu_device_init_schedulers()
2315 timeout = adev->compute_timeout; in amdgpu_device_init_schedulers()
2318 timeout = adev->sdma_timeout; in amdgpu_device_init_schedulers()
2321 timeout = adev->video_timeout; in amdgpu_device_init_schedulers()
2327 timeout, adev->reset_domain->wq, in amdgpu_device_init_schedulers()
2329 adev->dev); in amdgpu_device_init_schedulers()
2352 static int amdgpu_device_ip_init(struct amdgpu_device *adev) in amdgpu_device_ip_init() argument
2356 r = amdgpu_ras_init(adev); in amdgpu_device_ip_init()
2360 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_init()
2361 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_init()
2363 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); in amdgpu_device_ip_init()
2366 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_init()
2369 adev->ip_blocks[i].status.sw = true; in amdgpu_device_ip_init()
2371 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_init()
2373 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); in amdgpu_device_ip_init()
2378 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
2379 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_init()
2382 if (amdgpu_sriov_vf(adev)) in amdgpu_device_ip_init()
2383 amdgpu_virt_exchange_data(adev); in amdgpu_device_ip_init()
2385 r = amdgpu_device_vram_scratch_init(adev); in amdgpu_device_ip_init()
2390 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); in amdgpu_device_ip_init()
2395 r = amdgpu_device_wb_init(adev); in amdgpu_device_ip_init()
2400 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
2403 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { in amdgpu_device_ip_init()
2404 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, in amdgpu_device_ip_init()
2415 if (amdgpu_sriov_vf(adev)) in amdgpu_device_ip_init()
2416 amdgpu_virt_init_data_exchange(adev); in amdgpu_device_ip_init()
2418 r = amdgpu_ib_pool_init(adev); in amdgpu_device_ip_init()
2420 dev_err(adev->dev, "IB initialization failed (%d).\n", r); in amdgpu_device_ip_init()
2421 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); in amdgpu_device_ip_init()
2425 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/ in amdgpu_device_ip_init()
2429 r = amdgpu_device_ip_hw_init_phase1(adev); in amdgpu_device_ip_init()
2433 r = amdgpu_device_fw_loading(adev); in amdgpu_device_ip_init()
2437 r = amdgpu_device_ip_hw_init_phase2(adev); in amdgpu_device_ip_init()
2456 r = amdgpu_ras_recovery_init(adev); in amdgpu_device_ip_init()
2463 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_init()
2464 if (amdgpu_xgmi_add_device(adev) == 0) { in amdgpu_device_ip_init()
2465 if (!amdgpu_sriov_vf(adev)) { in amdgpu_device_ip_init()
2466 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_ip_init()
2481 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_ip_init()
2482 adev->reset_domain = hive->reset_domain; in amdgpu_device_ip_init()
2488 r = amdgpu_device_init_schedulers(adev); in amdgpu_device_ip_init()
2493 if (!adev->gmc.xgmi.pending_reset) in amdgpu_device_ip_init()
2494 amdgpu_amdkfd_device_init(adev); in amdgpu_device_ip_init()
2496 amdgpu_fru_get_product_info(adev); in amdgpu_device_ip_init()
2499 if (amdgpu_sriov_vf(adev)) in amdgpu_device_ip_init()
2500 amdgpu_virt_release_full_gpu(adev, true); in amdgpu_device_ip_init()
2514 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) in amdgpu_device_fill_reset_magic() argument
2516 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); in amdgpu_device_fill_reset_magic()
2529 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) in amdgpu_device_check_vram_lost() argument
2531 if (memcmp(adev->gart.ptr, adev->reset_magic, in amdgpu_device_check_vram_lost()
2535 if (!amdgpu_in_reset(adev)) in amdgpu_device_check_vram_lost()
2542 switch (amdgpu_asic_reset_method(adev)) { in amdgpu_device_check_vram_lost()
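
The reset-magic pair is a cheap VRAM-lost detector: before a reset the first bytes of the GART table (which lives in VRAM) are copied aside, and afterwards a memcmp decides whether VRAM contents survived. In sketch form (the 64-byte length for AMDGPU_RESET_MAGIC_NUM is an assumption here):

    #include <stdint.h>
    #include <string.h>

    #define RESET_MAGIC_NUM 64  /* assumed; AMDGPU_RESET_MAGIC_NUM in the driver */

    struct dev {
            uint8_t reset_magic[RESET_MAGIC_NUM];
            void *gart_ptr;  /* CPU mapping of the GART table in VRAM */
    };

    static void fill_reset_magic(struct dev *d)
    {
            memcpy(d->reset_magic, d->gart_ptr, RESET_MAGIC_NUM);
    }

    static int vram_lost(struct dev *d)
    {
            /* contents changed across the reset => VRAM was not preserved */
            return memcmp(d->gart_ptr, d->reset_magic, RESET_MAGIC_NUM) != 0;
    }
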
2564 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, in amdgpu_device_set_cg_state() argument
2572 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_cg_state()
2573 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_cg_state()
2574 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_cg_state()
2577 if (adev->in_s0ix && in amdgpu_device_set_cg_state()
2578 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) in amdgpu_device_set_cg_state()
2581 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_cg_state()
2582 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_cg_state()
2583 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_cg_state()
2584 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_cg_state()
2585 adev->ip_blocks[i].version->funcs->set_clockgating_state) { in amdgpu_device_set_cg_state()
2587 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, in amdgpu_device_set_cg_state()
2591 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_set_cg_state()
2600 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, in amdgpu_device_set_pg_state() argument
2608 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_pg_state()
2609 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_pg_state()
2610 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_pg_state()
2613 if (adev->in_s0ix && in amdgpu_device_set_pg_state()
2614 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) in amdgpu_device_set_pg_state()
2617 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_pg_state()
2618 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_pg_state()
2619 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_pg_state()
2620 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_pg_state()
2621 adev->ip_blocks[i].version->funcs->set_powergating_state) { in amdgpu_device_set_pg_state()
2623 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, in amdgpu_device_set_pg_state()
2627 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_set_pg_state()
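
Both loops use the same index trick to order the walk: gate clocks and power in IP-block registration order, ungate in reverse, so dependencies unwind symmetrically. Distilled:

    enum gate_state { STATE_GATE, STATE_UNGATE };

    static void walk_blocks(int num_blocks, enum gate_state state,
                            void (*apply)(int block, enum gate_state state))
    {
            for (int j = 0; j < num_blocks; j++) {
                    /* forward for GATE, mirrored index for UNGATE */
                    int i = (state == STATE_GATE) ? j : num_blocks - j - 1;
                    apply(i, state);
            }
    }
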
2638 struct amdgpu_device *adev; in amdgpu_device_enable_mgpu_fan_boost() local
2653 adev = gpu_ins->adev; in amdgpu_device_enable_mgpu_fan_boost()
2654 if (!(adev->flags & AMD_IS_APU) && in amdgpu_device_enable_mgpu_fan_boost()
2656 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); in amdgpu_device_enable_mgpu_fan_boost()
2682 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) in amdgpu_device_ip_late_init() argument
2687 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_late_init()
2688 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_late_init()
2690 if (adev->ip_blocks[i].version->funcs->late_init) { in amdgpu_device_ip_late_init()
2691 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); in amdgpu_device_ip_late_init()
2694 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_late_init()
2698 adev->ip_blocks[i].status.late_initialized = true; in amdgpu_device_ip_late_init()
2701 r = amdgpu_ras_late_init(adev); in amdgpu_device_ip_late_init()
2707 amdgpu_ras_set_error_query_ready(adev, true); in amdgpu_device_ip_late_init()
2709 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); in amdgpu_device_ip_late_init()
2710 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); in amdgpu_device_ip_late_init()
2712 amdgpu_device_fill_reset_magic(adev); in amdgpu_device_ip_late_init()
2719 …if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_… in amdgpu_device_ip_late_init()
2720 adev->asic_type == CHIP_ALDEBARAN)) in amdgpu_device_ip_late_init()
2721 amdgpu_dpm_handle_passthrough_sbr(adev, true); in amdgpu_device_ip_late_init()
2723 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_late_init()
2739 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_ip_late_init()
2742 if (gpu_instance->adev->flags & AMD_IS_APU) in amdgpu_device_ip_late_init()
2745 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, in amdgpu_device_ip_late_init()
2767 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) in amdgpu_device_smu_fini_early() argument
2771 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) in amdgpu_device_smu_fini_early()
2774 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_smu_fini_early()
2775 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_smu_fini_early()
2777 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_smu_fini_early()
2778 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); in amdgpu_device_smu_fini_early()
2782 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_smu_fini_early()
2784 adev->ip_blocks[i].status.hw = false; in amdgpu_device_smu_fini_early()
2790 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) in amdgpu_device_ip_fini_early() argument
2794 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_fini_early()
2795 if (!adev->ip_blocks[i].version->funcs->early_fini) in amdgpu_device_ip_fini_early()
2798 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); in amdgpu_device_ip_fini_early()
2801 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini_early()
2805 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); in amdgpu_device_ip_fini_early()
2806 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); in amdgpu_device_ip_fini_early()
2808 amdgpu_amdkfd_suspend(adev, false); in amdgpu_device_ip_fini_early()
2811 amdgpu_device_smu_fini_early(adev); in amdgpu_device_ip_fini_early()
2813 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini_early()
2814 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_fini_early()
2817 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); in amdgpu_device_ip_fini_early()
2821 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini_early()
2824 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_fini_early()
2827 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_ip_fini_early()
2828 if (amdgpu_virt_release_full_gpu(adev, false)) in amdgpu_device_ip_fini_early()
2846 static int amdgpu_device_ip_fini(struct amdgpu_device *adev) in amdgpu_device_ip_fini() argument
2850 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) in amdgpu_device_ip_fini()
2851 amdgpu_virt_release_ras_err_handler_data(adev); in amdgpu_device_ip_fini()
2853 if (adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_ip_fini()
2854 amdgpu_xgmi_remove_device(adev); in amdgpu_device_ip_fini()
2856 amdgpu_amdkfd_device_fini_sw(adev); in amdgpu_device_ip_fini()
2858 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
2859 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_fini()
2862 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_fini()
2863 amdgpu_ucode_free_bo(adev); in amdgpu_device_ip_fini()
2864 amdgpu_free_static_csa(&adev->virt.csa_obj); in amdgpu_device_ip_fini()
2865 amdgpu_device_wb_fini(adev); in amdgpu_device_ip_fini()
2866 amdgpu_device_vram_scratch_fini(adev); in amdgpu_device_ip_fini()
2867 amdgpu_ib_pool_fini(adev); in amdgpu_device_ip_fini()
2870 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); in amdgpu_device_ip_fini()
2874 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini()
2876 adev->ip_blocks[i].status.sw = false; in amdgpu_device_ip_fini()
2877 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_fini()
2880 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
2881 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_ip_fini()
2883 if (adev->ip_blocks[i].version->funcs->late_fini) in amdgpu_device_ip_fini()
2884 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); in amdgpu_device_ip_fini()
2885 adev->ip_blocks[i].status.late_initialized = false; in amdgpu_device_ip_fini()
2888 amdgpu_ras_fini(adev); in amdgpu_device_ip_fini()
2900 struct amdgpu_device *adev = in amdgpu_device_delayed_init_work_handler() local
2904 r = amdgpu_ib_ring_tests(adev); in amdgpu_device_delayed_init_work_handler()
2911 struct amdgpu_device *adev = in amdgpu_device_delay_enable_gfx_off() local
2914 WARN_ON_ONCE(adev->gfx.gfx_off_state); in amdgpu_device_delay_enable_gfx_off()
2915 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); in amdgpu_device_delay_enable_gfx_off()
2917 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) in amdgpu_device_delay_enable_gfx_off()
2918 adev->gfx.gfx_off_state = true; in amdgpu_device_delay_enable_gfx_off()
2932 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) in amdgpu_device_ip_suspend_phase1() argument
2936 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); in amdgpu_device_ip_suspend_phase1()
2937 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); in amdgpu_device_ip_suspend_phase1()
2944 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) in amdgpu_device_ip_suspend_phase1()
2945 dev_warn(adev->dev, "Failed to disallow df cstate"); in amdgpu_device_ip_suspend_phase1()
2947 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase1()
2948 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase1()
2952 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase1()
2956 r = adev->ip_blocks[i].version->funcs->suspend(adev); in amdgpu_device_ip_suspend_phase1()
2960 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_suspend_phase1()
2964 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase1()
2981 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) in amdgpu_device_ip_suspend_phase2() argument
2985 if (adev->in_s0ix) in amdgpu_device_ip_suspend_phase2()
2986 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); in amdgpu_device_ip_suspend_phase2()
2988 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase2()
2989 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase2()
2992 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase2()
2996 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_suspend_phase2()
2997 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3002 if (adev->gmc.xgmi.pending_reset && in amdgpu_device_ip_suspend_phase2()
3003 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_suspend_phase2()
3004 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || in amdgpu_device_ip_suspend_phase2()
3005 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_suspend_phase2()
3006 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { in amdgpu_device_ip_suspend_phase2()
3007 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3016 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3017 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || in amdgpu_device_ip_suspend_phase2()
3018 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_ip_suspend_phase2()
3019 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) in amdgpu_device_ip_suspend_phase2()
3023 r = adev->ip_blocks[i].version->funcs->suspend(adev); in amdgpu_device_ip_suspend_phase2()
3027 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_suspend_phase2()
3029 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3031 if (!amdgpu_sriov_vf(adev)) { in amdgpu_device_ip_suspend_phase2()
3032 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_suspend_phase2()
3033 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); in amdgpu_device_ip_suspend_phase2()
3036 adev->mp1_state, r); in amdgpu_device_ip_suspend_phase2()
3057 int amdgpu_device_ip_suspend(struct amdgpu_device *adev) in amdgpu_device_ip_suspend() argument
3061 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_ip_suspend()
3062 amdgpu_virt_fini_data_exchange(adev); in amdgpu_device_ip_suspend()
3063 amdgpu_virt_request_full_gpu(adev, false); in amdgpu_device_ip_suspend()
3066 r = amdgpu_device_ip_suspend_phase1(adev); in amdgpu_device_ip_suspend()
3069 r = amdgpu_device_ip_suspend_phase2(adev); in amdgpu_device_ip_suspend()
3071 if (amdgpu_sriov_vf(adev)) in amdgpu_device_ip_suspend()
3072 amdgpu_virt_release_full_gpu(adev, false); in amdgpu_device_ip_suspend()
3077 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) in amdgpu_device_ip_reinit_early_sriov() argument
3088 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_reinit_early_sriov()
3092 block = &adev->ip_blocks[i]; in amdgpu_device_ip_reinit_early_sriov()
3101 r = block->version->funcs->hw_init(adev); in amdgpu_device_ip_reinit_early_sriov()
3112 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) in amdgpu_device_ip_reinit_late_sriov() argument
3130 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_ip_reinit_late_sriov()
3131 block = &adev->ip_blocks[j]; in amdgpu_device_ip_reinit_late_sriov()
3139 r = block->version->funcs->resume(adev); in amdgpu_device_ip_reinit_late_sriov()
3141 r = block->version->funcs->hw_init(adev); in amdgpu_device_ip_reinit_late_sriov()
3165 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) in amdgpu_device_ip_resume_phase1() argument
3169 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase1()
3170 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase1()
3172 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase1()
3173 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase1()
3174 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase1()
3175 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { in amdgpu_device_ip_resume_phase1()
3177 r = adev->ip_blocks[i].version->funcs->resume(adev); in amdgpu_device_ip_resume_phase1()
3180 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_resume_phase1()
3183 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_resume_phase1()
3203 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) in amdgpu_device_ip_resume_phase2() argument
3207 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase2()
3208 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase2()
3210 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase2()
3211 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase2()
3212 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase2()
3213 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_ip_resume_phase2()
3215 r = adev->ip_blocks[i].version->funcs->resume(adev); in amdgpu_device_ip_resume_phase2()
3218 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_resume_phase2()
3221 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_resume_phase2()
3223 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_resume_phase2()
3227 amdgpu_gfx_off_ctrl(adev, false); in amdgpu_device_ip_resume_phase2()
3248 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) in amdgpu_device_ip_resume() argument
3252 r = amdgpu_amdkfd_resume_iommu(adev); in amdgpu_device_ip_resume()
3256 r = amdgpu_device_ip_resume_phase1(adev); in amdgpu_device_ip_resume()
3260 r = amdgpu_device_fw_loading(adev); in amdgpu_device_ip_resume()
3264 r = amdgpu_device_ip_resume_phase2(adev); in amdgpu_device_ip_resume()
3276 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) in amdgpu_device_detect_sriov_bios() argument
3278 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_detect_sriov_bios()
3279 if (adev->is_atom_fw) { in amdgpu_device_detect_sriov_bios()
3280 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) in amdgpu_device_detect_sriov_bios()
3281 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
3283 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) in amdgpu_device_detect_sriov_bios()
3284 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
3287 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) in amdgpu_device_detect_sriov_bios()
3288 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); in amdgpu_device_detect_sriov_bios()
3357 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) in amdgpu_device_has_dc_support() argument
3359 if (amdgpu_sriov_vf(adev) || in amdgpu_device_has_dc_support()
3360 adev->enable_virtual_display || in amdgpu_device_has_dc_support()
3361 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_dc_support()
3364 return amdgpu_device_asic_has_dc_support(adev->asic_type); in amdgpu_device_has_dc_support()
3369 struct amdgpu_device *adev = in amdgpu_device_xgmi_reset_func() local
3371 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_xgmi_reset_func()
3383 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { in amdgpu_device_xgmi_reset_func()
3386 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); in amdgpu_device_xgmi_reset_func()
3388 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
3392 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); in amdgpu_device_xgmi_reset_func()
3394 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
3397 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && in amdgpu_device_xgmi_reset_func()
3398 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) in amdgpu_device_xgmi_reset_func()
3399 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); in amdgpu_device_xgmi_reset_func()
3403 adev->asic_reset_res = amdgpu_asic_reset(adev); in amdgpu_device_xgmi_reset_func()
3407 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
3409 adev->asic_reset_res, adev_to_drm(adev)->unique); in amdgpu_device_xgmi_reset_func()
3413 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) in amdgpu_device_get_job_timeout_settings() argument
3427 adev->gfx_timeout = msecs_to_jiffies(10000); in amdgpu_device_get_job_timeout_settings()
3428 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
3429 if (amdgpu_sriov_vf(adev)) in amdgpu_device_get_job_timeout_settings()
3430 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? in amdgpu_device_get_job_timeout_settings()
3433 adev->compute_timeout = msecs_to_jiffies(60000); in amdgpu_device_get_job_timeout_settings()
3447 dev_warn(adev->dev, "lockup timeout disabled"); in amdgpu_device_get_job_timeout_settings()
3455 adev->gfx_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
3458 adev->compute_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
3461 adev->sdma_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
3464 adev->video_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
3475 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
3476 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) in amdgpu_device_get_job_timeout_settings()
3477 adev->compute_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
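
The function above first seeds per-engine defaults (10 s for gfx/sdma/video, 60 s for compute, with SR-IOV overrides), then applies the module's lockup_timeout string, which supplies up to four comma-separated values in gfx, compute, sdma, video order. A hedged user-space model of that parse (the driver itself stores jiffies via msecs_to_jiffies() and treats non-positive values as "lockup timeout disabled"):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Model of parsing "gfx[,compute[,sdma[,video]]]" timeouts in ms. */
static void parse_lockup_timeout(const char *arg, long t[4])
{
	char buf[128], *save, *tok;
	int i = 0;

	snprintf(buf, sizeof(buf), "%s", arg);
	for (tok = strtok_r(buf, ",", &save); tok && i < 4;
	     tok = strtok_r(NULL, ",", &save))
		t[i++] = strtol(tok, NULL, 10);

	if (i == 1) {
		/* one value covers sdma and video too; compute follows it
		 * only under SR-IOV/passthrough (see 3476-3477 above) */
		t[2] = t[3] = t[0];
	}
}

int main(void)
{
	long t[4] = { 10000, 60000, 10000, 10000 };	/* ms defaults */

	parse_lockup_timeout("5000", t);
	printf("gfx=%ld compute=%ld sdma=%ld video=%ld\n",
	       t[0], t[1], t[2], t[3]);
	return 0;
}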
3491 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) in amdgpu_device_check_iommu_direct_map() argument
3495 domain = iommu_get_domain_for_dev(adev->dev); in amdgpu_device_check_iommu_direct_map()
3497 adev->ram_is_direct_mapped = true; in amdgpu_device_check_iommu_direct_map()
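
The condition between 3495 and 3497 is elided by the match filter; reading the two lines together, the check presumably treats a missing or identity (pass-through) IOMMU domain as "RAM is direct-mapped". A sketch under that assumption (in-kernel context, adev fields as in the driver):

#include <linux/iommu.h>

/* Sketch: with no translating IOMMU domain, device addresses reach RAM
 * directly, so the flag can be set. */
static void check_iommu_direct_map(struct amdgpu_device *adev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(adev->dev);

	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
		adev->ram_is_direct_mapped = true;
}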
3518 int amdgpu_device_init(struct amdgpu_device *adev, in amdgpu_device_init() argument
3521 struct drm_device *ddev = adev_to_drm(adev); in amdgpu_device_init()
3522 struct pci_dev *pdev = adev->pdev; in amdgpu_device_init()
3527 adev->shutdown = false; in amdgpu_device_init()
3528 adev->flags = flags; in amdgpu_device_init()
3531 adev->asic_type = amdgpu_force_asic_type; in amdgpu_device_init()
3533 adev->asic_type = flags & AMD_ASIC_MASK; in amdgpu_device_init()
3535 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; in amdgpu_device_init()
3537 adev->usec_timeout *= 10; in amdgpu_device_init()
3538 adev->gmc.gart_size = 512 * 1024 * 1024; in amdgpu_device_init()
3539 adev->accel_working = false; in amdgpu_device_init()
3540 adev->num_rings = 0; in amdgpu_device_init()
3541 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); in amdgpu_device_init()
3542 adev->mman.buffer_funcs = NULL; in amdgpu_device_init()
3543 adev->mman.buffer_funcs_ring = NULL; in amdgpu_device_init()
3544 adev->vm_manager.vm_pte_funcs = NULL; in amdgpu_device_init()
3545 adev->vm_manager.vm_pte_num_scheds = 0; in amdgpu_device_init()
3546 adev->gmc.gmc_funcs = NULL; in amdgpu_device_init()
3547 adev->harvest_ip_mask = 0x0; in amdgpu_device_init()
3548 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); in amdgpu_device_init()
3549 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); in amdgpu_device_init()
3551 adev->smc_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
3552 adev->smc_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
3553 adev->pcie_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
3554 adev->pcie_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
3555 adev->pciep_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
3556 adev->pciep_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
3557 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; in amdgpu_device_init()
3558 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; in amdgpu_device_init()
3559 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
3560 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
3561 adev->didt_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
3562 adev->didt_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
3563 adev->gc_cac_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
3564 adev->gc_cac_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
3565 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; in amdgpu_device_init()
3566 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; in amdgpu_device_init()
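
Lines 3551-3566 point every not-yet-wired register accessor at invalid stubs. The stubs exist to fail loudly: a read or write through an accessor that no IP block has replaced logs the offending offset and BUG()s, instead of silently touching hardware. The pattern, sketched after amdgpu_invalid_rreg/_wreg (kernel context assumed):

/* Placeholder accessors installed before IP blocks register real ones. */
static uint32_t invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static void invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}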
3569 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, in amdgpu_device_init()
3574 mutex_init(&adev->firmware.mutex); in amdgpu_device_init()
3575 mutex_init(&adev->pm.mutex); in amdgpu_device_init()
3576 mutex_init(&adev->gfx.gpu_clock_mutex); in amdgpu_device_init()
3577 mutex_init(&adev->srbm_mutex); in amdgpu_device_init()
3578 mutex_init(&adev->gfx.pipe_reserve_mutex); in amdgpu_device_init()
3579 mutex_init(&adev->gfx.gfx_off_mutex); in amdgpu_device_init()
3580 mutex_init(&adev->grbm_idx_mutex); in amdgpu_device_init()
3581 mutex_init(&adev->mn_lock); in amdgpu_device_init()
3582 mutex_init(&adev->virt.vf_errors.lock); in amdgpu_device_init()
3583 hash_init(adev->mn_hash); in amdgpu_device_init()
3584 mutex_init(&adev->psp.mutex); in amdgpu_device_init()
3585 mutex_init(&adev->notifier_lock); in amdgpu_device_init()
3586 mutex_init(&adev->pm.stable_pstate_ctx_lock); in amdgpu_device_init()
3587 mutex_init(&adev->benchmark_mutex); in amdgpu_device_init()
3589 amdgpu_device_init_apu_flags(adev); in amdgpu_device_init()
3591 r = amdgpu_device_check_arguments(adev); in amdgpu_device_init()
3595 spin_lock_init(&adev->mmio_idx_lock); in amdgpu_device_init()
3596 spin_lock_init(&adev->smc_idx_lock); in amdgpu_device_init()
3597 spin_lock_init(&adev->pcie_idx_lock); in amdgpu_device_init()
3598 spin_lock_init(&adev->uvd_ctx_idx_lock); in amdgpu_device_init()
3599 spin_lock_init(&adev->didt_idx_lock); in amdgpu_device_init()
3600 spin_lock_init(&adev->gc_cac_idx_lock); in amdgpu_device_init()
3601 spin_lock_init(&adev->se_cac_idx_lock); in amdgpu_device_init()
3602 spin_lock_init(&adev->audio_endpt_idx_lock); in amdgpu_device_init()
3603 spin_lock_init(&adev->mm_stats.lock); in amdgpu_device_init()
3605 INIT_LIST_HEAD(&adev->shadow_list); in amdgpu_device_init()
3606 mutex_init(&adev->shadow_list_lock); in amdgpu_device_init()
3608 INIT_LIST_HEAD(&adev->reset_list); in amdgpu_device_init()
3610 INIT_LIST_HEAD(&adev->ras_list); in amdgpu_device_init()
3612 INIT_DELAYED_WORK(&adev->delayed_init_work, in amdgpu_device_init()
3614 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, in amdgpu_device_init()
3617 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); in amdgpu_device_init()
3619 adev->gfx.gfx_off_req_count = 1; in amdgpu_device_init()
3620 adev->gfx.gfx_off_residency = 0; in amdgpu_device_init()
3621 adev->gfx.gfx_off_entrycount = 0; in amdgpu_device_init()
3622 adev->pm.ac_power = power_supply_is_system_supplied() > 0; in amdgpu_device_init()
3624 atomic_set(&adev->throttling_logging_enabled, 1); in amdgpu_device_init()
3632 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); in amdgpu_device_init()
3633 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); in amdgpu_device_init()
3637 if (adev->asic_type >= CHIP_BONAIRE) { in amdgpu_device_init()
3638 adev->rmmio_base = pci_resource_start(adev->pdev, 5); in amdgpu_device_init()
3639 adev->rmmio_size = pci_resource_len(adev->pdev, 5); in amdgpu_device_init()
3641 adev->rmmio_base = pci_resource_start(adev->pdev, 2); in amdgpu_device_init()
3642 adev->rmmio_size = pci_resource_len(adev->pdev, 2); in amdgpu_device_init()
3646 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); in amdgpu_device_init()
3648 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); in amdgpu_device_init()
3649 if (adev->rmmio == NULL) { in amdgpu_device_init()
3652 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); in amdgpu_device_init()
3653 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); in amdgpu_device_init()
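
Lines 3637-3652 choose the register BAR (BAR 5 from CHIP_BONAIRE on, BAR 2 on older ASICs), map it, and log base and size; init fails outright when the map comes back NULL (3649). The core of that step as a hedged kernel-style sketch:

#include <linux/pci.h>
#include <linux/io.h>

/* Sketch: map the MMIO register BAR; the BAR index depends on the ASIC. */
static int map_mmio_bar(struct amdgpu_device *adev, int bar)
{
	adev->rmmio_base = pci_resource_start(adev->pdev, bar);
	adev->rmmio_size = pci_resource_len(adev->pdev, bar);

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (!adev->rmmio)
		return -ENOMEM;		/* init bails out on a failed map */

	return 0;
}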
3655 amdgpu_device_get_pcie_info(adev); in amdgpu_device_init()
3665 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); in amdgpu_device_init()
3666 if (!adev->reset_domain) in amdgpu_device_init()
3670 amdgpu_detect_virtualization(adev); in amdgpu_device_init()
3672 r = amdgpu_device_get_job_timeout_settings(adev); in amdgpu_device_init()
3674 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); in amdgpu_device_init()
3679 r = amdgpu_device_ip_early_init(adev); in amdgpu_device_init()
3684 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); in amdgpu_device_init()
3689 amdgpu_gmc_tmz_set(adev); in amdgpu_device_init()
3691 amdgpu_gmc_noretry_set(adev); in amdgpu_device_init()
3693 if (adev->gmc.xgmi.supported) { in amdgpu_device_init()
3694 r = adev->gfxhub.funcs->get_xgmi_info(adev); in amdgpu_device_init()
3700 if (amdgpu_sriov_vf(adev)) in amdgpu_device_init()
3701 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) in amdgpu_device_init()
3702 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == in amdgpu_device_init()
3705 adev->have_atomics_support = in amdgpu_device_init()
3706 !pci_enable_atomic_ops_to_root(adev->pdev, in amdgpu_device_init()
3709 if (!adev->have_atomics_support) in amdgpu_device_init()
3710 dev_info(adev->dev, "PCIe atomic ops are not supported\n"); in amdgpu_device_init()
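
On bare metal (3705-3710) the driver asks the root port to enable 32- and 64-bit PCIe atomic completers; under SR-IOV (3700-3702) it instead trusts the flag the host publishes in the pf2vf region. A sketch of the bare-metal half:

#include <linux/pci.h>

/* Sketch: pci_enable_atomic_ops_to_root() returns 0 only when every
 * switch on the path supports atomic-op routing and the root port
 * supports the requested completer widths. */
static bool pcie_atomics_supported(struct pci_dev *pdev)
{
	return !pci_enable_atomic_ops_to_root(pdev,
					      PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					      PCI_EXP_DEVCAP2_ATOMIC_COMP64);
}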
3713 amdgpu_device_doorbell_init(adev); in amdgpu_device_init()
3717 emu_soc_asic_init(adev); in amdgpu_device_init()
3721 amdgpu_reset_init(adev); in amdgpu_device_init()
3724 amdgpu_device_detect_sriov_bios(adev); in amdgpu_device_init()
3729 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { in amdgpu_device_init()
3730 if (adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_init()
3731 dev_info(adev->dev, "Pending hive reset.\n"); in amdgpu_device_init()
3732 adev->gmc.xgmi.pending_reset = true; in amdgpu_device_init()
3734 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_init()
3735 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_init()
3737 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_init()
3738 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_init()
3739 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_init()
3740 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { in amdgpu_device_init()
3742 adev->ip_blocks[i].version->funcs->name); in amdgpu_device_init()
3743 adev->ip_blocks[i].status.hw = true; in amdgpu_device_init()
3747 r = amdgpu_asic_reset(adev); in amdgpu_device_init()
3749 dev_err(adev->dev, "asic reset on init failed\n"); in amdgpu_device_init()
3755 pci_enable_pcie_error_reporting(adev->pdev); in amdgpu_device_init()
3758 if (amdgpu_device_need_post(adev)) { in amdgpu_device_init()
3759 if (!adev->bios) { in amdgpu_device_init()
3760 dev_err(adev->dev, "no vBIOS found\n"); in amdgpu_device_init()
3765 r = amdgpu_device_asic_init(adev); in amdgpu_device_init()
3767 dev_err(adev->dev, "gpu post error!\n"); in amdgpu_device_init()
3772 if (adev->is_atom_fw) { in amdgpu_device_init()
3774 r = amdgpu_atomfirmware_get_clock_info(adev); in amdgpu_device_init()
3776 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); in amdgpu_device_init()
3777 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); in amdgpu_device_init()
3782 r = amdgpu_atombios_get_clock_info(adev); in amdgpu_device_init()
3784 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); in amdgpu_device_init()
3785 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); in amdgpu_device_init()
3789 if (!amdgpu_device_has_dc_support(adev)) in amdgpu_device_init()
3790 amdgpu_atombios_i2c_init(adev); in amdgpu_device_init()
3795 r = amdgpu_fence_driver_sw_init(adev); in amdgpu_device_init()
3797 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); in amdgpu_device_init()
3798 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); in amdgpu_device_init()
3803 drm_mode_config_init(adev_to_drm(adev)); in amdgpu_device_init()
3805 r = amdgpu_device_ip_init(adev); in amdgpu_device_init()
3808 if (amdgpu_sriov_vf(adev) && in amdgpu_device_init()
3809 !amdgpu_sriov_runtime(adev) && in amdgpu_device_init()
3810 amdgpu_virt_mmio_blocked(adev) && in amdgpu_device_init()
3811 !amdgpu_virt_wait_reset(adev)) { in amdgpu_device_init()
3812 dev_err(adev->dev, "VF exclusive mode timeout\n"); in amdgpu_device_init()
3814 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; in amdgpu_device_init()
3815 adev->virt.ops = NULL; in amdgpu_device_init()
3819 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); in amdgpu_device_init()
3820 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); in amdgpu_device_init()
3824 amdgpu_fence_driver_hw_init(adev); in amdgpu_device_init()
3826 dev_info(adev->dev, in amdgpu_device_init()
3828 adev->gfx.config.max_shader_engines, in amdgpu_device_init()
3829 adev->gfx.config.max_sh_per_se, in amdgpu_device_init()
3830 adev->gfx.config.max_cu_per_sh, in amdgpu_device_init()
3831 adev->gfx.cu_info.number); in amdgpu_device_init()
3833 adev->accel_working = true; in amdgpu_device_init()
3835 amdgpu_vm_check_compute_bug(adev); in amdgpu_device_init()
3843 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); in amdgpu_device_init()
3845 r = amdgpu_pm_sysfs_init(adev); in amdgpu_device_init()
3847 adev->pm_sysfs_en = false; in amdgpu_device_init()
3850 adev->pm_sysfs_en = true; in amdgpu_device_init()
3852 r = amdgpu_ucode_sysfs_init(adev); in amdgpu_device_init()
3854 adev->ucode_sysfs_en = false; in amdgpu_device_init()
3857 adev->ucode_sysfs_en = true; in amdgpu_device_init()
3859 r = amdgpu_psp_sysfs_init(adev); in amdgpu_device_init()
3861 adev->psp_sysfs_en = false; in amdgpu_device_init()
3862 if (!amdgpu_sriov_vf(adev)) in amdgpu_device_init()
3865 adev->psp_sysfs_en = true; in amdgpu_device_init()
3872 amdgpu_register_gpu_instance(adev); in amdgpu_device_init()
3877 if (!adev->gmc.xgmi.pending_reset) { in amdgpu_device_init()
3878 r = amdgpu_device_ip_late_init(adev); in amdgpu_device_init()
3880 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); in amdgpu_device_init()
3881 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); in amdgpu_device_init()
3885 amdgpu_ras_resume(adev); in amdgpu_device_init()
3886 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_init()
3890 if (amdgpu_sriov_vf(adev)) in amdgpu_device_init()
3891 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_init()
3893 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); in amdgpu_device_init()
3895 dev_err(adev->dev, "Could not create amdgpu device attr\n"); in amdgpu_device_init()
3898 r = amdgpu_pmu_init(adev); in amdgpu_device_init()
3900 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); in amdgpu_device_init()
3903 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_device_init()
3909 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_init()
3910 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); in amdgpu_device_init()
3914 vga_switcheroo_register_client(adev->pdev, in amdgpu_device_init()
3916 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); in amdgpu_device_init()
3919 if (adev->gmc.xgmi.pending_reset) in amdgpu_device_init()
3923 amdgpu_device_check_iommu_direct_map(adev); in amdgpu_device_init()
3928 amdgpu_release_ras_context(adev); in amdgpu_device_init()
3931 amdgpu_vf_error_trans_all(adev); in amdgpu_device_init()
3936 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) in amdgpu_device_unmap_mmio() argument
3940 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); in amdgpu_device_unmap_mmio()
3943 amdgpu_device_doorbell_fini(adev); in amdgpu_device_unmap_mmio()
3945 iounmap(adev->rmmio); in amdgpu_device_unmap_mmio()
3946 adev->rmmio = NULL; in amdgpu_device_unmap_mmio()
3947 if (adev->mman.aper_base_kaddr) in amdgpu_device_unmap_mmio()
3948 iounmap(adev->mman.aper_base_kaddr); in amdgpu_device_unmap_mmio()
3949 adev->mman.aper_base_kaddr = NULL; in amdgpu_device_unmap_mmio()
3952 if (!adev->gmc.xgmi.connected_to_cpu) { in amdgpu_device_unmap_mmio()
3953 arch_phys_wc_del(adev->gmc.vram_mtrr); in amdgpu_device_unmap_mmio()
3954 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); in amdgpu_device_unmap_mmio()
3966 void amdgpu_device_fini_hw(struct amdgpu_device *adev) in amdgpu_device_fini_hw() argument
3968 dev_info(adev->dev, "amdgpu: finishing device.\n"); in amdgpu_device_fini_hw()
3969 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_fini_hw()
3970 adev->shutdown = true; in amdgpu_device_fini_hw()
3975 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_fini_hw()
3976 amdgpu_virt_request_full_gpu(adev, false); in amdgpu_device_fini_hw()
3977 amdgpu_virt_fini_data_exchange(adev); in amdgpu_device_fini_hw()
3981 amdgpu_irq_disable_all(adev); in amdgpu_device_fini_hw()
3982 if (adev->mode_info.mode_config_initialized) { in amdgpu_device_fini_hw()
3983 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) in amdgpu_device_fini_hw()
3984 drm_helper_force_disable_all(adev_to_drm(adev)); in amdgpu_device_fini_hw()
3986 drm_atomic_helper_shutdown(adev_to_drm(adev)); in amdgpu_device_fini_hw()
3988 amdgpu_fence_driver_hw_fini(adev); in amdgpu_device_fini_hw()
3990 if (adev->mman.initialized) { in amdgpu_device_fini_hw()
3991 flush_delayed_work(&adev->mman.bdev.wq); in amdgpu_device_fini_hw()
3992 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); in amdgpu_device_fini_hw()
3995 if (adev->pm_sysfs_en) in amdgpu_device_fini_hw()
3996 amdgpu_pm_sysfs_fini(adev); in amdgpu_device_fini_hw()
3997 if (adev->ucode_sysfs_en) in amdgpu_device_fini_hw()
3998 amdgpu_ucode_sysfs_fini(adev); in amdgpu_device_fini_hw()
3999 if (adev->psp_sysfs_en) in amdgpu_device_fini_hw()
4000 amdgpu_psp_sysfs_fini(adev); in amdgpu_device_fini_hw()
4001 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); in amdgpu_device_fini_hw()
4004 amdgpu_ras_pre_fini(adev); in amdgpu_device_fini_hw()
4006 amdgpu_device_ip_fini_early(adev); in amdgpu_device_fini_hw()
4008 amdgpu_irq_fini_hw(adev); in amdgpu_device_fini_hw()
4010 if (adev->mman.initialized) in amdgpu_device_fini_hw()
4011 ttm_device_clear_dma_mappings(&adev->mman.bdev); in amdgpu_device_fini_hw()
4013 amdgpu_gart_dummy_page_fini(adev); in amdgpu_device_fini_hw()
4015 amdgpu_device_unmap_mmio(adev); in amdgpu_device_fini_hw()
4019 void amdgpu_device_fini_sw(struct amdgpu_device *adev) in amdgpu_device_fini_sw() argument
4023 amdgpu_fence_driver_sw_fini(adev); in amdgpu_device_fini_sw()
4024 amdgpu_device_ip_fini(adev); in amdgpu_device_fini_sw()
4025 release_firmware(adev->firmware.gpu_info_fw); in amdgpu_device_fini_sw()
4026 adev->firmware.gpu_info_fw = NULL; in amdgpu_device_fini_sw()
4027 adev->accel_working = false; in amdgpu_device_fini_sw()
4028 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); in amdgpu_device_fini_sw()
4030 amdgpu_reset_fini(adev); in amdgpu_device_fini_sw()
4033 if (!amdgpu_device_has_dc_support(adev)) in amdgpu_device_fini_sw()
4034 amdgpu_i2c_fini(adev); in amdgpu_device_fini_sw()
4037 amdgpu_atombios_fini(adev); in amdgpu_device_fini_sw()
4039 kfree(adev->bios); in amdgpu_device_fini_sw()
4040 adev->bios = NULL; in amdgpu_device_fini_sw()
4041 if (amdgpu_device_supports_px(adev_to_drm(adev))) { in amdgpu_device_fini_sw()
4042 vga_switcheroo_unregister_client(adev->pdev); in amdgpu_device_fini_sw()
4043 vga_switcheroo_fini_domain_pm_ops(adev->dev); in amdgpu_device_fini_sw()
4045 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_fini_sw()
4046 vga_client_unregister(adev->pdev); in amdgpu_device_fini_sw()
4048 if (drm_dev_enter(adev_to_drm(adev), &idx)) { in amdgpu_device_fini_sw()
4050 iounmap(adev->rmmio); in amdgpu_device_fini_sw()
4051 adev->rmmio = NULL; in amdgpu_device_fini_sw()
4052 amdgpu_device_doorbell_fini(adev); in amdgpu_device_fini_sw()
4057 amdgpu_pmu_fini(adev); in amdgpu_device_fini_sw()
4058 if (adev->mman.discovery_bin) in amdgpu_device_fini_sw()
4059 amdgpu_discovery_fini(adev); in amdgpu_device_fini_sw()
4061 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_fini_sw()
4062 adev->reset_domain = NULL; in amdgpu_device_fini_sw()
4064 kfree(adev->pci_state); in amdgpu_device_fini_sw()
4077 static int amdgpu_device_evict_resources(struct amdgpu_device *adev) in amdgpu_device_evict_resources() argument
4082 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) in amdgpu_device_evict_resources()
4085 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); in amdgpu_device_evict_resources()
4106 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_suspend() local
4112 adev->in_suspend = true; in amdgpu_device_suspend()
4114 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_suspend()
4115 amdgpu_virt_fini_data_exchange(adev); in amdgpu_device_suspend()
4116 r = amdgpu_virt_request_full_gpu(adev, false); in amdgpu_device_suspend()
4127 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); in amdgpu_device_suspend()
4129 cancel_delayed_work_sync(&adev->delayed_init_work); in amdgpu_device_suspend()
4131 amdgpu_ras_suspend(adev); in amdgpu_device_suspend()
4133 amdgpu_device_ip_suspend_phase1(adev); in amdgpu_device_suspend()
4135 if (!adev->in_s0ix) in amdgpu_device_suspend()
4136 amdgpu_amdkfd_suspend(adev, adev->in_runpm); in amdgpu_device_suspend()
4138 r = amdgpu_device_evict_resources(adev); in amdgpu_device_suspend()
4142 amdgpu_fence_driver_hw_fini(adev); in amdgpu_device_suspend()
4144 amdgpu_device_ip_suspend_phase2(adev); in amdgpu_device_suspend()
4146 if (amdgpu_sriov_vf(adev)) in amdgpu_device_suspend()
4147 amdgpu_virt_release_full_gpu(adev, false); in amdgpu_device_suspend()
4164 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_resume() local
4167 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_resume()
4168 r = amdgpu_virt_request_full_gpu(adev, true); in amdgpu_device_resume()
4176 if (adev->in_s0ix) in amdgpu_device_resume()
4177 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry); in amdgpu_device_resume()
4180 if (amdgpu_device_need_post(adev)) { in amdgpu_device_resume()
4181 r = amdgpu_device_asic_init(adev); in amdgpu_device_resume()
4183 dev_err(adev->dev, "amdgpu asic init failed\n"); in amdgpu_device_resume()
4186 r = amdgpu_device_ip_resume(adev); in amdgpu_device_resume()
4189 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_resume()
4190 amdgpu_virt_init_data_exchange(adev); in amdgpu_device_resume()
4191 amdgpu_virt_release_full_gpu(adev, true); in amdgpu_device_resume()
4195 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); in amdgpu_device_resume()
4198 amdgpu_fence_driver_hw_init(adev); in amdgpu_device_resume()
4200 r = amdgpu_device_ip_late_init(adev); in amdgpu_device_resume()
4204 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_resume()
4207 if (!adev->in_s0ix) { in amdgpu_device_resume()
4208 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); in amdgpu_device_resume()
4214 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_resume()
4216 if (adev->in_s0ix) { in amdgpu_device_resume()
4220 amdgpu_gfx_off_ctrl(adev, true); in amdgpu_device_resume()
4224 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); in amdgpu_device_resume()
4228 amdgpu_ras_resume(adev); in amdgpu_device_resume()
4242 if (!amdgpu_device_has_dc_support(adev)) in amdgpu_device_resume()
4249 adev->in_suspend = false; in amdgpu_device_resume()
4267 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) in amdgpu_device_ip_check_soft_reset() argument
4272 if (amdgpu_sriov_vf(adev)) in amdgpu_device_ip_check_soft_reset()
4275 if (amdgpu_asic_need_full_reset(adev)) in amdgpu_device_ip_check_soft_reset()
4278 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_check_soft_reset()
4279 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_check_soft_reset()
4281 if (adev->ip_blocks[i].version->funcs->check_soft_reset) in amdgpu_device_ip_check_soft_reset()
4282 adev->ip_blocks[i].status.hang = in amdgpu_device_ip_check_soft_reset()
4283 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); in amdgpu_device_ip_check_soft_reset()
4284 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_check_soft_reset()
4285 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_check_soft_reset()
4303 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) in amdgpu_device_ip_pre_soft_reset() argument
4307 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_pre_soft_reset()
4308 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_pre_soft_reset()
4310 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_pre_soft_reset()
4311 adev->ip_blocks[i].version->funcs->pre_soft_reset) { in amdgpu_device_ip_pre_soft_reset()
4312 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); in amdgpu_device_ip_pre_soft_reset()
4330 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) in amdgpu_device_ip_need_full_reset() argument
4334 if (amdgpu_asic_need_full_reset(adev)) in amdgpu_device_ip_need_full_reset()
4337 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_need_full_reset()
4338 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_need_full_reset()
4340 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || in amdgpu_device_ip_need_full_reset()
4341 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || in amdgpu_device_ip_need_full_reset()
4342 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || in amdgpu_device_ip_need_full_reset()
4343 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || in amdgpu_device_ip_need_full_reset()
4344 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_need_full_reset()
4345 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_need_full_reset()
4346 dev_info(adev->dev, "Some blocks need a full reset!\n"); in amdgpu_device_ip_need_full_reset()
4365 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) in amdgpu_device_ip_soft_reset() argument
4369 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_soft_reset()
4370 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_soft_reset()
4372 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_soft_reset()
4373 adev->ip_blocks[i].version->funcs->soft_reset) { in amdgpu_device_ip_soft_reset()
4374 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); in amdgpu_device_ip_soft_reset()
4394 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) in amdgpu_device_ip_post_soft_reset() argument
4398 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_post_soft_reset()
4399 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_post_soft_reset()
4401 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_post_soft_reset()
4402 adev->ip_blocks[i].version->funcs->post_soft_reset) in amdgpu_device_ip_post_soft_reset()
4403 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); in amdgpu_device_ip_post_soft_reset()
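
The four helpers above form a staged sequence: check_soft_reset marks hung blocks, and only those blocks then get pre_soft_reset, soft_reset and post_soft_reset, each stage sweeping the whole table before the next begins; if a hang persists afterwards, the caller (amdgpu_device_pre_asic_reset, further down) falls back to a full reset. A compact user-space model with hypothetical hooks:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative model of the staged soft reset; struct and hooks are
 * stand-ins for amdgpu's ip_block callbacks, not driver API. */
struct block {
	bool valid, hang;
	bool (*check)(void);	/* detect a hang */
	int (*pre)(void);	/* quiesce before reset */
	int (*soft)(void);	/* block-local reset */
	int (*post)(void);	/* restore state afterwards */
};

static bool soft_reset_blocks(struct block *b, size_t n)
{
	bool any_hang = false;
	size_t i;

	for (i = 0; i < n; i++) {	/* pass 1: mark hung blocks */
		if (b[i].valid && b[i].check)
			b[i].hang = b[i].check();
		any_hang |= b[i].valid && b[i].hang;
	}
	if (!any_hang)
		return true;

	/* passes 2-4: each stage runs over the whole table */
	for (i = 0; i < n; i++)
		if (b[i].valid && b[i].hang && b[i].pre && b[i].pre())
			return false;
	for (i = 0; i < n; i++)
		if (b[i].valid && b[i].hang && b[i].soft && b[i].soft())
			return false;
	for (i = 0; i < n; i++)
		if (b[i].valid && b[i].hang && b[i].post && b[i].post())
			return false;
	return true;	/* caller re-checks; failure means full reset */
}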
4423 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) in amdgpu_device_recover_vram() argument
4430 if (amdgpu_sriov_runtime(adev)) in amdgpu_device_recover_vram()
4435 dev_info(adev->dev, "recover vram bo from shadow start\n"); in amdgpu_device_recover_vram()
4436 mutex_lock(&adev->shadow_list_lock); in amdgpu_device_recover_vram()
4437 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { in amdgpu_device_recover_vram()
4464 mutex_unlock(&adev->shadow_list_lock); in amdgpu_device_recover_vram()
4471 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); in amdgpu_device_recover_vram()
4475 dev_info(adev->dev, "recover vram bo from shadow done\n"); in amdgpu_device_recover_vram()
4489 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, in amdgpu_device_reset_sriov() argument
4497 amdgpu_amdkfd_pre_reset(adev); in amdgpu_device_reset_sriov()
4500 r = amdgpu_virt_request_full_gpu(adev, true); in amdgpu_device_reset_sriov()
4502 r = amdgpu_virt_reset_gpu(adev); in amdgpu_device_reset_sriov()
4507 r = amdgpu_device_ip_reinit_early_sriov(adev); in amdgpu_device_reset_sriov()
4511 amdgpu_virt_init_data_exchange(adev); in amdgpu_device_reset_sriov()
4513 r = amdgpu_device_fw_loading(adev); in amdgpu_device_reset_sriov()
4518 r = amdgpu_device_ip_reinit_late_sriov(adev); in amdgpu_device_reset_sriov()
4522 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_reset_sriov()
4524 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reset_sriov()
4525 r = amdgpu_xgmi_update_topology(hive, adev); in amdgpu_device_reset_sriov()
4531 amdgpu_irq_gpu_reset_resume_helper(adev); in amdgpu_device_reset_sriov()
4532 r = amdgpu_ib_ring_tests(adev); in amdgpu_device_reset_sriov()
4534 amdgpu_amdkfd_post_reset(adev); in amdgpu_device_reset_sriov()
4538 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { in amdgpu_device_reset_sriov()
4539 amdgpu_inc_vram_lost(adev); in amdgpu_device_reset_sriov()
4540 r = amdgpu_device_recover_vram(adev); in amdgpu_device_reset_sriov()
4542 amdgpu_virt_release_full_gpu(adev, true); in amdgpu_device_reset_sriov()
4562 bool amdgpu_device_has_job_running(struct amdgpu_device *adev) in amdgpu_device_has_job_running() argument
4568 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_has_job_running()
4591 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) in amdgpu_device_should_recover_gpu() argument
4597 if (!amdgpu_device_ip_check_soft_reset(adev)) { in amdgpu_device_should_recover_gpu()
4598 dev_info(adev->dev,"Timeout, but no hardware hang detected.\n"); in amdgpu_device_should_recover_gpu()
4602 if (amdgpu_sriov_vf(adev)) in amdgpu_device_should_recover_gpu()
4606 switch (adev->asic_type) { in amdgpu_device_should_recover_gpu()
4631 dev_info(adev->dev, "GPU recovery disabled.\n"); in amdgpu_device_should_recover_gpu()
4635 int amdgpu_device_mode1_reset(struct amdgpu_device *adev) in amdgpu_device_mode1_reset() argument
4640 amdgpu_atombios_scratch_regs_engine_hung(adev, true); in amdgpu_device_mode1_reset()
4642 dev_info(adev->dev, "GPU mode1 reset\n"); in amdgpu_device_mode1_reset()
4645 pci_clear_master(adev->pdev); in amdgpu_device_mode1_reset()
4647 amdgpu_device_cache_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
4649 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { in amdgpu_device_mode1_reset()
4650 dev_info(adev->dev, "GPU smu mode1 reset\n"); in amdgpu_device_mode1_reset()
4651 ret = amdgpu_dpm_mode1_reset(adev); in amdgpu_device_mode1_reset()
4653 dev_info(adev->dev, "GPU psp mode1 reset\n"); in amdgpu_device_mode1_reset()
4654 ret = psp_gpu_reset(adev); in amdgpu_device_mode1_reset()
4658 dev_err(adev->dev, "GPU mode1 reset failed\n"); in amdgpu_device_mode1_reset()
4660 amdgpu_device_load_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
4663 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_device_mode1_reset()
4664 u32 memsize = adev->nbio.funcs->get_memsize(adev); in amdgpu_device_mode1_reset()
4671 amdgpu_atombios_scratch_regs_engine_hung(adev, false); in amdgpu_device_mode1_reset()
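
amdgpu_device_mode1_reset (4635-4671) brackets the whole-ASIC reset: flag the engines as hung in the scratch registers, stop bus mastering, cache PCI config space, fire the SMU mode-1 reset (or the PSP path when the SMU cannot), restore config space, then poll the NBIO memsize register until the ASIC answers again. A hedged condensation, error paths trimmed:

#include <linux/delay.h>

/* Sketch of the mode-1 reset bracket. */
static int mode1_reset(struct amdgpu_device *adev)
{
	int ret, i;

	pci_clear_master(adev->pdev);		/* quiesce bus mastering */
	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev))
		ret = amdgpu_dpm_mode1_reset(adev);	/* SMU path */
	else
		ret = psp_gpu_reset(adev);		/* PSP fallback */
	if (ret)
		return ret;

	amdgpu_device_load_pci_state(adev->pdev);

	/* memsize reads back all-ones until the ASIC is alive again */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (adev->nbio.funcs->get_memsize(adev) != 0xffffffff)
			break;
		udelay(1);
	}
	return 0;
}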
4675 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, in amdgpu_device_pre_asic_reset() argument
4683 if (reset_context->reset_req_dev == adev) in amdgpu_device_pre_asic_reset()
4686 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_pre_asic_reset()
4688 amdgpu_virt_fini_data_exchange(adev); in amdgpu_device_pre_asic_reset()
4691 amdgpu_fence_driver_isr_toggle(adev, true); in amdgpu_device_pre_asic_reset()
4695 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_pre_asic_reset()
4708 amdgpu_fence_driver_isr_toggle(adev, false); in amdgpu_device_pre_asic_reset()
4713 r = amdgpu_reset_prepare_hwcontext(adev, reset_context); in amdgpu_device_pre_asic_reset()
4721 if (!amdgpu_sriov_vf(adev)) { in amdgpu_device_pre_asic_reset()
4724 need_full_reset = amdgpu_device_ip_need_full_reset(adev); in amdgpu_device_pre_asic_reset()
4727 amdgpu_device_ip_pre_soft_reset(adev); in amdgpu_device_pre_asic_reset()
4728 r = amdgpu_device_ip_soft_reset(adev); in amdgpu_device_pre_asic_reset()
4729 amdgpu_device_ip_post_soft_reset(adev); in amdgpu_device_pre_asic_reset()
4730 if (r || amdgpu_device_ip_check_soft_reset(adev)) { in amdgpu_device_pre_asic_reset()
4731 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); in amdgpu_device_pre_asic_reset()
4737 r = amdgpu_device_ip_suspend(adev); in amdgpu_device_pre_asic_reset()
4748 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) in amdgpu_reset_reg_dumps() argument
4752 lockdep_assert_held(&adev->reset_domain->sem); in amdgpu_reset_reg_dumps()
4754 for (i = 0; i < adev->num_regs; i++) { in amdgpu_reset_reg_dumps()
4755 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]); in amdgpu_reset_reg_dumps()
4756 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], in amdgpu_reset_reg_dumps()
4757 adev->reset_dump_reg_value[i]); in amdgpu_reset_reg_dumps()
4768 struct amdgpu_device *adev = data; in amdgpu_devcoredump_read() local
4782 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec); in amdgpu_devcoredump_read()
4783 if (adev->reset_task_info.pid) in amdgpu_devcoredump_read()
4785 adev->reset_task_info.process_name, in amdgpu_devcoredump_read()
4786 adev->reset_task_info.pid); in amdgpu_devcoredump_read()
4788 if (adev->reset_vram_lost) in amdgpu_devcoredump_read()
4790 if (adev->num_regs) { in amdgpu_devcoredump_read()
4793 for (i = 0; i < adev->num_regs; i++) in amdgpu_devcoredump_read()
4795 adev->reset_dump_reg_list[i], in amdgpu_devcoredump_read()
4796 adev->reset_dump_reg_value[i]); in amdgpu_devcoredump_read()
4806 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev) in amdgpu_reset_capture_coredumpm() argument
4808 struct drm_device *dev = adev_to_drm(adev); in amdgpu_reset_capture_coredumpm()
4810 ktime_get_ts64(&adev->reset_time); in amdgpu_reset_capture_coredumpm()
4811 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL, in amdgpu_reset_capture_coredumpm()
5012 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev) in amdgpu_device_set_mp1_state() argument
5015 switch (amdgpu_asic_reset_method(adev)) { in amdgpu_device_set_mp1_state()
5017 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; in amdgpu_device_set_mp1_state()
5020 adev->mp1_state = PP_MP1_STATE_RESET; in amdgpu_device_set_mp1_state()
5023 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_set_mp1_state()
5028 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev) in amdgpu_device_unset_mp1_state() argument
5030 amdgpu_vf_error_trans_all(adev); in amdgpu_device_unset_mp1_state()
5031 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_unset_mp1_state()
5034 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) in amdgpu_device_resume_display_audio() argument
5038 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_resume_display_audio()
5039 adev->pdev->bus->number, 1); in amdgpu_device_resume_display_audio()
5048 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) in amdgpu_device_suspend_display_audio() argument
5058 reset_method = amdgpu_asic_reset_method(adev); in amdgpu_device_suspend_display_audio()
5063 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_suspend_display_audio()
5064 adev->pdev->bus->number, 1); in amdgpu_device_suspend_display_audio()
5083 dev_warn(adev->dev, "failed to suspend display audio\n"); in amdgpu_device_suspend_display_audio()
5097 struct amdgpu_device *adev, struct list_head *device_list_handle, in amdgpu_device_recheck_guilty_jobs() argument
5103 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_recheck_guilty_jobs()
5130 amdgpu_fence_driver_isr_toggle(adev, true); in amdgpu_device_recheck_guilty_jobs()
5135 amdgpu_fence_driver_isr_toggle(adev, false); in amdgpu_device_recheck_guilty_jobs()
5145 amdgpu_reset_prepare_hwcontext(adev, reset_context); in amdgpu_device_recheck_guilty_jobs()
5148 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_recheck_guilty_jobs()
5149 amdgpu_virt_fini_data_exchange(adev); in amdgpu_device_recheck_guilty_jobs()
5150 r = amdgpu_device_reset_sriov(adev, false); in amdgpu_device_recheck_guilty_jobs()
5152 adev->asic_reset_res = r; in amdgpu_device_recheck_guilty_jobs()
5166 atomic_inc(&adev->gpu_reset_counter); in amdgpu_device_recheck_guilty_jobs()
5184 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) in amdgpu_device_stop_pending_resets() argument
5186 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_device_stop_pending_resets()
5189 if (!amdgpu_sriov_vf(adev)) in amdgpu_device_stop_pending_resets()
5190 cancel_work(&adev->reset_work); in amdgpu_device_stop_pending_resets()
5193 if (adev->kfd.dev) in amdgpu_device_stop_pending_resets()
5194 cancel_work(&adev->kfd.reset_work); in amdgpu_device_stop_pending_resets()
5196 if (amdgpu_sriov_vf(adev)) in amdgpu_device_stop_pending_resets()
5197 cancel_work(&adev->virt.flr_work); in amdgpu_device_stop_pending_resets()
5199 if (con && adev->ras_enabled) in amdgpu_device_stop_pending_resets()
5216 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, in amdgpu_device_gpu_recover() argument
5237 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); in amdgpu_device_gpu_recover()
5243 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) { in amdgpu_device_gpu_recover()
5250 dev_info(adev->dev, "GPU %s begin!\n", in amdgpu_device_gpu_recover()
5253 if (!amdgpu_sriov_vf(adev)) in amdgpu_device_gpu_recover()
5254 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_gpu_recover()
5266 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { in amdgpu_device_gpu_recover()
5269 if (gpu_reset_for_dev_remove && adev->shutdown) in amdgpu_device_gpu_recover()
5272 if (!list_is_first(&adev->reset_list, &device_list)) in amdgpu_device_gpu_recover()
5273 list_rotate_to_front(&adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
5276 list_add_tail(&adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
5348 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); in amdgpu_device_gpu_recover()
5373 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); in amdgpu_device_gpu_recover()
5376 if (amdgpu_sriov_vf(adev)) { in amdgpu_device_gpu_recover()
5377 r = amdgpu_device_reset_sriov(adev, job ? false : true); in amdgpu_device_gpu_recover()
5379 adev->asic_reset_res = r; in amdgpu_device_gpu_recover()
5382 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) in amdgpu_device_gpu_recover()
5383 amdgpu_ras_resume(adev); in amdgpu_device_gpu_recover()
5406 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter))) in amdgpu_device_gpu_recover()
5423 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) in amdgpu_device_gpu_recover()
5455 if (!adev->kfd.init_complete) in amdgpu_device_gpu_recover()
5456 amdgpu_amdkfd_device_init(adev); in amdgpu_device_gpu_recover()
5475 dev_info(adev->dev, "GPU reset end with ret = %d\n", r); in amdgpu_device_gpu_recover()
5477 atomic_set(&adev->reset_domain->reset_res, r); in amdgpu_device_gpu_recover()
5490 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) in amdgpu_device_get_pcie_info() argument
5497 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; in amdgpu_device_get_pcie_info()
5500 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; in amdgpu_device_get_pcie_info()
5503 if (pci_is_root_bus(adev->pdev->bus)) { in amdgpu_device_get_pcie_info()
5504 if (adev->pm.pcie_gen_mask == 0) in amdgpu_device_get_pcie_info()
5505 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; in amdgpu_device_get_pcie_info()
5506 if (adev->pm.pcie_mlw_mask == 0) in amdgpu_device_get_pcie_info()
5507 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
5511 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) in amdgpu_device_get_pcie_info()
5514 pcie_bandwidth_available(adev->pdev, NULL, in amdgpu_device_get_pcie_info()
5517 if (adev->pm.pcie_gen_mask == 0) { in amdgpu_device_get_pcie_info()
5519 pdev = adev->pdev; in amdgpu_device_get_pcie_info()
5522 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5527 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5533 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5538 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5542 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5545 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
5549 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5553 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5559 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5564 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5568 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
5571 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
5575 if (adev->pm.pcie_mlw_mask == 0) { in amdgpu_device_get_pcie_info()
5577 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
5581 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | in amdgpu_device_get_pcie_info()
5590 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | in amdgpu_device_get_pcie_info()
5598 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | in amdgpu_device_get_pcie_info()
5605 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | in amdgpu_device_get_pcie_info()
5611 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | in amdgpu_device_get_pcie_info()
5616 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | in amdgpu_device_get_pcie_info()
5620 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; in amdgpu_device_get_pcie_info()
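
When neither module parameter forces a value (5497, 5500), the function derives the masks itself: it samples the live link with pcie_bandwidth_available() (5514) and then widens each observed capability into a cumulative CAIL_* bitmask, so a Gen4 link also sets the Gen1-Gen3 bits and an x16 link also sets x1-x12. A short sketch of the query step only:

#include <linux/pci.h>

/* Sketch: sample the effective link speed and width; the driver then
 * expands each into the cumulative capability masks seen above. */
static void query_pcie_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;

	pcie_bandwidth_available(pdev, NULL, &speed, &width);
	/* speed selects the GENn bits, width the Xn bits */
}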
5639 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, in amdgpu_device_is_peer_accessible() argument
5646 adev->gmc.aper_base + adev->gmc.aper_size - 1; in amdgpu_device_is_peer_accessible()
5648 !adev->gmc.xgmi.connected_to_cpu && in amdgpu_device_is_peer_accessible()
5649 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); in amdgpu_device_is_peer_accessible()
5651 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size && in amdgpu_device_is_peer_accessible()
5652 adev->gmc.real_vram_size == adev->gmc.visible_vram_size && in amdgpu_device_is_peer_accessible()
5653 !(adev->gmc.aper_base & address_mask || in amdgpu_device_is_peer_accessible()
5662 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_baco_enter() local
5663 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); in amdgpu_device_baco_enter()
5665 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) in amdgpu_device_baco_enter()
5668 if (ras && adev->ras_enabled && in amdgpu_device_baco_enter()
5669 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_enter()
5670 adev->nbio.funcs->enable_doorbell_interrupt(adev, false); in amdgpu_device_baco_enter()
5672 return amdgpu_dpm_baco_enter(adev); in amdgpu_device_baco_enter()
5677 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_baco_exit() local
5678 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); in amdgpu_device_baco_exit()
5681 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) in amdgpu_device_baco_exit()
5684 ret = amdgpu_dpm_baco_exit(adev); in amdgpu_device_baco_exit()
5688 if (ras && adev->ras_enabled && in amdgpu_device_baco_exit()
5689 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_exit()
5690 adev->nbio.funcs->enable_doorbell_interrupt(adev, true); in amdgpu_device_baco_exit()
5692 if (amdgpu_passthrough(adev) && in amdgpu_device_baco_exit()
5693 adev->nbio.funcs->clear_doorbell_interrupt) in amdgpu_device_baco_exit()
5694 adev->nbio.funcs->clear_doorbell_interrupt(adev); in amdgpu_device_baco_exit()
5711 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_pci_error_detected() local
5716 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_pci_error_detected()
5721 adev->pci_channel_state = state; in amdgpu_pci_error_detected()
5732 amdgpu_device_lock_reset_domain(adev->reset_domain); in amdgpu_pci_error_detected()
5733 amdgpu_device_set_mp1_state(adev); in amdgpu_pci_error_detected()
5740 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_pci_error_detected()
5747 atomic_inc(&adev->gpu_reset_counter); in amdgpu_pci_error_detected()
5787 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_pci_slot_reset() local
5798 list_add_tail(&adev->reset_list, &device_list); in amdgpu_pci_slot_reset()
5807 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_pci_slot_reset()
5808 memsize = amdgpu_asic_get_config_memsize(adev); in amdgpu_pci_slot_reset()
5820 reset_context.reset_req_dev = adev; in amdgpu_pci_slot_reset()
5824 adev->no_hw_access = true; in amdgpu_pci_slot_reset()
5825 r = amdgpu_device_pre_asic_reset(adev, &reset_context); in amdgpu_pci_slot_reset()
5826 adev->no_hw_access = false; in amdgpu_pci_slot_reset()
5834 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_pci_slot_reset()
5835 pci_restore_state(adev->pdev); in amdgpu_pci_slot_reset()
5840 amdgpu_device_unset_mp1_state(adev); in amdgpu_pci_slot_reset()
5841 amdgpu_device_unlock_reset_domain(adev->reset_domain); in amdgpu_pci_slot_reset()
5857 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_pci_resume() local
5864 if (adev->pci_channel_state != pci_channel_io_frozen) in amdgpu_pci_resume()
5868 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_pci_resume()
5878 amdgpu_device_unset_mp1_state(adev); in amdgpu_pci_resume()
5879 amdgpu_device_unlock_reset_domain(adev->reset_domain); in amdgpu_pci_resume()
5885 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_cache_pci_state() local
5890 kfree(adev->pci_state); in amdgpu_device_cache_pci_state()
5892 adev->pci_state = pci_store_saved_state(pdev); in amdgpu_device_cache_pci_state()
5894 if (!adev->pci_state) { in amdgpu_device_cache_pci_state()
5909 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_device_load_pci_state() local
5912 if (!adev->pci_state) in amdgpu_device_load_pci_state()
5915 r = pci_load_saved_state(pdev, adev->pci_state); in amdgpu_device_load_pci_state()
5927 void amdgpu_device_flush_hdp(struct amdgpu_device *adev, in amdgpu_device_flush_hdp() argument
5931 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_flush_hdp()
5934 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_flush_hdp()
5940 amdgpu_asic_flush_hdp(adev, ring); in amdgpu_device_flush_hdp()
5943 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, in amdgpu_device_invalidate_hdp() argument
5947 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_invalidate_hdp()
5950 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_invalidate_hdp()
5953 amdgpu_asic_invalidate_hdp(adev, ring); in amdgpu_device_invalidate_hdp()
5956 int amdgpu_in_reset(struct amdgpu_device *adev) in amdgpu_in_reset() argument
5958 return atomic_read(&adev->reset_domain->in_gpu_reset); in amdgpu_in_reset()
5981 void amdgpu_device_halt(struct amdgpu_device *adev) in amdgpu_device_halt() argument
5983 struct pci_dev *pdev = adev->pdev; in amdgpu_device_halt()
5984 struct drm_device *ddev = adev_to_drm(adev); in amdgpu_device_halt()
5988 amdgpu_irq_disable_all(adev); in amdgpu_device_halt()
5990 amdgpu_fence_driver_hw_fini(adev); in amdgpu_device_halt()
5992 adev->no_hw_access = true; in amdgpu_device_halt()
5994 amdgpu_device_unmap_mmio(adev); in amdgpu_device_halt()
6000 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, in amdgpu_device_pcie_port_rreg() argument
6006 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_rreg()
6007 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_rreg()
6009 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
6013 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
6017 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, in amdgpu_device_pcie_port_wreg() argument
6022 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_wreg()
6023 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_wreg()
6025 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
6030 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
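
These two helpers implement the classic index/data indirect-register protocol: under the pcie_idx_lock, write the target offset to the index register, read it back to post the write, then touch the data register. The read side, sketched in kernel style from the offsets the NBIO callbacks return above:

/* Sketch of index/data indirect access; the lock serializes the
 * two-step select-then-access cycle against concurrent users. */
static u32 pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 address, data, r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	writel(reg, adev->rmmio + (address * 4)); /* select the register */
	(void)readl(adev->rmmio + (address * 4)); /* read back to post it */
	r = readl(adev->rmmio + (data * 4));      /* fetch its value */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}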
6042 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, in amdgpu_device_switch_gang() argument
6050 old = dma_fence_get_rcu_safe(&adev->gang_submit); in amdgpu_device_switch_gang()
6059 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit, in amdgpu_device_switch_gang()
6066 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev) in amdgpu_device_has_display_hardware() argument
6068 switch (adev->asic_type) { in amdgpu_device_has_display_hardware()
6100 if (!adev->ip_versions[DCE_HWIP][0] || in amdgpu_device_has_display_hardware()
6101 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_display_hardware()