Lines matching refs: adev

Each entry below gives the original source line number, the matching line of code, and the enclosing function; a trailing "argument" or "local" marks whether adev enters that function as a parameter or as a local variable. All of the functions belong to the amdgpu GMC v9.0 driver (gmc_v9_0.c).

414 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,  in gmc_v9_0_ecc_interrupt_state()  argument
423 if (adev->asic_type >= CHIP_VEGA20) in gmc_v9_0_ecc_interrupt_state()
464 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_vm_fault_interrupt_state() argument
482 for (j = 0; j < adev->num_vmhubs; j++) { in gmc_v9_0_vm_fault_interrupt_state()
483 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
502 for (j = 0; j < adev->num_vmhubs; j++) { in gmc_v9_0_vm_fault_interrupt_state()
503 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
528 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, in gmc_v9_0_process_interrupt() argument
548 if (entry->ih != &adev->irq.ih_soft && in gmc_v9_0_process_interrupt()
549 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, in gmc_v9_0_process_interrupt()
556 if (entry->ih == &adev->irq.ih) { in gmc_v9_0_process_interrupt()
557 amdgpu_irq_delegate(adev, entry, 8); in gmc_v9_0_process_interrupt()
564 if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) in gmc_v9_0_process_interrupt()
573 hub = &adev->vmhub[AMDGPU_MMHUB_0]; in gmc_v9_0_process_interrupt()
576 hub = &adev->vmhub[AMDGPU_MMHUB_1]; in gmc_v9_0_process_interrupt()
579 hub = &adev->vmhub[AMDGPU_GFXHUB_0]; in gmc_v9_0_process_interrupt()
583 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); in gmc_v9_0_process_interrupt()
585 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
592 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n", in gmc_v9_0_process_interrupt()
596 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_process_interrupt()
605 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) in gmc_v9_0_process_interrupt()
614 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
617 if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) { in gmc_v9_0_process_interrupt()
618 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
623 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_process_interrupt()
651 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
654 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
657 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
660 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
663 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
666 dev_err(adev->dev, "\t RW: 0x%x\n", rw); in gmc_v9_0_process_interrupt()
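
The MORE_FAULTS, WALKER_ERROR, PERMISSION_FAULTS and MAPPING_ERROR prints at lines 654-666 above all decode bitfields out of the driver's fault-status register via mask-and-shift (REG_GET_FIELD in the driver). A minimal standalone sketch of that decode, with invented mask and shift values rather than the real VM_L2_PROTECTION_FAULT_STATUS layout:

    /* Sketch only: mask-and-shift field decode, as in the prints above.
     * The masks/shifts here are invented, not the real register layout. */
    #include <stdio.h>

    #define FIELD_GET(val, mask, shift) (((val) & (mask)) >> (shift))

    int main(void)
    {
        unsigned status = 0x1a;  /* pretend fault-status value */

        printf("MORE_FAULTS: 0x%x\n", FIELD_GET(status, 0x1u, 0));
        printf("WALKER_ERROR: 0x%x\n", FIELD_GET(status, 0xeu, 1));
        return 0;
    }
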
681 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_irq_funcs() argument
683 adev->gmc.vm_fault.num_types = 1; in gmc_v9_0_set_irq_funcs()
684 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; in gmc_v9_0_set_irq_funcs()
686 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_set_irq_funcs()
687 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_set_irq_funcs()
688 adev->gmc.ecc_irq.num_types = 1; in gmc_v9_0_set_irq_funcs()
689 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; in gmc_v9_0_set_irq_funcs()
719 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v9_0_use_invalidate_semaphore() argument
722 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) in gmc_v9_0_use_invalidate_semaphore()
727 (!amdgpu_sriov_vf(adev)) && in gmc_v9_0_use_invalidate_semaphore()
728 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && in gmc_v9_0_use_invalidate_semaphore()
729 (adev->apu_flags & AMD_APU_IS_PICASSO)))); in gmc_v9_0_use_invalidate_semaphore()
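
The predicate ending at line 729 above is the densest expression in this listing: three nested negations over apu_flags. Below is a standalone model of that branch (the GC 9.4.2 case at line 722 is handled separately in the driver), with invented flag bits standing in for AMD_APU_IS_RAVEN2 and AMD_APU_IS_PICASSO:

    /* Sketch only: models the Raven-family branch of
     * gmc_v9_0_use_invalidate_semaphore(); flag bits are invented. */
    #include <stdbool.h>
    #include <stdio.h>

    #define APU_IS_RAVEN2  (1u << 0)   /* stand-in for AMD_APU_IS_RAVEN2 */
    #define APU_IS_PICASSO (1u << 1)   /* stand-in for AMD_APU_IS_PICASSO */

    static bool use_invalidate_semaphore(bool on_mmhub, bool sriov_vf,
                                         unsigned apu_flags)
    {
        /* !(!(flags & RAVEN2) && (flags & PICASSO)) is just
         * "not (Picasso without the Raven2 flag)". */
        bool picasso_not_raven2 = (apu_flags & APU_IS_PICASSO) &&
                                 !(apu_flags & APU_IS_RAVEN2);

        return on_mmhub && !sriov_vf && !picasso_not_raven2;
    }

    int main(void)
    {
        /* Picasso alone: semaphore skipped (prints 0). */
        printf("%d\n", use_invalidate_semaphore(true, false, APU_IS_PICASSO));
        /* Picasso with the Raven2 flag set: semaphore used (prints 1). */
        printf("%d\n", use_invalidate_semaphore(true, false,
                       APU_IS_PICASSO | APU_IS_RAVEN2));
        return 0;
    }
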
732 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, in gmc_v9_0_get_atc_vmid_pasid_mapping_info() argument
761 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v9_0_flush_gpu_tlb() argument
764 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); in gmc_v9_0_flush_gpu_tlb()
769 BUG_ON(vmhub >= adev->num_vmhubs); in gmc_v9_0_flush_gpu_tlb()
771 hub = &adev->vmhub[vmhub]; in gmc_v9_0_flush_gpu_tlb()
772 if (adev->gmc.xgmi.num_physical_nodes && in gmc_v9_0_flush_gpu_tlb()
773 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { in gmc_v9_0_flush_gpu_tlb()
790 if (adev->gfx.kiq.ring.sched.ready && in gmc_v9_0_flush_gpu_tlb()
791 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && in gmc_v9_0_flush_gpu_tlb()
792 down_read_trylock(&adev->reset_domain->sem)) { in gmc_v9_0_flush_gpu_tlb()
796 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v9_0_flush_gpu_tlb()
798 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb()
802 spin_lock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
813 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
825 if (j >= adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
841 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) in gmc_v9_0_flush_gpu_tlb()
845 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
872 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
874 if (j < adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
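
gmc_v9_0_flush_gpu_tlb() above has two paths: a KIQ-assisted register write-and-wait when the KIQ ring is usable (lines 790-798), and a direct path that takes invalidate_lock and busy-polls registers against adev->usec_timeout (lines 802-874). A sketch of that polling idiom, with a callback standing in for the register read:

    /* Sketch only: the usec_timeout polling idiom from the loops above.
     * The 'done' callback stands in for reading the ack/semaphore bit. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool poll_until(bool (*done)(void *), void *ctx,
                           unsigned usec_timeout)
    {
        unsigned j;

        for (j = 0; j < usec_timeout; j++) {
            if (done(ctx))
                break;            /* the driver breaks out the same way */
            /* the driver waits ~1us between reads (udelay(1)) */
        }
        return j < usec_timeout;  /* same success test as line 874 above */
    }

    static bool ack_after_three_reads(void *ctx)
    {
        return --*(int *)ctx == 0; /* pretend the ack appears on read #3 */
    }

    int main(void)
    {
        int reads = 3;

        printf("%d\n", poll_until(ack_after_three_reads, &reads, 100)); /* 1 */
        return 0;
    }
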
890 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v9_0_flush_gpu_tlb_pasid() argument
899 u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; in gmc_v9_0_flush_gpu_tlb_pasid()
900 struct amdgpu_ring *ring = &adev->gfx.kiq.ring; in gmc_v9_0_flush_gpu_tlb_pasid()
901 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in gmc_v9_0_flush_gpu_tlb_pasid()
903 if (amdgpu_in_reset(adev)) in gmc_v9_0_flush_gpu_tlb_pasid()
906 if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) { in gmc_v9_0_flush_gpu_tlb_pasid()
913 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && in gmc_v9_0_flush_gpu_tlb_pasid()
914 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)); in gmc_v9_0_flush_gpu_tlb_pasid()
921 spin_lock(&adev->gfx.kiq.ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
932 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
933 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb_pasid()
938 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
941 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); in gmc_v9_0_flush_gpu_tlb_pasid()
942 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb_pasid()
945 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb_pasid()
951 ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
955 for (i = 0; i < adev->num_vmhubs; i++) in gmc_v9_0_flush_gpu_tlb_pasid()
956 gmc_v9_0_flush_gpu_tlb(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
959 gmc_v9_0_flush_gpu_tlb(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
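
gmc_v9_0_flush_gpu_tlb_pasid() resolves a PASID back to VMIDs by scanning the ATC mapping (lines 951-959) and flushing each match. A simplified standalone version, assuming 16 VMIDs and a toy lookup table in place of gmc_v9_0_get_atc_vmid_pasid_mapping_info():

    /* Sketch only: the pasid-to-vmid scan behind lines 951-959.
     * The 16-VMID bound and the lookup table are assumptions. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_VMIDS 16

    /* pretend ATC mapping: vmid -> pasid (0 = unmapped) */
    static const uint16_t atc_map[NUM_VMIDS] = { [3] = 0x8001, [7] = 0x8002 };

    /* stands in for gmc_v9_0_get_atc_vmid_pasid_mapping_info() */
    static bool lookup_pasid(unsigned vmid, uint16_t *pasid)
    {
        *pasid = atc_map[vmid];
        return *pasid != 0;
    }

    static void flush_tlb_pasid(uint16_t pasid)
    {
        for (unsigned vmid = 0; vmid < NUM_VMIDS; vmid++) {
            uint16_t queried;

            if (lookup_pasid(vmid, &queried) && queried == pasid)
                printf("flush vmid %u\n", vmid); /* per-hub flush in the driver */
        }
    }

    int main(void)
    {
        flush_tlb_pasid(0x8002); /* prints "flush vmid 7" */
        return 0;
    }
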
973 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); in gmc_v9_0_emit_flush_gpu_tlb()
974 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_flush_gpu_tlb() local
975 struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; in gmc_v9_0_emit_flush_gpu_tlb()
1022 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_pasid_mapping() local
1069 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v9_0_map_mtype() argument
1090 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v9_0_get_vm_pde() argument
1094 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); in gmc_v9_0_get_vm_pde()
1097 if (!adev->gmc.translate_further) in gmc_v9_0_get_vm_pde()
1116 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v9_0_get_vm_pte() argument
1131 if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || in gmc_v9_0_get_vm_pte()
1132 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) && in gmc_v9_0_get_vm_pte()
1137 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) in gmc_v9_0_get_vm_pte()
1141 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v9_0_get_vbios_fb_size() argument
1153 switch (adev->ip_versions[DCE_HWIP][0]) { in gmc_v9_0_get_vbios_fb_size()
1194 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gmc_funcs() argument
1196 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; in gmc_v9_0_set_gmc_funcs()
1199 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_umc_funcs() argument
1201 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v9_0_set_umc_funcs()
1203 adev->umc.funcs = &umc_v6_0_funcs; in gmc_v9_0_set_umc_funcs()
1206 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1207 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1208 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1209 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; in gmc_v9_0_set_umc_funcs()
1210 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1211 adev->umc.ras = &umc_v6_1_ras; in gmc_v9_0_set_umc_funcs()
1214 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1215 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1216 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1217 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; in gmc_v9_0_set_umc_funcs()
1218 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1219 adev->umc.ras = &umc_v6_1_ras; in gmc_v9_0_set_umc_funcs()
1222 adev->umc.max_ras_err_cnt_per_query = in gmc_v9_0_set_umc_funcs()
1224 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1225 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1226 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; in gmc_v9_0_set_umc_funcs()
1227 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_umc_funcs()
1228 adev->umc.ras = &umc_v6_7_ras; in gmc_v9_0_set_umc_funcs()
1229 if (1 & adev->smuio.funcs->get_die_id(adev)) in gmc_v9_0_set_umc_funcs()
1230 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; in gmc_v9_0_set_umc_funcs()
1232 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0]; in gmc_v9_0_set_umc_funcs()
1238 if (adev->umc.ras) { in gmc_v9_0_set_umc_funcs()
1239 amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); in gmc_v9_0_set_umc_funcs()
1241 strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); in gmc_v9_0_set_umc_funcs()
1242 adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; in gmc_v9_0_set_umc_funcs()
1243 adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in gmc_v9_0_set_umc_funcs()
1244 adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; in gmc_v9_0_set_umc_funcs()
1247 if (!adev->umc.ras->ras_block.ras_late_init) in gmc_v9_0_set_umc_funcs()
1248 adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; in gmc_v9_0_set_umc_funcs()
1251 if (!adev->umc.ras->ras_block.ras_cb) in gmc_v9_0_set_umc_funcs()
1252 adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; in gmc_v9_0_set_umc_funcs()
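
After the per-IP-version switch, gmc_v9_0_set_umc_funcs() installs generic RAS callbacks only where the version-specific table left a slot empty (lines 1247-1252). That fill-defaults pattern, reduced to a standalone sketch with stand-in callbacks:

    /* Sketch only: install generic callbacks only when the IP-specific
     * table (umc_v6_1_ras, umc_v6_7_ras, ...) left them NULL. */
    #include <stdio.h>

    struct ras_block {
        int (*ras_late_init)(void);
        int (*ras_cb)(void);
    };

    /* stand-ins for amdgpu_umc_ras_late_init /
     * amdgpu_umc_process_ras_data_cb */
    static int generic_late_init(void) { return 0; }
    static int generic_cb(void)        { return 0; }

    static void fill_ras_defaults(struct ras_block *blk)
    {
        if (!blk->ras_late_init)
            blk->ras_late_init = generic_late_init;
        if (!blk->ras_cb)
            blk->ras_cb = generic_cb;
    }

    int main(void)
    {
        struct ras_block blk = { 0 };

        fill_ras_defaults(&blk);
        printf("%d\n", blk.ras_cb()); /* 0 */
        return 0;
    }
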
1256 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_funcs() argument
1258 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_set_mmhub_funcs()
1260 adev->mmhub.funcs = &mmhub_v9_4_funcs; in gmc_v9_0_set_mmhub_funcs()
1263 adev->mmhub.funcs = &mmhub_v1_7_funcs; in gmc_v9_0_set_mmhub_funcs()
1266 adev->mmhub.funcs = &mmhub_v1_0_funcs; in gmc_v9_0_set_mmhub_funcs()
1271 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_ras_funcs() argument
1273 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_set_mmhub_ras_funcs()
1275 adev->mmhub.ras = &mmhub_v1_0_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1278 adev->mmhub.ras = &mmhub_v9_4_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1281 adev->mmhub.ras = &mmhub_v1_7_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1288 if (adev->mmhub.ras) { in gmc_v9_0_set_mmhub_ras_funcs()
1289 amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block); in gmc_v9_0_set_mmhub_ras_funcs()
1291 strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub"); in gmc_v9_0_set_mmhub_ras_funcs()
1292 adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB; in gmc_v9_0_set_mmhub_ras_funcs()
1293 adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in gmc_v9_0_set_mmhub_ras_funcs()
1294 adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm; in gmc_v9_0_set_mmhub_ras_funcs()
1298 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gfxhub_funcs() argument
1300 adev->gfxhub.funcs = &gfxhub_v1_0_funcs; in gmc_v9_0_set_gfxhub_funcs()
1303 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_hdp_ras_funcs() argument
1305 adev->hdp.ras = &hdp_v4_0_ras; in gmc_v9_0_set_hdp_ras_funcs()
1306 amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block); in gmc_v9_0_set_hdp_ras_funcs()
1307 adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm; in gmc_v9_0_set_hdp_ras_funcs()
1310 static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mca_funcs() argument
1313 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v9_0_set_mca_funcs()
1315 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_mca_funcs()
1316 adev->mca.funcs = &mca_v3_0_funcs; in gmc_v9_0_set_mca_funcs()
1326 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_early_init() local
1329 if (adev->asic_type == CHIP_VEGA20 || in gmc_v9_0_early_init()
1330 adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_early_init()
1331 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1333 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { in gmc_v9_0_early_init()
1334 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1335 adev->gmc.xgmi.connected_to_cpu = in gmc_v9_0_early_init()
1336 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); in gmc_v9_0_early_init()
1339 gmc_v9_0_set_gmc_funcs(adev); in gmc_v9_0_early_init()
1340 gmc_v9_0_set_irq_funcs(adev); in gmc_v9_0_early_init()
1341 gmc_v9_0_set_umc_funcs(adev); in gmc_v9_0_early_init()
1342 gmc_v9_0_set_mmhub_funcs(adev); in gmc_v9_0_early_init()
1343 gmc_v9_0_set_mmhub_ras_funcs(adev); in gmc_v9_0_early_init()
1344 gmc_v9_0_set_gfxhub_funcs(adev); in gmc_v9_0_early_init()
1345 gmc_v9_0_set_hdp_ras_funcs(adev); in gmc_v9_0_early_init()
1346 gmc_v9_0_set_mca_funcs(adev); in gmc_v9_0_early_init()
1348 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v9_0_early_init()
1349 adev->gmc.shared_aperture_end = in gmc_v9_0_early_init()
1350 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
1351 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v9_0_early_init()
1352 adev->gmc.private_aperture_end = in gmc_v9_0_early_init()
1353 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
1355 r = amdgpu_gmc_ras_early_init(adev); in gmc_v9_0_early_init()
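
The aperture setup at lines 1348-1353 places two 4 GiB windows; the subtraction of 1 turns an exclusive size into an inclusive end address. A quick check of that arithmetic:

    /* Sketch only: the inclusive-end arithmetic from lines 1348-1353. */
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t shared_start = 0x2000000000000000ULL;
        uint64_t shared_end   = shared_start + (4ULL << 30) - 1;

        /* prints 0x20000000ffffffff: start plus 4 GiB, minus one */
        printf("0x%016" PRIx64 "\n", shared_end);
        return 0;
    }
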
1364 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_late_init() local
1367 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v9_0_late_init()
1375 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_late_init()
1376 (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { in gmc_v9_0_late_init()
1377 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { in gmc_v9_0_late_init()
1378 if (adev->df.funcs && in gmc_v9_0_late_init()
1379 adev->df.funcs->enable_ecc_force_par_wr_rmw) in gmc_v9_0_late_init()
1380 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); in gmc_v9_0_late_init()
1384 if (!amdgpu_persistent_edc_harvesting_supported(adev)) { in gmc_v9_0_late_init()
1385 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && in gmc_v9_0_late_init()
1386 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) in gmc_v9_0_late_init()
1387 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); in gmc_v9_0_late_init()
1389 if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops && in gmc_v9_0_late_init()
1390 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count) in gmc_v9_0_late_init()
1391 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev); in gmc_v9_0_late_init()
1394 r = amdgpu_gmc_ras_late_init(adev); in gmc_v9_0_late_init()
1398 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_late_init()
1401 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v9_0_vram_gtt_location() argument
1404 u64 base = adev->mmhub.funcs->get_fb_location(adev); in gmc_v9_0_vram_gtt_location()
1407 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
1408 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_vram_gtt_location()
1409 amdgpu_gmc_sysvm_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1411 amdgpu_gmc_vram_location(adev, mc, base); in gmc_v9_0_vram_gtt_location()
1412 amdgpu_gmc_gart_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1413 amdgpu_gmc_agp_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1416 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v9_0_vram_gtt_location()
1419 adev->vm_manager.vram_base_offset += in gmc_v9_0_vram_gtt_location()
1420 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
1432 static int gmc_v9_0_mc_init(struct amdgpu_device *adev) in gmc_v9_0_mc_init() argument
1437 adev->gmc.mc_vram_size = in gmc_v9_0_mc_init()
1438 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v9_0_mc_init()
1439 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v9_0_mc_init()
1441 if (!(adev->flags & AMD_IS_APU) && in gmc_v9_0_mc_init()
1442 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_mc_init()
1443 r = amdgpu_device_resize_fb_bar(adev); in gmc_v9_0_mc_init()
1447 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v9_0_mc_init()
1448 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v9_0_mc_init()
1463 if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || in gmc_v9_0_mc_init()
1464 (adev->gmc.xgmi.supported && in gmc_v9_0_mc_init()
1465 adev->gmc.xgmi.connected_to_cpu)) { in gmc_v9_0_mc_init()
1466 adev->gmc.aper_base = in gmc_v9_0_mc_init()
1467 adev->gfxhub.funcs->get_mc_fb_offset(adev) + in gmc_v9_0_mc_init()
1468 adev->gmc.xgmi.physical_node_id * in gmc_v9_0_mc_init()
1469 adev->gmc.xgmi.node_segment_size; in gmc_v9_0_mc_init()
1470 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1475 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v9_0_mc_init()
1476 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) in gmc_v9_0_mc_init()
1477 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1481 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v9_0_mc_init()
1488 adev->gmc.gart_size = 512ULL << 20; in gmc_v9_0_mc_init()
1493 adev->gmc.gart_size = 1024ULL << 20; in gmc_v9_0_mc_init()
1497 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v9_0_mc_init()
1500 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; in gmc_v9_0_mc_init()
1502 gmc_v9_0_vram_gtt_location(adev, &adev->gmc); in gmc_v9_0_mc_init()
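
In gmc_v9_0_mc_init(), sizes are MiB values shifted into bytes (512ULL << 20, amdgpu_gart_size << 20), and visible VRAM is clamped so it never exceeds the real VRAM size (lines 1475-1477). A small sketch of both, with invented sizes:

    /* Sketch only: MiB-to-bytes shifts and the visible-VRAM clamp.
     * The sizes below are invented for illustration. */
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t real_vram_size    = 8192ULL << 20;  /* pretend 8 GiB VRAM */
        uint64_t aper_size         = 256ULL  << 20;  /* pretend 256 MiB BAR */
        uint64_t visible_vram_size = aper_size;

        /* never report more visible VRAM than physically exists */
        if (visible_vram_size > real_vram_size)
            visible_vram_size = real_vram_size;

        printf("%" PRIu64 " MiB visible\n", visible_vram_size >> 20); /* 256 */
        return 0;
    }
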
1507 static int gmc_v9_0_gart_init(struct amdgpu_device *adev) in gmc_v9_0_gart_init() argument
1511 if (adev->gart.bo) { in gmc_v9_0_gart_init()
1516 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_gart_init()
1517 adev->gmc.vmid0_page_table_depth = 1; in gmc_v9_0_gart_init()
1518 adev->gmc.vmid0_page_table_block_size = 12; in gmc_v9_0_gart_init()
1520 adev->gmc.vmid0_page_table_depth = 0; in gmc_v9_0_gart_init()
1521 adev->gmc.vmid0_page_table_block_size = 0; in gmc_v9_0_gart_init()
1525 r = amdgpu_gart_init(adev); in gmc_v9_0_gart_init()
1528 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v9_0_gart_init()
1529 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) | in gmc_v9_0_gart_init()
1532 r = amdgpu_gart_table_vram_alloc(adev); in gmc_v9_0_gart_init()
1536 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_gart_init()
1537 r = amdgpu_gmc_pdb0_alloc(adev); in gmc_v9_0_gart_init()
1551 static void gmc_v9_0_save_registers(struct amdgpu_device *adev) in gmc_v9_0_save_registers() argument
1553 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || in gmc_v9_0_save_registers()
1554 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) in gmc_v9_0_save_registers()
1555 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); in gmc_v9_0_save_registers()
1561 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_init() local
1563 adev->gfxhub.funcs->init(adev); in gmc_v9_0_sw_init()
1565 adev->mmhub.funcs->init(adev); in gmc_v9_0_sw_init()
1566 if (adev->mca.funcs) in gmc_v9_0_sw_init()
1567 adev->mca.funcs->init(adev); in gmc_v9_0_sw_init()
1569 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v9_0_sw_init()
1571 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v9_0_sw_init()
1573 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
1578 adev->gmc.vram_width = 2048; in gmc_v9_0_sw_init()
1580 adev->gmc.vram_width = vram_width; in gmc_v9_0_sw_init()
1582 if (!adev->gmc.vram_width) { in gmc_v9_0_sw_init()
1586 if (adev->flags & AMD_IS_APU) in gmc_v9_0_sw_init()
1590 if (adev->df.funcs && in gmc_v9_0_sw_init()
1591 adev->df.funcs->get_hbm_channel_number) { in gmc_v9_0_sw_init()
1592 numchan = adev->df.funcs->get_hbm_channel_number(adev); in gmc_v9_0_sw_init()
1593 adev->gmc.vram_width = numchan * chansize; in gmc_v9_0_sw_init()
1597 adev->gmc.vram_type = vram_type; in gmc_v9_0_sw_init()
1598 adev->gmc.vram_vendor = vram_vendor; in gmc_v9_0_sw_init()
1599 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v9_0_sw_init()
1602 adev->num_vmhubs = 2; in gmc_v9_0_sw_init()
1604 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { in gmc_v9_0_sw_init()
1605 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1608 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); in gmc_v9_0_sw_init()
1609 adev->gmc.translate_further = in gmc_v9_0_sw_init()
1610 adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1618 adev->num_vmhubs = 2; in gmc_v9_0_sw_init()
1627 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
1628 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); in gmc_v9_0_sw_init()
1630 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1631 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) in gmc_v9_0_sw_init()
1632 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1635 adev->num_vmhubs = 3; in gmc_v9_0_sw_init()
1638 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1639 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1646 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1647 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1651 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { in gmc_v9_0_sw_init()
1652 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1653 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1658 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, in gmc_v9_0_sw_init()
1659 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1664 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_sw_init()
1665 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_sw_init()
1667 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, in gmc_v9_0_sw_init()
1668 &adev->gmc.ecc_irq); in gmc_v9_0_sw_init()
1677 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v9_0_sw_init()
1679 dma_addr_bits = adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ? 48:44; in gmc_v9_0_sw_init()
1680 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); in gmc_v9_0_sw_init()
1685 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits); in gmc_v9_0_sw_init()
1687 r = gmc_v9_0_mc_init(adev); in gmc_v9_0_sw_init()
1691 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v9_0_sw_init()
1694 r = amdgpu_bo_init(adev); in gmc_v9_0_sw_init()
1698 r = gmc_v9_0_gart_init(adev); in gmc_v9_0_sw_init()
1712 adev->vm_manager.first_kfd_vmid = in gmc_v9_0_sw_init()
1713 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || in gmc_v9_0_sw_init()
1714 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8; in gmc_v9_0_sw_init()
1716 amdgpu_vm_manager_init(adev); in gmc_v9_0_sw_init()
1718 gmc_v9_0_save_registers(adev); in gmc_v9_0_sw_init()
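
Within gmc_v9_0_sw_init(), lines 1679-1680 pick 48 DMA address bits on GC 9.4.2 and 44 elsewhere, then build the mask with the kernel's DMA_BIT_MASK macro. The macro body below follows the kernel's definition in dma-mapping.h; printing both values shows the two masks:

    /* Sketch only: what DMA_BIT_MASK(44) / DMA_BIT_MASK(48) evaluate to. */
    #include <inttypes.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        printf("44-bit mask: 0x%016" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(44));
        printf("48-bit mask: 0x%016" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(48));
        return 0;
    }
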
1725 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_fini() local
1727 amdgpu_gmc_ras_fini(adev); in gmc_v9_0_sw_fini()
1728 amdgpu_gem_force_release(adev); in gmc_v9_0_sw_fini()
1729 amdgpu_vm_manager_fini(adev); in gmc_v9_0_sw_fini()
1730 amdgpu_gart_table_vram_free(adev); in gmc_v9_0_sw_fini()
1731 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); in gmc_v9_0_sw_fini()
1732 amdgpu_bo_fini(adev); in gmc_v9_0_sw_fini()
1737 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v9_0_init_golden_registers() argument
1740 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_init_golden_registers()
1742 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_init_golden_registers()
1746 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1749 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1756 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1772 void gmc_v9_0_restore_registers(struct amdgpu_device *adev) in gmc_v9_0_restore_registers() argument
1774 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || in gmc_v9_0_restore_registers()
1775 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) { in gmc_v9_0_restore_registers()
1776 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); in gmc_v9_0_restore_registers()
1777 WARN_ON(adev->gmc.sdpif_register != in gmc_v9_0_restore_registers()
1787 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) in gmc_v9_0_gart_enable() argument
1791 if (adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_gart_enable()
1792 amdgpu_gmc_init_pdb0(adev); in gmc_v9_0_gart_enable()
1794 if (adev->gart.bo == NULL) { in gmc_v9_0_gart_enable()
1795 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v9_0_gart_enable()
1799 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v9_0_gart_enable()
1800 r = adev->gfxhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
1804 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
1809 (unsigned)(adev->gmc.gart_size >> 20)); in gmc_v9_0_gart_enable()
1810 if (adev->gmc.pdb0_bo) in gmc_v9_0_gart_enable()
1812 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); in gmc_v9_0_gart_enable()
1814 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v9_0_gart_enable()
1821 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_init() local
1826 gmc_v9_0_init_golden_registers(adev); in gmc_v9_0_hw_init()
1828 if (adev->mode_info.num_crtc) { in gmc_v9_0_hw_init()
1835 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_init()
1836 adev->mmhub.funcs->update_power_gating(adev, true); in gmc_v9_0_hw_init()
1838 adev->hdp.funcs->init_registers(adev); in gmc_v9_0_hw_init()
1841 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v9_0_hw_init()
1848 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_init()
1849 adev->gfxhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
1850 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
1852 for (i = 0; i < adev->num_vmhubs; ++i) in gmc_v9_0_hw_init()
1853 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); in gmc_v9_0_hw_init()
1855 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v9_0_hw_init()
1856 adev->umc.funcs->init_registers(adev); in gmc_v9_0_hw_init()
1858 r = gmc_v9_0_gart_enable(adev); in gmc_v9_0_hw_init()
1863 return amdgpu_gmc_vram_checking(adev); in gmc_v9_0_hw_init()
1875 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) in gmc_v9_0_gart_disable() argument
1877 adev->gfxhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
1878 adev->mmhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
1883 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_fini() local
1885 gmc_v9_0_gart_disable(adev); in gmc_v9_0_hw_fini()
1887 if (amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_fini()
1898 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_fini()
1899 adev->mmhub.funcs->update_power_gating(adev, false); in gmc_v9_0_hw_fini()
1901 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v9_0_hw_fini()
1902 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_hw_fini()
1909 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_suspend() local
1911 return gmc_v9_0_hw_fini(adev); in gmc_v9_0_suspend()
1917 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_resume() local
1919 r = gmc_v9_0_hw_init(adev); in gmc_v9_0_resume()
1923 amdgpu_vmid_reset_all(adev); in gmc_v9_0_resume()
1949 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_set_clockgating_state() local
1951 adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
1953 athub_v1_0_set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
1960 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_get_clockgating_state() local
1962 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()
1964 athub_v1_0_get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()