/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/

Identifier search results for adev in linux-6.1.9. D = defined in this file. Each hit is listed as: source line number, the line itself, then [enclosing function, definition kind] where the index reports one (local, argument, or member). [all …] marks entries truncated by the search tool.
D | amdgpu_device.c
    149  struct amdgpu_device *adev = drm_to_adev(ddev);  [in amdgpu_device_get_pcie_replay_count(), local]
    150  uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);  [in amdgpu_device_get_pcie_replay_count()]
    158  static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
    174  struct amdgpu_device *adev = drm_to_adev(ddev);  [in amdgpu_device_get_product_name(), local]
    176  return sysfs_emit(buf, "%s\n", adev->product_name);  [in amdgpu_device_get_product_name()]
    196  struct amdgpu_device *adev = drm_to_adev(ddev);  [in amdgpu_device_get_product_number(), local]
    198  return sysfs_emit(buf, "%s\n", adev->product_number);  [in amdgpu_device_get_product_number()]
    218  struct amdgpu_device *adev = drm_to_adev(ddev);  [in amdgpu_device_get_serial_number(), local]
    220  return sysfs_emit(buf, "%s\n", adev->serial);  [in amdgpu_device_get_serial_number()]
    236  struct amdgpu_device *adev = drm_to_adev(dev);  [in amdgpu_device_supports_px(), local]
    [all …]

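Every amdgpu_device.c hit above is the same sysfs show-callback shape: recover the DRM device from the bare struct device, convert it to the amdgpu_device, and emit one attribute string. A minimal sketch of that shape; the attribute name example_attr is hypothetical, and the dev_get_drvdata() step is an assumption since the excerpt truncates it:

    /* Hypothetical sysfs attribute; drm_to_adev() and sysfs_emit() are the
     * helpers the hits above use, dev_get_drvdata() is assumed. */
    static ssize_t amdgpu_device_get_example_attr(struct device *dev,
                                                  struct device_attribute *attr,
                                                  char *buf)
    {
            struct drm_device *ddev = dev_get_drvdata(dev);
            struct amdgpu_device *adev = drm_to_adev(ddev);

            /* sysfs_emit() bounds the output to a single PAGE_SIZE buffer. */
            return sysfs_emit(buf, "%s\n", adev->product_name);
    }
    static DEVICE_ATTR(example_attr, 0444, amdgpu_device_get_example_attr, NULL);
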
D | gmc_v11_0.c
    49  static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,  [in gmc_v11_0_ecc_interrupt_state(), argument]
    58  gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,  [in gmc_v11_0_vm_fault_interrupt_state(), argument]
    65  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);  [in gmc_v11_0_vm_fault_interrupt_state()]
    67  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);  [in gmc_v11_0_vm_fault_interrupt_state()]
    71  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);  [in gmc_v11_0_vm_fault_interrupt_state()]
    73  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);  [in gmc_v11_0_vm_fault_interrupt_state()]
    82  static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,  [in gmc_v11_0_process_interrupt(), argument]
    86  struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];  [in gmc_v11_0_process_interrupt()]
    93  if (!amdgpu_sriov_vf(adev)) {  [in gmc_v11_0_process_interrupt()]
    110  amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);  [in gmc_v11_0_process_interrupt()]
    [all …]

D | gmc_v10_0.c
    63  static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,  [in gmc_v10_0_ecc_interrupt_state(), argument]
    72  gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,  [in gmc_v10_0_vm_fault_interrupt_state(), argument]
    79  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);  [in gmc_v10_0_vm_fault_interrupt_state()]
    81  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);  [in gmc_v10_0_vm_fault_interrupt_state()]
    85  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);  [in gmc_v10_0_vm_fault_interrupt_state()]
    87  amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);  [in gmc_v10_0_vm_fault_interrupt_state()]
    96  static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,  [in gmc_v10_0_process_interrupt(), argument]
    102  struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];  [in gmc_v10_0_process_interrupt()]
    114  if (entry->ih != &adev->irq.ih_soft &&  [in gmc_v10_0_process_interrupt()]
    115  amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,  [in gmc_v10_0_process_interrupt()]
    [all …]

D | soc21.c
    77  static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,  [in soc21_query_video_codecs(), argument]
    80  switch (adev->ip_versions[UVD_HWIP][0]) {  [in soc21_query_video_codecs()]
    96  static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)  [in soc21_pcie_rreg(), argument]
    99  address = adev->nbio.funcs->get_pcie_index_offset(adev);  [in soc21_pcie_rreg()]
    100  data = adev->nbio.funcs->get_pcie_data_offset(adev);  [in soc21_pcie_rreg()]
    102  return amdgpu_device_indirect_rreg(adev, address, data, reg);  [in soc21_pcie_rreg()]
    105  static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)  [in soc21_pcie_wreg(), argument]
    109  address = adev->nbio.funcs->get_pcie_index_offset(adev);  [in soc21_pcie_wreg()]
    110  data = adev->nbio.funcs->get_pcie_data_offset(adev);  [in soc21_pcie_wreg()]
    112  amdgpu_device_indirect_wreg(adev, address, data, reg, v);  [in soc21_pcie_wreg()]
    [all …]

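soc21.c (and nv.c and soc15.c below) reach PCIe registers indirectly: the NBIO block reports where the index/data register pair sits, and amdgpu_device_indirect_rreg()/amdgpu_device_indirect_wreg() perform the two-step access. A self-contained userspace model of the mechanism; every name and size in it is invented for illustration:

    /* Model of index/data indirect register access (all values invented).
     * Only two "registers" are directly visible; a much larger space is
     * reached by writing an index first, then touching the data port. */
    #include <stdint.h>
    #include <stdio.h>

    #define IDX 0                       /* offset of the INDEX register */
    #define DAT 1                       /* offset of the DATA register  */

    static uint32_t window[2];          /* the directly mapped window       */
    static uint32_t space[4096];        /* the indirectly reachable space   */

    static void mmio_write(uint32_t off, uint32_t v)
    {
            window[off] = v;
            if (off == DAT)
                    space[window[IDX] % 4096] = v;  /* DATA lands at INDEX */
    }

    static uint32_t mmio_read(uint32_t off)
    {
            if (off == DAT)
                    return space[window[IDX] % 4096];
            return window[off];
    }

    static uint32_t indirect_rreg(uint32_t reg)
    {
            mmio_write(IDX, reg);       /* select the target register */
            return mmio_read(DAT);      /* read its value back        */
    }

    static void indirect_wreg(uint32_t reg, uint32_t v)
    {
            mmio_write(IDX, reg);       /* select the target register */
            mmio_write(DAT, v);         /* write through the data port */
    }

    int main(void)
    {
            indirect_wreg(0x123, 0xdeadbeef);
            printf("0x%08x\n", (unsigned)indirect_rreg(0x123)); /* 0xdeadbeef */
            return 0;
    }

The payoff of the pattern is that a large register space stays reachable through two mapped registers; the cost is that the two steps form a critical section, which is why vi.c further down takes a spinlock around the same sequence.
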
D | nv.c
    181  static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,  [in nv_query_video_codecs(), argument]
    184  switch (adev->ip_versions[UVD_HWIP][0]) {  [in nv_query_video_codecs()]
    188  if (amdgpu_sriov_vf(adev)) {  [in nv_query_video_codecs()]
    235  static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)  [in nv_pcie_rreg(), argument]
    238  address = adev->nbio.funcs->get_pcie_index_offset(adev);  [in nv_pcie_rreg()]
    239  data = adev->nbio.funcs->get_pcie_data_offset(adev);  [in nv_pcie_rreg()]
    241  return amdgpu_device_indirect_rreg(adev, address, data, reg);  [in nv_pcie_rreg()]
    244  static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)  [in nv_pcie_wreg(), argument]
    248  address = adev->nbio.funcs->get_pcie_index_offset(adev);  [in nv_pcie_wreg()]
    249  data = adev->nbio.funcs->get_pcie_data_offset(adev);  [in nv_pcie_wreg()]
    [all …]

D | gmc_v9_0.c
    414  static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,  [in gmc_v9_0_ecc_interrupt_state(), argument]
    423  if (adev->asic_type >= CHIP_VEGA20)  [in gmc_v9_0_ecc_interrupt_state()]
    464  static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,  [in gmc_v9_0_vm_fault_interrupt_state(), argument]
    482  for (j = 0; j < adev->num_vmhubs; j++) {  [in gmc_v9_0_vm_fault_interrupt_state()]
    483  hub = &adev->vmhub[j];  [in gmc_v9_0_vm_fault_interrupt_state()]
    502  for (j = 0; j < adev->num_vmhubs; j++) {  [in gmc_v9_0_vm_fault_interrupt_state()]
    503  hub = &adev->vmhub[j];  [in gmc_v9_0_vm_fault_interrupt_state()]
    528  static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,  [in gmc_v9_0_process_interrupt(), argument]
    548  if (entry->ih != &adev->irq.ih_soft &&  [in gmc_v9_0_process_interrupt()]
    549  amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,  [in gmc_v9_0_process_interrupt()]
    [all …]

D | soc15.c
    156  static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,  [in soc15_query_video_codecs(), argument]
    159  if (adev->ip_versions[VCE_HWIP][0]) {  [in soc15_query_video_codecs()]
    160  switch (adev->ip_versions[VCE_HWIP][0]) {  [in soc15_query_video_codecs()]
    172  switch (adev->ip_versions[UVD_HWIP][0]) {  [in soc15_query_video_codecs()]
    197  static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)  [in soc15_pcie_rreg(), argument]
    200  address = adev->nbio.funcs->get_pcie_index_offset(adev);  [in soc15_pcie_rreg()]
    201  data = adev->nbio.funcs->get_pcie_data_offset(adev);  [in soc15_pcie_rreg()]
    203  return amdgpu_device_indirect_rreg(adev, address, data, reg);  [in soc15_pcie_rreg()]
    206  static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)  [in soc15_pcie_wreg(), argument]
    210  address = adev->nbio.funcs->get_pcie_index_offset(adev);  [in soc15_pcie_wreg()]
    [all …]

D | amdgpu_rlc.c
    37  void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)  [in amdgpu_gfx_rlc_enter_safe_mode(), argument]
    39  if (adev->gfx.rlc.in_safe_mode)  [in amdgpu_gfx_rlc_enter_safe_mode()]
    43  if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))  [in amdgpu_gfx_rlc_enter_safe_mode()]
    46  if (adev->cg_flags &  [in amdgpu_gfx_rlc_enter_safe_mode()]
    49  adev->gfx.rlc.funcs->set_safe_mode(adev);  [in amdgpu_gfx_rlc_enter_safe_mode()]
    50  adev->gfx.rlc.in_safe_mode = true;  [in amdgpu_gfx_rlc_enter_safe_mode()]
    61  void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)  [in amdgpu_gfx_rlc_exit_safe_mode(), argument]
    63  if (!(adev->gfx.rlc.in_safe_mode))  [in amdgpu_gfx_rlc_exit_safe_mode()]
    67  if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))  [in amdgpu_gfx_rlc_exit_safe_mode()]
    70  if (adev->cg_flags &  [in amdgpu_gfx_rlc_exit_safe_mode()]
    [all …]

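Both helpers above are guarded: entering is a no-op when the RLC is already in safe mode or disabled, and the cg_flags test skips the handshake when no clock-gating feature needs it. A hypothetical caller, shown only to illustrate that the two calls are meant to bracket register updates:

    /* Hypothetical caller: the bracket keeps the RLC parked while CG/PG
     * related registers are reprogrammed. */
    static void example_update_gfx_cg_state(struct amdgpu_device *adev)
    {
            amdgpu_gfx_rlc_enter_safe_mode(adev);   /* no-op if already safe */
            /* ... program clock-gating registers here ... */
            amdgpu_gfx_rlc_exit_safe_mode(adev);    /* must stay balanced */
    }
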
D | amdgpu_gfx.c
    41  int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,  [in amdgpu_gfx_mec_queue_to_bit(), argument]
    46  bit += mec * adev->gfx.mec.num_pipe_per_mec  [in amdgpu_gfx_mec_queue_to_bit()]
    47  * adev->gfx.mec.num_queue_per_pipe;  [in amdgpu_gfx_mec_queue_to_bit()]
    48  bit += pipe * adev->gfx.mec.num_queue_per_pipe;  [in amdgpu_gfx_mec_queue_to_bit()]
    54  void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,  [in amdgpu_queue_mask_bit_to_mec_queue(), argument]
    57  *queue = bit % adev->gfx.mec.num_queue_per_pipe;  [in amdgpu_queue_mask_bit_to_mec_queue()]
    58  *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)  [in amdgpu_queue_mask_bit_to_mec_queue()]
    59  % adev->gfx.mec.num_pipe_per_mec;  [in amdgpu_queue_mask_bit_to_mec_queue()]
    60  *mec = (bit / adev->gfx.mec.num_queue_per_pipe)  [in amdgpu_queue_mask_bit_to_mec_queue()]
    61  / adev->gfx.mec.num_pipe_per_mec;  [in amdgpu_queue_mask_bit_to_mec_queue()]
    [all …]

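amdgpu_gfx_mec_queue_to_bit() linearizes a (mec, pipe, queue) triple into a flat bit index, and amdgpu_queue_mask_bit_to_mec_queue() inverts it with div/mod by the same per-MEC counts. A standalone round-trip check; the three counts are arbitrary stand-ins for the values the driver reads out of adev->gfx.mec:

    /* Verify that the two mappings above are exact inverses (counts assumed). */
    #include <assert.h>
    #include <stdio.h>

    #define NUM_MEC             2
    #define NUM_PIPE_PER_MEC    4
    #define NUM_QUEUE_PER_PIPE  8

    static int mec_queue_to_bit(int mec, int pipe, int queue)
    {
            int bit = 0;

            /* Same layout as amdgpu_gfx_mec_queue_to_bit(): mec-major. */
            bit += mec * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
            bit += pipe * NUM_QUEUE_PER_PIPE;
            bit += queue;
            return bit;
    }

    static void bit_to_mec_queue(int bit, int *mec, int *pipe, int *queue)
    {
            /* Same div/mod decomposition as amdgpu_queue_mask_bit_to_mec_queue(). */
            *queue = bit % NUM_QUEUE_PER_PIPE;
            *pipe = (bit / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
            *mec = (bit / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;
    }

    int main(void)
    {
            int total = NUM_MEC * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
            int mec, pipe, queue;

            for (int bit = 0; bit < total; bit++) {
                    bit_to_mec_queue(bit, &mec, &pipe, &queue);
                    assert(mec_queue_to_bit(mec, pipe, queue) == bit);
            }
            printf("round-trip ok for %d bits\n", total);
            return 0;
    }
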
D | amdgpu_acp.c
    103  struct amdgpu_device *adev = (struct amdgpu_device *)handle;  [in acp_sw_init(), local]
    105  adev->acp.parent = adev->dev;  [in acp_sw_init()]
    107  adev->acp.cgs_device =  [in acp_sw_init()]
    108  amdgpu_cgs_create_device(adev);  [in acp_sw_init()]
    109  if (!adev->acp.cgs_device)  [in acp_sw_init()]
    117  struct amdgpu_device *adev = (struct amdgpu_device *)handle;  [in acp_sw_fini(), local]
    119  if (adev->acp.cgs_device)  [in acp_sw_fini()]
    120  amdgpu_cgs_destroy_device(adev->acp.cgs_device);  [in acp_sw_fini()]
    126  void *adev;  [member]
    133  struct amdgpu_device *adev;  [in acp_poweroff(), local]
    [all …]

D | amdgpu_discovery.c
    200  static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)  [in amdgpu_discovery_read_binary_from_vram(), argument]
    205  amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,  [in amdgpu_discovery_read_binary_from_vram()]
    206  adev->mman.discovery_tmr_size, false);  [in amdgpu_discovery_read_binary_from_vram()]
    210  static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)  [in amdgpu_discovery_read_binary_from_file(), argument]
    221  dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");  [in amdgpu_discovery_read_binary_from_file()]
    225  r = request_firmware(&fw, fw_name, adev->dev);  [in amdgpu_discovery_read_binary_from_file()]
    227  dev_err(adev->dev, "can't load firmware \"%s\"\n",  [in amdgpu_discovery_read_binary_from_file()]
    263  static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)  [in amdgpu_discovery_harvest_config_quirk(), argument]
    269  if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&  [in amdgpu_discovery_harvest_config_quirk()]
    270  (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {  [in amdgpu_discovery_harvest_config_quirk()]
    [all …]

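The file fallback above goes through the stock kernel firmware loader. Its general shape, condensed into one sketch; the firmware path is a placeholder, since the real fw_name construction is elided in the excerpt:

    /* Sketch of the firmware-file fallback; the path is hypothetical. */
    static int example_read_binary_from_file(struct amdgpu_device *adev,
                                             uint8_t *binary, size_t max_size)
    {
            const struct firmware *fw;
            int r;

            r = request_firmware(&fw, "amdgpu/example_discovery.bin", adev->dev);
            if (r) {
                    dev_err(adev->dev, "can't load firmware\n");
                    return r;
            }
            if (fw->size > max_size) {          /* never overrun the caller's buffer */
                    release_firmware(fw);
                    return -EINVAL;
            }
            memcpy(binary, fw->data, fw->size);
            release_firmware(fw);               /* always balance request_firmware() */
            return 0;
    }
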
D | amdgpu_virt.c
    45  bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)  [in amdgpu_virt_mmio_blocked(), argument]
    53  void amdgpu_virt_init_setting(struct amdgpu_device *adev)  [in amdgpu_virt_init_setting(), argument]
    55  struct drm_device *ddev = adev_to_drm(adev);  [in amdgpu_virt_init_setting()]
    58  if (adev->asic_type != CHIP_ALDEBARAN &&  [in amdgpu_virt_init_setting()]
    59  adev->asic_type != CHIP_ARCTURUS) {  [in amdgpu_virt_init_setting()]
    60  if (adev->mode_info.num_crtc == 0)  [in amdgpu_virt_init_setting()]
    61  adev->mode_info.num_crtc = 1;  [in amdgpu_virt_init_setting()]
    62  adev->enable_virtual_display = true;  [in amdgpu_virt_init_setting()]
    65  adev->cg_flags = 0;  [in amdgpu_virt_init_setting()]
    66  adev->pg_flags = 0;  [in amdgpu_virt_init_setting()]
    [all …]

D | amdgpu_irq.c
    121  struct amdgpu_device *adev = container_of(work, struct amdgpu_device,  [in amdgpu_hotplug_work_func(), local]
    123  struct drm_device *dev = adev_to_drm(adev);  [in amdgpu_hotplug_work_func()]
    145  void amdgpu_irq_disable_all(struct amdgpu_device *adev)  [in amdgpu_irq_disable_all(), argument]
    151  spin_lock_irqsave(&adev->irq.lock, irqflags);  [in amdgpu_irq_disable_all()]
    153  if (!adev->irq.client[i].sources)  [in amdgpu_irq_disable_all()]
    157  struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];  [in amdgpu_irq_disable_all()]
    164  r = src->funcs->set(adev, src, k,  [in amdgpu_irq_disable_all()]
    172  spin_unlock_irqrestore(&adev->irq.lock, irqflags);  [in amdgpu_irq_disable_all()]
    189  struct amdgpu_device *adev = drm_to_adev(dev);  [in amdgpu_irq_handler(), local]
    192  ret = amdgpu_ih_process(adev, &adev->irq.ih);  [in amdgpu_irq_handler()]
    [all …]

D | amdgpu_amdkfd.c
    70  void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)  [in amdgpu_amdkfd_device_probe(), argument]
    72  bool vf = amdgpu_sriov_vf(adev);  [in amdgpu_amdkfd_device_probe()]
    77  adev->kfd.dev = kgd2kfd_probe(adev, vf);  [in amdgpu_amdkfd_device_probe()]
    93  static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,  [in amdgpu_doorbell_get_kfd_info(), argument]
    102  if (adev->enable_mes) {  [in amdgpu_doorbell_get_kfd_info()]
    109  *aperture_base = adev->doorbell.base;  [in amdgpu_doorbell_get_kfd_info()]
    112  } else if (adev->doorbell.size > adev->doorbell.num_doorbells *  [in amdgpu_doorbell_get_kfd_info()]
    114  *aperture_base = adev->doorbell.base;  [in amdgpu_doorbell_get_kfd_info()]
    115  *aperture_size = adev->doorbell.size;  [in amdgpu_doorbell_get_kfd_info()]
    116  *start_offset = adev->doorbell.num_doorbells * sizeof(u32);  [in amdgpu_doorbell_get_kfd_info()]
    [all …]

D | vi.c
    261  static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,  [in vi_query_video_codecs(), argument]
    264  switch (adev->asic_type) {  [in vi_query_video_codecs()]
    302  static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)  [in vi_pcie_rreg(), argument]
    307  spin_lock_irqsave(&adev->pcie_idx_lock, flags);  [in vi_pcie_rreg()]
    311  spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);  [in vi_pcie_rreg()]
    315  static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)  [in vi_pcie_wreg(), argument]
    319  spin_lock_irqsave(&adev->pcie_idx_lock, flags);  [in vi_pcie_wreg()]
    324  spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);  [in vi_pcie_wreg()]
    327  static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)  [in vi_smc_rreg(), argument]
    332  spin_lock_irqsave(&adev->smc_idx_lock, flags);  [in vi_smc_rreg()]
    [all …]

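vi.c predates the shared indirect-access helpers, so it serializes the index/data pair itself: nothing may retarget the index register between the select and the data access. A sketch of the read side; the lines between lock and unlock are elided in the excerpt, so the register names and the posting read are assumptions:

    /* Assumed body between the lock/unlock shown above: register names
     * (mmPCIE_INDEX/mmPCIE_DATA) and the posting read are not in the excerpt. */
    static u32 example_locked_pcie_rreg(struct amdgpu_device *adev, u32 reg)
    {
            unsigned long flags;
            u32 r;

            /* INDEX+DATA is a two-step sequence: without the lock another
             * CPU could retarget INDEX between our two accesses. */
            spin_lock_irqsave(&adev->pcie_idx_lock, flags);
            WREG32_NO_KIQ(mmPCIE_INDEX, reg);       /* select */
            (void)RREG32_NO_KIQ(mmPCIE_INDEX);      /* post the index write */
            r = RREG32_NO_KIQ(mmPCIE_DATA);         /* fetch */
            spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
            return r;
    }
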
D | amdgpu_bios.c
    90  static bool igp_read_bios_from_vram(struct amdgpu_device *adev)  [in igp_read_bios_from_vram(), argument]
    96  if (!(adev->flags & AMD_IS_APU))  [in igp_read_bios_from_vram()]
    97  if (amdgpu_device_need_post(adev))  [in igp_read_bios_from_vram()]
    101  if (pci_resource_len(adev->pdev, 0) == 0)  [in igp_read_bios_from_vram()]
    104  adev->bios = NULL;  [in igp_read_bios_from_vram()]
    105  vram_base = pci_resource_start(adev->pdev, 0);  [in igp_read_bios_from_vram()]
    111  adev->bios = kmalloc(size, GFP_KERNEL);  [in igp_read_bios_from_vram()]
    112  if (!adev->bios) {  [in igp_read_bios_from_vram()]
    116  adev->bios_size = size;  [in igp_read_bios_from_vram()]
    117  memcpy_fromio(adev->bios, bios, size);  [in igp_read_bios_from_vram()]
    [all …]

D | amdgpu_ras.c
    126  static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
    129  static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
    137  void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)  [in amdgpu_ras_set_error_query_ready(), argument]
    139  if (adev && amdgpu_ras_get_context(adev))  [in amdgpu_ras_set_error_query_ready()]
    140  amdgpu_ras_get_context(adev)->error_query_ready = ready;  [in amdgpu_ras_set_error_query_ready()]
    143  static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)  [in amdgpu_ras_get_error_query_ready(), argument]
    145  if (adev && amdgpu_ras_get_context(adev))  [in amdgpu_ras_get_error_query_ready()]
    146  return amdgpu_ras_get_context(adev)->error_query_ready;  [in amdgpu_ras_get_error_query_ready()]
    151  static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)  [in amdgpu_reserve_page_direct(), argument]
    156  if ((address >= adev->gmc.mc_vram_size) ||  [in amdgpu_reserve_page_direct()]
    [all …]

D | mxgpu_nv.c
    36  static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)  [in xgpu_nv_mailbox_send_ack(), argument]
    41  static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)  [in xgpu_nv_mailbox_set_valid(), argument]
    55  static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)  [in xgpu_nv_mailbox_peek_msg(), argument]
    61  static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,  [in xgpu_nv_mailbox_rcv_msg(), argument]
    70  xgpu_nv_mailbox_send_ack(adev);  [in xgpu_nv_mailbox_rcv_msg()]
    75  static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)  [in xgpu_nv_peek_ack(), argument]
    80  static int xgpu_nv_poll_ack(struct amdgpu_device *adev)  [in xgpu_nv_poll_ack(), argument]
    99  static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)  [in xgpu_nv_poll_msg(), argument]
    108  r = xgpu_nv_mailbox_rcv_msg(adev, event);  [in xgpu_nv_poll_msg()]
    120  static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,  [in xgpu_nv_mailbox_trans_msg(), argument]
    [all …]

D | vega20_reg_init.c
    29  int vega20_reg_base_init(struct amdgpu_device *adev)  [in vega20_reg_base_init(), argument]
    34  adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));  [in vega20_reg_base_init()]
    35  adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));  [in vega20_reg_base_init()]
    36  adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));  [in vega20_reg_base_init()]
    37  adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));  [in vega20_reg_base_init()]
    38  adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));  [in vega20_reg_base_init()]
    39  adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));  [in vega20_reg_base_init()]
    40  adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));  [in vega20_reg_base_init()]
    41  adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));  [in vega20_reg_base_init()]
    42  adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));  [in vega20_reg_base_init()]
    [all …]

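All vega20_reg_base_init() does is point the per-IP, per-instance offset tables at static base-address structs; register macros later resolve a register by indexing through adev->reg_offset[HWIP][instance]. A deliberately simplified userspace model of that two-level lookup, with every name and value invented:

    /* Toy model of the reg_offset indirection (all values invented; the
     * real per-IP structs carry several address segments, not one word). */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_INSTANCE 2

    struct ip_base { uint32_t instance[MAX_INSTANCE]; };

    static const struct ip_base GC_BASE = { .instance = { 0x00002000, 0x00012000 } };

    enum { GC_HWIP, HWIP_MAX };

    static const uint32_t *reg_offset[HWIP_MAX][MAX_INSTANCE];

    int main(void)
    {
            /* The init step: one pointer per IP block and instance. */
            for (int i = 0; i < MAX_INSTANCE; i++)
                    reg_offset[GC_HWIP][i] = (const uint32_t *)&GC_BASE.instance[i];

            /* The lookup step: absolute offset = IP base + register offset. */
            printf("GC inst0 reg 0x40 -> 0x%x\n",
                   (unsigned)(reg_offset[GC_HWIP][0][0] + 0x40));
            printf("GC inst1 reg 0x40 -> 0x%x\n",
                   (unsigned)(reg_offset[GC_HWIP][1][0] + 0x40));
            return 0;
    }
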
D | gmc_v6_0.c
    44  static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
    45  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
    64  static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)  [in gmc_v6_0_mc_stop(), argument]
    68  gmc_v6_0_wait_for_idle((void *)adev);  [in gmc_v6_0_mc_stop()]
    84  static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)  [in gmc_v6_0_mc_resume(), argument]
    98  static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)  [in gmc_v6_0_init_microcode(), argument]
    107  switch (adev->asic_type) {  [in gmc_v6_0_init_microcode()]
    134  err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);  [in gmc_v6_0_init_microcode()]
    138  err = amdgpu_ucode_validate(adev->gmc.fw);  [in gmc_v6_0_init_microcode()]
    142  dev_err(adev->dev,  [in gmc_v6_0_init_microcode()]
    [all …]

D | sienna_cichlid.c
    37  struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;  [in sienna_cichlid_is_mode2_default()]
    39  if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) &&  [in sienna_cichlid_is_mode2_default()]
    40  adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))  [in sienna_cichlid_is_mode2_default()]
    71  static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)  [in sienna_cichlid_mode2_suspend_ip(), argument]
    75  amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);  [in sienna_cichlid_mode2_suspend_ip()]
    76  amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);  [in sienna_cichlid_mode2_suspend_ip()]
    78  for (i = adev->num_ip_blocks - 1; i >= 0; i--) {  [in sienna_cichlid_mode2_suspend_ip()]
    79  if (!(adev->ip_blocks[i].version->type ==  [in sienna_cichlid_mode2_suspend_ip()]
    81  adev->ip_blocks[i].version->type ==  [in sienna_cichlid_mode2_suspend_ip()]
    85  r = adev->ip_blocks[i].version->funcs->suspend(adev);  [in sienna_cichlid_mode2_suspend_ip()]
    [all …]

D | amdgpu_gart.c
    73  static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)  [in amdgpu_gart_dummy_page_init(), argument]
    77  if (adev->dummy_page_addr)  [in amdgpu_gart_dummy_page_init()]
    79  adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,  [in amdgpu_gart_dummy_page_init()]
    81  if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {  [in amdgpu_gart_dummy_page_init()]
    82  dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");  [in amdgpu_gart_dummy_page_init()]
    83  adev->dummy_page_addr = 0;  [in amdgpu_gart_dummy_page_init()]
    96  void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)  [in amdgpu_gart_dummy_page_fini(), argument]
    98  if (!adev->dummy_page_addr)  [in amdgpu_gart_dummy_page_fini()]
    100  dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,  [in amdgpu_gart_dummy_page_fini()]
    102  adev->dummy_page_addr = 0;  [in amdgpu_gart_dummy_page_fini()]
    [all …]

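The dummy-page helpers show the standard streaming-DMA discipline: map, validate with dma_mapping_error() before the handle is ever used, and clear the cached address on both failure and teardown. The init half in isolation; the DMA direction is truncated in the excerpt, so it is an assumption here:

    static int example_map_dummy_page(struct amdgpu_device *adev,
                                      struct page *page)
    {
            /* DMA_BIDIRECTIONAL is an assumption; the excerpt truncates it. */
            adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, page, 0,
                                                 PAGE_SIZE, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
                    dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
                    adev->dummy_page_addr = 0;   /* never reuse a failed handle */
                    return -ENOMEM;
            }
            return 0;
    }
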
D | amdgpu_virt.h
    84  int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
    85  int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
    86  int (*req_init_data)(struct amdgpu_device *adev);
    87  int (*reset_gpu)(struct amdgpu_device *adev);
    88  int (*wait_reset)(struct amdgpu_device *adev);
    89  void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
    267  #define amdgpu_sriov_enabled(adev) \  [argument]
    268  ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
    270  #define amdgpu_sriov_vf(adev) \  [argument]
    271  ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)
    [all …]

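The two macros are single-bit tests against adev->virt.caps, which is why they are cheap enough to sprinkle through hot paths such as the fault handlers above. A self-contained model of the pattern; the excerpt does not show the real bit positions, so the values below are assumptions:

    /* Model of the caps-flag macros; the bit values are assumed. */
    #include <stdint.h>
    #include <stdio.h>

    #define AMDGPU_SRIOV_CAPS_ENABLE_IOV  (1 << 1)   /* assumed position */
    #define AMDGPU_SRIOV_CAPS_IS_VF       (1 << 2)   /* assumed position */

    struct virt { uint32_t caps; };
    struct dev  { struct virt virt; };

    #define sriov_enabled(d) ((d)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
    #define sriov_vf(d)      ((d)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

    int main(void)
    {
            struct dev d = { .virt = { .caps = AMDGPU_SRIOV_CAPS_IS_VF } };

            /* !! collapses the masked bit to 0 or 1 for printing. */
            printf("enabled=%d vf=%d\n", !!sriov_enabled(&d), !!sriov_vf(&d));
            return 0;
    }
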
D | mxgpu_ai.c
    37  static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)  [in xgpu_ai_mailbox_send_ack(), argument]
    42  static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)  [in xgpu_ai_mailbox_set_valid(), argument]
    56  static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)  [in xgpu_ai_mailbox_peek_msg(), argument]
    63  static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,  [in xgpu_ai_mailbox_rcv_msg(), argument]
    73  xgpu_ai_mailbox_send_ack(adev);  [in xgpu_ai_mailbox_rcv_msg()]
    78  static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {  [in xgpu_ai_peek_ack(), argument]
    82  static int xgpu_ai_poll_ack(struct amdgpu_device *adev)  [in xgpu_ai_poll_ack(), argument]
    101  static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)  [in xgpu_ai_poll_msg(), argument]
    106  r = xgpu_ai_mailbox_rcv_msg(adev, event);  [in xgpu_ai_poll_msg()]
    119  static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,  [in xgpu_ai_mailbox_trans_msg(), argument]
    [all …]

/linux-6.1.9/drivers/gpu/drm/amd/pm/
D | amdgpu_dpm.c
    36  #define amdgpu_dpm_enable_bapm(adev, e) \  [argument]
    37  ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
    39  int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)  [in amdgpu_dpm_get_sclk(), argument]
    41  const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  [in amdgpu_dpm_get_sclk()]
    47  mutex_lock(&adev->pm.mutex);  [in amdgpu_dpm_get_sclk()]
    48  ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,  [in amdgpu_dpm_get_sclk()]
    50  mutex_unlock(&adev->pm.mutex);  [in amdgpu_dpm_get_sclk()]
    55  int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)  [in amdgpu_dpm_get_mclk(), argument]
    57  const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  [in amdgpu_dpm_get_mclk()]
    63  mutex_lock(&adev->pm.mutex);  [in amdgpu_dpm_get_mclk()]
    [all …]

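Every wrapper in amdgpu_dpm.c repeats one dispatch shape: fetch the powerplay function table, bail out if the backend does not implement the hook, and hold adev->pm.mutex across the call so backend state stays serialized. Condensed into a single hypothetical wrapper; the missing-hook check is elided in the excerpt and assumed to return 0:

    /* Hypothetical wrapper showing the recurring amdgpu_dpm dispatch shape. */
    static int example_dpm_get_clock(struct amdgpu_device *adev, bool low)
    {
            const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
            int ret;

            if (!pp_funcs->get_sclk)            /* backend lacks the hook */
                    return 0;

            mutex_lock(&adev->pm.mutex);        /* serialize powerplay calls */
            ret = pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
            mutex_unlock(&adev->pm.mutex);

            return ret;
    }
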