Lines matching refs: gmu
26 static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, in a6xx_hfi_queue_read() argument
57 if (!gmu->legacy) in a6xx_hfi_queue_read()
64 static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, in a6xx_hfi_queue_write() argument
88 if (!gmu->legacy) { in a6xx_hfi_queue_write()
96 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); in a6xx_hfi_queue_write()
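For orientation, here is a minimal sketch of what the write side of such an HFI ring queue does, pieced together from the calls listed above; the queue header fields (write_index, size) and the return value are assumptions for illustration, not the driver's exact layout:

	/* Hedged sketch: copy a message into the ring, bump the write index,
	 * and ring the doorbell on non-legacy targets.  queue->data and the
	 * queue->header fields are assumed names.
	 */
	static int hfi_queue_write_sketch(struct a6xx_gmu *gmu,
			struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
	{
		u32 i, index = queue->header->write_index;

		for (i = 0; i < dwords; i++) {
			queue->data[index] = data[i];
			index = (index + 1) % queue->header->size;
		}

		queue->header->write_index = index;

		/* Newer firmware expects an explicit doorbell; legacy firmware polls */
		if (!gmu->legacy)
			gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);

		return 0;
	}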
100 static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, in a6xx_hfi_wait_for_ack() argument
103 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; in a6xx_hfi_wait_for_ack()
108 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, in a6xx_hfi_wait_for_ack()
112 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
119 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, in a6xx_hfi_wait_for_ack()
126 ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp, in a6xx_hfi_wait_for_ack()
131 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
141 DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n", in a6xx_hfi_wait_for_ack()
147 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
154 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_ack()
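A condensed sketch of the ack path implied by a6xx_hfi_wait_for_ack(): poll the GMU2HOST interrupt, clear it, then drain the response queue until the reply carrying the expected sequence number shows up. The MSGQ bit name, the poll timeouts, the response struct and the header packing below are assumptions:

	/* Hedged sketch: wait for the GMU to post a response, then match it. */
	static int hfi_wait_for_ack_sketch(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
	{
		struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
		u32 val;
		int ret;

		ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
			val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
		if (ret) {
			DRM_DEV_ERROR(gmu->dev,
				"Timeout waiting for GMU ack on message %d\n", id);
			return -ETIMEDOUT;
		}

		/* Acknowledge the interrupt before draining the queue */
		gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
			A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

		for (;;) {
			struct a6xx_hfi_msg_response resp;	/* struct name assumed */

			ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
				sizeof(resp) >> 2);
			if (!ret) {
				DRM_DEV_ERROR(gmu->dev, "Response queue is empty\n");
				return -ENOENT;
			}

			/* Sequence number assumed to live in bits 31:20 of the header */
			if (((resp.ret_header >> 20) & 0xfff) != seqnum)
				continue;

			return resp.error ? -EINVAL : 0;
		}
	}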
169 static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id, in a6xx_hfi_send_msg() argument
172 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; in a6xx_hfi_send_msg()
182 ret = a6xx_hfi_queue_write(gmu, queue, data, dwords); in a6xx_hfi_send_msg()
184 DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n", in a6xx_hfi_send_msg()
189 return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size); in a6xx_hfi_send_msg()
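Putting the two halves together, a6xx_hfi_send_msg() tags the first dword of the message with an id, size and sequence number, pushes it onto the command queue, and then blocks waiting for the acknowledgement. A sketch of that flow; the header bit layout and the per-queue seqnum field are assumptions:

	/* Hedged sketch: build a header, queue the message, wait for the ack. */
	static int hfi_send_msg_sketch(struct a6xx_gmu *gmu, int id,
			void *data, u32 size, u32 *payload, u32 payload_size)
	{
		struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
		u32 dwords = size >> 2;
		u32 seqnum = queue->seqnum++;	/* per-queue counter, field assumed */
		int ret;

		/* First dword carries id, size in dwords, and sequence number */
		*((u32 *) data) = (seqnum << 20) | (dwords << 8) | id;

		ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to send message id %d\n", id);
			return ret;
		}

		return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
	}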
192 static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state) in a6xx_hfi_send_gmu_init() argument
196 msg.dbg_buffer_addr = (u32) gmu->debug.iova; in a6xx_hfi_send_gmu_init()
197 msg.dbg_buffer_size = (u32) gmu->debug.size; in a6xx_hfi_send_gmu_init()
200 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg), in a6xx_hfi_send_gmu_init()
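The init message itself is small: it mostly tells the firmware where the debug buffer lives and which state to boot into. A sketch built from the lines above; the struct name and the boot_state field are assumptions:

	/* Hedged sketch of the HFI_H2F_MSG_INIT payload. */
	static int hfi_send_gmu_init_sketch(struct a6xx_gmu *gmu, int boot_state)
	{
		struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };	/* name assumed */

		msg.dbg_buffer_addr = (u32) gmu->debug.iova;
		msg.dbg_buffer_size = (u32) gmu->debug.size;
		msg.boot_state = boot_state;

		return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
			NULL, 0);
	}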
204 static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version) in a6xx_hfi_get_fw_version() argument
211 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg), in a6xx_hfi_get_fw_version()
215 static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu) in a6xx_hfi_send_perf_table_v1() argument
220 msg.num_gpu_levels = gmu->nr_gpu_freqs; in a6xx_hfi_send_perf_table_v1()
221 msg.num_gmu_levels = gmu->nr_gmu_freqs; in a6xx_hfi_send_perf_table_v1()
223 for (i = 0; i < gmu->nr_gpu_freqs; i++) { in a6xx_hfi_send_perf_table_v1()
224 msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; in a6xx_hfi_send_perf_table_v1()
225 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; in a6xx_hfi_send_perf_table_v1()
228 for (i = 0; i < gmu->nr_gmu_freqs; i++) { in a6xx_hfi_send_perf_table_v1()
229 msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; in a6xx_hfi_send_perf_table_v1()
230 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; in a6xx_hfi_send_perf_table_v1()
233 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), in a6xx_hfi_send_perf_table_v1()
237 static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) in a6xx_hfi_send_perf_table() argument
242 msg.num_gpu_levels = gmu->nr_gpu_freqs; in a6xx_hfi_send_perf_table()
243 msg.num_gmu_levels = gmu->nr_gmu_freqs; in a6xx_hfi_send_perf_table()
245 for (i = 0; i < gmu->nr_gpu_freqs; i++) { in a6xx_hfi_send_perf_table()
246 msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; in a6xx_hfi_send_perf_table()
248 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; in a6xx_hfi_send_perf_table()
251 for (i = 0; i < gmu->nr_gmu_freqs; i++) { in a6xx_hfi_send_perf_table()
252 msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; in a6xx_hfi_send_perf_table()
253 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; in a6xx_hfi_send_perf_table()
256 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), in a6xx_hfi_send_perf_table()
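Both perf-table variants fill the same kind of message: one DCVS level per GPU and GMU frequency, pairing an RPMh ARC vote with the frequency (the / 1000 presumably converts Hz to kHz). A sketch of the fill loop; the message struct name and its vote layout are assumptions:

	/* Hedged sketch: translate the per-OPP tables into HFI DCVS levels. */
	static void hfi_fill_perf_table_sketch(struct a6xx_gmu *gmu,
			struct a6xx_hfi_msg_perf_table *msg)	/* name assumed */
	{
		int i;

		msg->num_gpu_levels = gmu->nr_gpu_freqs;
		msg->num_gmu_levels = gmu->nr_gmu_freqs;

		for (i = 0; i < gmu->nr_gpu_freqs; i++) {
			msg->gx_votes[i].vote = gmu->gx_arc_votes[i];	  /* ARC vote */
			msg->gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; /* Hz -> kHz */
		}

		for (i = 0; i < gmu->nr_gmu_freqs; i++) {
			msg->cx_votes[i].vote = gmu->cx_arc_votes[i];
			msg->cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
		}
	}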
516 static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) in a6xx_hfi_send_bw_table() argument
519 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_hfi_send_bw_table()
537 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), in a6xx_hfi_send_bw_table()
541 static int a6xx_hfi_send_test(struct a6xx_gmu *gmu) in a6xx_hfi_send_test() argument
545 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg), in a6xx_hfi_send_test()
549 static int a6xx_hfi_send_start(struct a6xx_gmu *gmu) in a6xx_hfi_send_start() argument
553 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg), in a6xx_hfi_send_start()
557 static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu) in a6xx_hfi_send_core_fw_start() argument
561 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg, in a6xx_hfi_send_core_fw_start()
565 int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index) in a6xx_hfi_set_freq() argument
573 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg, in a6xx_hfi_set_freq()
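Runtime frequency changes reuse the same transport: a6xx_hfi_set_freq() sends a GX_BW_PERF_VOTE message whose freq field is an index into the perf table sent earlier, not a raw frequency. A sketch; the message struct name and its ack_type/bw fields are assumptions:

	/* Hedged sketch of a blocking DCVS vote for perf level 'index'. */
	int hfi_set_freq_sketch(struct a6xx_gmu *gmu, int index)
	{
		struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };  /* name assumed */

		msg.ack_type = 1;	/* ask the firmware to acknowledge the vote */
		msg.freq = index;	/* index into the perf table, not Hz */
		msg.bw = 0;		/* bus vote not covered in this sketch */

		return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
			sizeof(msg), NULL, 0);
	}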
577 int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu) in a6xx_hfi_send_prep_slumber() argument
583 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg, in a6xx_hfi_send_prep_slumber()
587 static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state) in a6xx_hfi_start_v1() argument
591 ret = a6xx_hfi_send_gmu_init(gmu, boot_state); in a6xx_hfi_start_v1()
595 ret = a6xx_hfi_get_fw_version(gmu, NULL); in a6xx_hfi_start_v1()
605 ret = a6xx_hfi_send_perf_table_v1(gmu); in a6xx_hfi_start_v1()
609 ret = a6xx_hfi_send_bw_table(gmu); in a6xx_hfi_start_v1()
617 a6xx_hfi_send_test(gmu); in a6xx_hfi_start_v1()
622 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) in a6xx_hfi_start() argument
626 if (gmu->legacy) in a6xx_hfi_start()
627 return a6xx_hfi_start_v1(gmu, boot_state); in a6xx_hfi_start()
630 ret = a6xx_hfi_send_perf_table(gmu); in a6xx_hfi_start()
634 ret = a6xx_hfi_send_bw_table(gmu); in a6xx_hfi_start()
638 ret = a6xx_hfi_send_core_fw_start(gmu); in a6xx_hfi_start()
646 ret = a6xx_hfi_send_start(gmu); in a6xx_hfi_start()
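On non-legacy targets the start sequence visible above reduces to four messages in order: perf table, bandwidth table, core firmware start, then start; legacy firmware is handled by the separate v1 path. A sketch of that flow, with error reporting trimmed:

	/* Hedged sketch of the non-legacy HFI bring-up order. */
	static int hfi_start_sketch(struct a6xx_gmu *gmu, int boot_state)
	{
		int ret;

		if (gmu->legacy)
			return a6xx_hfi_start_v1(gmu, boot_state);

		ret = a6xx_hfi_send_perf_table(gmu);
		if (ret)
			return ret;

		ret = a6xx_hfi_send_bw_table(gmu);
		if (ret)
			return ret;

		ret = a6xx_hfi_send_core_fw_start(gmu);
		if (ret)
			return ret;

		return a6xx_hfi_send_start(gmu);
	}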
653 void a6xx_hfi_stop(struct a6xx_gmu *gmu) in a6xx_hfi_stop() argument
657 for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { in a6xx_hfi_stop()
658 struct a6xx_hfi_queue *queue = &gmu->queues[i]; in a6xx_hfi_stop()
664 DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i); in a6xx_hfi_stop()
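Shutdown is mostly bookkeeping: each queue is expected to be empty, and the read/write indices are reset so the next boot starts from a clean ring. A sketch; the queue header field names are assumptions:

	/* Hedged sketch: warn about stale entries and rewind both queue indices. */
	void hfi_stop_sketch(struct a6xx_gmu *gmu)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
			struct a6xx_hfi_queue *queue = &gmu->queues[i];

			if (!queue->header)
				continue;

			if (queue->header->read_index != queue->header->write_index)
				DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

			queue->header->read_index = 0;
			queue->header->write_index = 0;
		}
	}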
701 void a6xx_hfi_init(struct a6xx_gmu *gmu) in a6xx_hfi_init() argument
703 struct a6xx_gmu_bo *hfi = &gmu->hfi; in a6xx_hfi_init()
714 table_size += (ARRAY_SIZE(gmu->queues) * in a6xx_hfi_init()
722 table->num_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
723 table->active_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
727 a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset, in a6xx_hfi_init()
732 a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, in a6xx_hfi_init()
733 hfi->iova + offset, gmu->legacy ? 4 : 1); in a6xx_hfi_init()
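Finally, a6xx_hfi_init() carves the shared hfi buffer into a table header, per-queue headers, and the queue data itself; queue 0 is the host-to-GMU command queue and queue 1 the GMU-to-host response queue, whose last init argument is 4 on legacy firmware and 1 otherwise. A sketch of that layout; the table/header struct names and the SZ_4K per-queue spacing are assumptions:

	/* Hedged sketch of the HFI memory layout set up at init time. */
	void hfi_init_sketch(struct a6xx_gmu *gmu)
	{
		struct a6xx_gmu_bo *hfi = &gmu->hfi;
		struct a6xx_hfi_queue_table_header *table = hfi->virt;	/* name assumed */
		struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
		u32 offset = SZ_4K;	/* queue data assumed to start one page in */

		table->num_queues = ARRAY_SIZE(gmu->queues);
		table->active_queues = ARRAY_SIZE(gmu->queues);

		/* Queue 0: host-to-GMU command queue */
		a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
			hfi->iova + offset, 0);

		/* Queue 1: GMU-to-host response queue (last arg: 4 legacy, 1 otherwise) */
		offset += SZ_4K;
		a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
			hfi->iova + offset, gmu->legacy ? 4 : 1);
	}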