/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>
#include <linux/acpi.h>
#include <linux/dmi.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ST_JADEITE 1
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20

#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37

#define ACP_TILE_DSP2_MASK 0x2f

#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define ACP_BT_PLAY_REGS_START 0x14970
#define ACP_BT_PLAY_REGS_END 0x14a24
#define ACP_BT_COMP1_REG_OFFSET 0xac
#define ACP_BT_COMP2_REG_OFFSET 0xa8

#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb

#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF

#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 4
#define ACP_SRC_ID 162

static unsigned long acp_machine_id;

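/* Power tiles within the ACP block (see the ACP_TILE_*_MASK defines above) */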
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

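/**
 * acp_sw_init - create the CGS device handle used by the ACP code
 *
 * @handle: handle used to pass amdgpu_device pointer
 */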
static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

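/**
 * acp_sw_fini - destroy the CGS device created in acp_sw_init
 *
 * @handle: handle used to pass amdgpu_device pointer
 */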
static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

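/* genpd power_off callback: ask the SMU to power gate the ACP block */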
static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	adev = apd->adev;
	/* call smu to POWER GATE ACP block
	 * smu will
	 * 1. turn off the acp clock
	 * 2. power off the acp tiles
	 * 3. check and enter ulv state
	 */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

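/* genpd power_on callback: ask the SMU to ungate the ACP block */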
static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	adev = apd->adev;
	/* call smu to UNGATE ACP block
	 * smu will
	 * 1. exit ulv
	 * 2. turn on acp clock
	 * 3. power on acp tiles
	 */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

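/* Attach a child device of the ACP parent to the ACP power domain */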
static int acp_genpd_add_device(struct device *dev, void *data)
{
	struct generic_pm_domain *gpd = data;
	int ret;

	ret = pm_genpd_add_device(gpd, dev);
	if (ret)
		dev_err(dev, "Failed to add dev to genpd %d\n", ret);

	return ret;
}

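/* Detach a child device from the ACP power domain; errors are logged only */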
static int acp_genpd_remove_device(struct device *dev, void *data)
{
	int ret;

	ret = pm_genpd_remove_device(dev);
	if (ret)
		dev_err(dev, "Failed to remove dev from genpd %d\n", ret);

	/* Continue to remove */
	return 0;
}

static int acp_quirk_cb(const struct dmi_system_id *id)
{
	acp_machine_id = ST_JADEITE;
	return 1;
}

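/* DMI matches for boards that take the ST_JADEITE path in acp_hw_init() */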
static const struct dmi_system_id acp_quirk_table[] = {
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMD"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jadeite"),
		}
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "IP3 Technology CO.,Ltd."),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN1D"),
		},
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Standard"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN10"),
		},
	},
	{}
};

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r;
	u64 acp_base;
	u32 val = 0;
	u32 count = 0;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;
	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (!adev->acp.acp_genpd)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;
	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	dmi_check_system(acp_quirk_table);
	switch (acp_machine_id) {
	case ST_JADEITE:
	{
		adev->acp.acp_cell = kcalloc(2, sizeof(struct mfd_cell),
					     GFP_KERNEL);
		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res = kcalloc(3, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata = kcalloc(1, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				      DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		i2s_pdata[0].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play_cap";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_CAP_REGS_END;

		adev->acp.acp_res[2].name = "acp2x_dma_irq";
		adev->acp.acp_res[2].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[2].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 3;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
		if (r)
			goto failure;
		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
		break;
	}
	default:
		adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
					     GFP_KERNEL);

		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
					      DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		}
		i2s_pdata[0].cap = DWC_I2S_PLAY;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
					      DW_I2S_QUIRK_COMP_PARAM1 |
					      DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
					      DW_I2S_QUIRK_COMP_PARAM1;
		}

		i2s_pdata[1].cap = DWC_I2S_RECORD;
		i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			break;
		}

		i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
		i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

		adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
		adev->acp.acp_res[2].flags = IORESOURCE_MEM;
		adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

		adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
		adev->acp.acp_res[3].flags = IORESOURCE_MEM;
		adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
		adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

		adev->acp.acp_res[4].name = "acp2x_dma_irq";
		adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 5;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[2].name = "designware-i2s";
		adev->acp.acp_cell[2].num_resources = 1;
		adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
		adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
		adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[3].name = "designware-i2s";
		adev->acp.acp_cell[3].num_resources = 1;
		adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
		adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
		adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
		if (r)
			goto failure;

		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	u32 val = 0;
	u32 count = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	device_for_each_child(adev->acp.parent, NULL,
			      acp_genpd_remove_device);

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

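/* If no ACP MFD cells were created, ungate the block before suspend */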
static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

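/* If no ACP MFD cells were created, gate the block again on resume */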
static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

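/* No early-init work is needed; the idle/reset/clockgating hooks below are no-ops too */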
static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

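/* Forward power-gating requests for the ACP block to the SMU */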
static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

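/* IP-level callbacks hooked into the amdgpu IP block framework */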
static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

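/* ACP 2.2 IP block description registered with the amdgpu IP framework */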
const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};