// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for HDA DSP code loader
 */

#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include "ext_manifest.h"
#include "../ops.h"
#include "../sof-priv.h"
#include "hda.h"

static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int i;

	/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
	for (i = 0; i < chip->ssp_count; i++) {
		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
						 chip->ssp_base_offset
						 + i * SSP_DEV_MEM_SIZE
						 + SSP_SSC1_OFFSET,
						 SSP_SET_CBP_CFP,
						 SSP_SET_CBP_CFP);
	}
}

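/*
 * Prepare the code loader stream: get a free host DMA stream, allocate its
 * data buffer and program the hardware parameters. For playback (code
 * loading) SPIB is enabled with the transfer size; for capture the ICCMAX
 * stream setup is used instead.
 */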
struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
					      unsigned int size, struct snd_dma_buffer *dmab,
					      int direction)
{
	struct hdac_ext_stream *hext_stream;
	struct hdac_stream *hstream;
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	int ret;

	hext_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!hext_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &hext_stream->hstream;
	hstream->substream = NULL;

	/* allocate DMA buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
	if (ret < 0) {
		dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
		goto out_put;
	}

	hstream->period_bytes = 0;	/* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
		hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return hext_stream;

out_free:
	snd_dma_free_pages(dmab);
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}

/*
 * The first boot sequence has some extra steps:
 * power up all host managed cores, unstall/run only the boot core to boot
 * the DSP, then power down all non-boot cores (if any) that were powered up.
 */
static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status, target_status;
	u32 flags, ipc_hdr, j;
	unsigned long mask;
	char *dump_msg;
	int ret;

	/* step 1: power up corex */
	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	hda_ssp_set_cbp_cfp(sdev);

	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
	if (!imr_boot)
		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
						    == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);

	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
					   ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/*
	 * step 7:
	 * - Cold/Full boot: wait for ROM init to proceed to download the firmware
	 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
	 */
	if (imr_boot)
		target_status = HDA_DSP_ROM_FW_ENTERED;
	else
		target_status = HDA_DSP_ROM_INIT;

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, status,
					((status & HDA_DSP_ROM_STS_MASK)
						== target_status),
					HDA_DSP_REG_POLL_INTERVAL_US,
					chip->rom_init_timeout *
					USEC_PER_MSEC);
	if (!ret) {
		/* set enabled cores mask and increment ref count for cores in init_core_mask */
		sdev->enabled_cores_mask |= chip->init_core_mask;
		mask = sdev->enabled_cores_mask;
		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
			sdev->dsp_core_ref_count[j]++;
		return 0;
	}

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;

	/* after max boot attempts make sure that the dump is printed */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags &= ~SOF_DBG_DUMP_OPTIONAL;

	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);

	kfree(dump_msg);
	return ret;
}

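/*
 * Start or stop the code loader DMA stream. START is handled here by
 * enabling the stream interrupt and setting the DMA run bit; all other
 * commands are passed on to the generic stream trigger.
 */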
static int cl_trigger(struct snd_sof_dev *sdev,
		      struct hdac_ext_stream *hext_stream, int cmd)
{
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);

	/* code loader is special case that reuses stream ops */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
					1 << hstream->index,
					1 << hstream->index);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					sd_offset,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK);

		hstream->running = true;
		return 0;
	default:
		return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
	}
}

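/*
 * Undo hda_cl_stream_prepare(): stop the DMA (disable SPIB for playback),
 * release the host DMA stream, reset the BDL address and stream descriptor
 * registers and free the DMA buffer.
 */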
int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
		   struct hdac_ext_stream *hext_stream)
{
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU, 0);

	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	hstream->bufsize = 0;
	hstream->format_val = 0;

	return ret;
}

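/*
 * Transfer the firmware image over the prepared code loader stream and wait
 * for the ROM to report FW_ENTERED. The DMA is stopped afterwards regardless
 * of the outcome.
 */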
int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int reg;
	int ret, status;

	ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, reg,
					((reg & HDA_DSP_ROM_STS_MASK)
						== HDA_DSP_ROM_FW_ENTERED),
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */

	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	}

	ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	}

	return status;
}

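/*
 * Boot the firmware using the ICCMAX sequence: an extra capture stream is
 * prepared before the normal firmware boot, and the vendor-specific LTRP
 * guardband value is saved beforehand and restored once the boot completes.
 */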
int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct snd_sof_pdata *plat_data = sdev->pdata;
	struct hdac_ext_stream *iccmax_stream;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct firmware stripped_firmware;
	struct snd_dma_buffer dmab_bdl;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_hdac_chip_readb(bus, VS_LTRP) & HDA_VS_INTEL_LTRP_GB_MASK;

	if (plat_data->fw->size <= plat_data->fw_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;

	/* prepare capture stream for ICCMAX */
	iccmax_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, stripped_firmware.size,
					      &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev, &dmab_bdl, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_hdac_chip_updateb(bus, VS_LTRP, HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}

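/* boot the firmware directly from IMR, no code loader DMA stream is needed */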
static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
	int ret;

	ret = cl_dsp_init(sdev, 0, true);
	if (!ret)
		hda_sdw_process_wakeen(sdev);

	return ret;
}

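/*
 * Main firmware boot entry point: try an IMR boot first when supported,
 * otherwise run the cold boot sequence - prepare the code loader stream,
 * retry ROM init up to HDA_FW_BOOT_ATTEMPTS times, copy the firmware and
 * finally clean up the stream. Returns the init core mask on success.
 */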
int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct snd_dma_buffer dmab;
	int ret, ret1, i;

	if (sdev->system_suspend_target < SOF_SUSPEND_S4 &&
	    hda->imrboot_supported && !sdev->first_boot) {
		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
		hda->boot_iteration = 0;
		ret = hda_dsp_boot_imr(sdev);
		if (!ret)
			return 0;

		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
	}

	chip_info = desc->chip_info;

	if (plat_data->fw->size <= plat_data->fw_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.data = plat_data->fw->data + plat_data->fw_offset;
	stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
					    stripped_firmware.size,
					    &dmab, SNDRV_PCM_STREAM_PLAYBACK);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data,
	       stripped_firmware.size);

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		ret = cl_dsp_init(sdev, hext_stream->hstream.stream_tag, false);

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function in
	 * the audio PCI driver will be invoked by ACPI for the PME event
	 * and will initialize the device and process the WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such as IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures power rails are
	 * enabled before accessing the SoundWire SHIM registers.
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * Set the boot_iteration to the last attempt, indicating that the
	 * DSP ROM has been initialized and from this point there will be no
	 * retry done to boot.
	 *
	 * Continue with code loading and firmware boot
	 */
	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
	ret = hda_cl_copy_fw(sdev, hext_stream);
	if (!ret)
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
	else
		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);

cleanup:
	/*
	 * Perform codeloader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return primary core id if both fw copy
	 * and stream clean up are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
				SOF_HDA_REG_PP_PPCTL,
				SOF_HDA_PPCTL_GPROCEN, 0);
	return ret;
}

/* pre fw run operations */
int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	/* disable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, false);
}

/* post fw run operations */
int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	int ret;

	if (sdev->first_boot) {
		struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;

		ret = hda_sdw_startup(sdev);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error: could not startup SoundWire links\n");
			return ret;
		}

		/* Check if IMR boot is usable */
		if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
		    sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT)
			hdev->imrboot_supported = true;
	}

	hda_sdw_int_enable(sdev, true);

	/* re-enable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, true);
}

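/*
 * Parse the cAVS config data element from the firmware's extended manifest
 * and cache the settings (currently only the LPRO/HPRO clock configuration)
 * in the driver's private data.
 */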
int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
					 const struct sof_ext_man_elem_header *hdr)
{
	const struct sof_ext_man_cavs_config_data *config_data =
		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int i, elem_num;

	/* calculate total number of config data elements */
	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
		   / sizeof(struct sof_config_elem);
	if (elem_num <= 0) {
		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
		return -EINVAL;
	}

	for (i = 0; i < elem_num; i++)
		switch (config_data->elems[i].token) {
		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
			/* skip empty token */
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
			hda->clk_config_lpro = config_data->elems[i].value;
			dev_dbg(sdev->dev, "FW clock config: %s\n",
				hda->clk_config_lpro ? "LPRO" : "HPRO");
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
			/* These elements are defined but not used yet. No warning is required */
			break;
		default:
			dev_info(sdev->dev, "unsupported token type: %d\n",
				 config_data->elems[i].token);
		}

	return 0;
}