// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//          Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for HDA DSP code loader
 */

#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include <sound/sof/ipc4/header.h>
#include "ext_manifest.h"
#include "../ipc4-priv.h"
#include "../ops.h"
#include "../sof-priv.h"
#include "hda.h"

static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int i;

	/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
	for (i = 0; i < chip->ssp_count; i++) {
		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
						 chip->ssp_base_offset
						 + i * SSP_DEV_MEM_SIZE
						 + SSP_SSC1_OFFSET,
						 SSP_SET_CBP_CFP,
						 SSP_SET_CBP_CFP);
	}
}

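/*
 * Prepare a host DMA stream for code loading: reserve an HDA stream,
 * allocate a DMA buffer of @size bytes and run the hw_params setup
 * (ICCMAX variant for capture streams). Returns the stream on success
 * or an ERR_PTR() on failure.
 */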
struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
					      unsigned int size, struct snd_dma_buffer *dmab,
					      int direction)
{
	struct hdac_ext_stream *hext_stream;
	struct hdac_stream *hstream;
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	int ret;

	hext_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!hext_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &hext_stream->hstream;
	hstream->substream = NULL;

	/* allocate DMA buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
	if (ret < 0) {
		dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
		goto out_put;
	}

	hstream->period_bytes = 0; /* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
		hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return hext_stream;

out_free:
	snd_dma_free_pages(dmab);
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}

/*
 * The first boot sequence has some extra steps: power up all host-managed
 * cores, unstall/run only the boot core to boot the DSP, then power down
 * any non-boot cores that were powered up.
 */
int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status, target_status;
	u32 flags, ipc_hdr, j;
	unsigned long mask;
	char *dump_msg;
	int ret;

	/* step 1: power up corex */
	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	hda_ssp_set_cbp_cfp(sdev);

	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
	if (!imr_boot)
		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
						    == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);

	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
					   ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/*
	 * step 7:
	 * - Cold/Full boot: wait for ROM init to proceed to download the firmware
	 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
	 */
	if (imr_boot)
		target_status = FSR_STATE_FW_ENTERED;
	else
		target_status = FSR_STATE_INIT_DONE;

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, status,
					(FSR_TO_STATE_CODE(status) == target_status),
					HDA_DSP_REG_POLL_INTERVAL_US,
					chip->rom_init_timeout *
					USEC_PER_MSEC);
	if (!ret) {
		/* set enabled cores mask and increment ref count for cores in init_core_mask */
		sdev->enabled_cores_mask |= chip->init_core_mask;
		mask = sdev->enabled_cores_mask;
		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
			sdev->dsp_core_ref_count[j]++;
		return 0;
	}

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;

	/* after max boot attempts make sure that the dump is printed */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags &= ~SOF_DBG_DUMP_OPTIONAL;

	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);

	kfree(dump_msg);
	return ret;
}

static int cl_trigger(struct snd_sof_dev *sdev,
		      struct hdac_ext_stream *hext_stream, int cmd)
{
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);

	/* code loader is special case that reuses stream ops */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
					1 << hstream->index,
					1 << hstream->index);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					sd_offset,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK);

		hstream->running = true;
		return 0;
	default:
		return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
	}
}

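/*
 * Undo hda_cl_stream_prepare(): stop the DMA (disable SPIB for playback,
 * clear DMA_START for capture), release the HDA stream, reset the BDL
 * registers and free the DMA buffer.
 */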
int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
		   struct hdac_ext_stream *hext_stream)
{
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);

	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	hstream->bufsize = 0;
	hstream->format_val = 0;

	return ret;
}

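/*
 * Transfer the prepared firmware image via DMA and wait for the ROM
 * status register to report FSR_STATE_FW_ENTERED. The DMA is stopped in
 * all cases; on failure the first error encountered is returned.
 */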
int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int reg;
	int ret, status;

	ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, reg,
					(FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */

	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	}

	ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	}

	return status;
}

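/*
 * Boot the base firmware with an additional capture stream kept prepared
 * for the duration of the download (ICCMAX flow). The Intel vendor-specific
 * LTRP guardband value is saved beforehand and restored once the boot is
 * done.
 */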
int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct hdac_ext_stream *iccmax_stream;
	struct snd_dma_buffer dmab_bdl;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
		HDA_VS_INTEL_LTRP_GB_MASK;

	/*
	 * Prepare capture stream for ICCMAX. We do not need to store
	 * the data, so use a buffer of PAGE_SIZE for receiving.
	 */
	iccmax_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
					      &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev, &dmab_bdl, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
			    HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}

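/*
 * Boot the DSP directly from IMR via the platform cl_init() callback;
 * no code loader stream is needed, so the stream_tag argument is 0.
 */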
static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip_info;
	int ret;

	chip_info = get_chip_info(sdev->pdata);
	if (chip_info->cl_init)
		ret = chip_info->cl_init(sdev, 0, true);
	else
		ret = -EINVAL;

	if (!ret)
		hda_sdw_process_wakeen(sdev);

	return ret;
}

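/*
 * Load and boot the base firmware: attempt an IMR boot first when supported,
 * otherwise copy the stripped firmware image into a code loader stream,
 * retry the ROM init up to HDA_FW_BOOT_ATTEMPTS times and start the DMA
 * transfer. Returns the init core mask on success, a negative error code
 * otherwise.
 */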
int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct snd_dma_buffer dmab;
	int ret, ret1, i;

	if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
		hda->boot_iteration = 0;
		ret = hda_dsp_boot_imr(sdev);
		if (!ret) {
			hda->booted_from_imr = true;
			return 0;
		}

		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
	}

	hda->booted_from_imr = false;

	chip_info = desc->chip_info;

	if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
	stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
					    stripped_firmware.size,
					    &dmab, SNDRV_PCM_STREAM_PLAYBACK);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data,
	       stripped_firmware.size);

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		if (chip_info->cl_init)
			ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
		else
			ret = -EINVAL;

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function of the
	 * audio PCI driver will be invoked by ACPI for the PME event to
	 * initialize the device and process the WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such as IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures the power rails are
	 * enabled before the SoundWire SHIM registers are accessed.
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * Set the boot_iteration to the last attempt, indicating that the
	 * DSP ROM has been initialized and from this point there will be no
	 * retry done to boot.
	 *
	 * Continue with code loading and firmware boot
	 */
	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
	ret = hda_cl_copy_fw(sdev, hext_stream);
	if (!ret) {
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
		hda->skip_imr_boot = false;
	} else {
		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
		hda->skip_imr_boot = true;
	}

cleanup:
	/*
	 * Perform codeloader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return primary core id if both fw copy
	 * and stream clean up are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
				SOF_HDA_REG_PP_PPCTL,
				SOF_HDA_PPCTL_GPROCEN, 0);
	return ret;
}

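/*
 * Load an IPC4 loadable library: copy the library image into a code loader
 * stream, send the SOF_IPC4_GLB_LOAD_LIBRARY request carrying the stream tag
 * and library id, then stop the DMA and clean up the stream. The load is
 * skipped on reload when the DSP booted from IMR, as IMR boot restores the
 * libraries as well.
 */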
int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
			      struct sof_ipc4_fw_library *fw_lib, bool reload)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct sof_ipc4_msg msg = {};
	struct snd_dma_buffer dmab;
	int ret, ret1;

	/* IMR booting will restore the libraries as well, skip the loading */
	if (reload && hda->booted_from_imr)
		return 0;

	/* the fw_lib has been verified during loading, we can trust the validity here */
	stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
	stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
					    stripped_firmware.size,
					    &dmab, SNDRV_PCM_STREAM_PLAYBACK);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size);

	msg.primary = hext_stream->hstream.stream_tag - 1;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);

	ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
		goto cleanup;
	}

	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);

	ret1 = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
		if (!ret)
			ret = ret1;
	}

cleanup:
	/* clean up even in case of error and return the first error */
	ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	return ret;
}

/* pre fw run operations */
int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	/* disable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, false);
}

/* post fw run operations */
int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	int ret;

	if (sdev->first_boot) {
		struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;

		ret = hda_sdw_startup(sdev);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error: could not startup SoundWire links\n");
			return ret;
		}

		/* Check if IMR boot is usable */
		if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
		    (sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT ||
		     sdev->pdata->ipc_type == SOF_INTEL_IPC4))
			hdev->imrboot_supported = true;
	}

	hda_sdw_int_enable(sdev, true);

	/* re-enable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, true);
}

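/*
 * Parse a cAVS config data element from the firmware extended manifest and
 * record the supported tokens (currently only the LPRO/HPRO clock
 * configuration) in the HDA device data.
 */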
int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
					 const struct sof_ext_man_elem_header *hdr)
{
	const struct sof_ext_man_cavs_config_data *config_data =
		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int i, elem_num;

	/* calculate total number of config data elements */
	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
		   / sizeof(struct sof_config_elem);
	if (elem_num <= 0) {
		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
		return -EINVAL;
	}

	for (i = 0; i < elem_num; i++)
		switch (config_data->elems[i].token) {
		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
			/* skip empty token */
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
			hda->clk_config_lpro = config_data->elems[i].value;
			dev_dbg(sdev->dev, "FW clock config: %s\n",
				hda->clk_config_lpro ? "LPRO" : "HPRO");
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
			/* These elements are defined but not being used yet. No warn is required */
			break;
		default:
			dev_info(sdev->dev, "unsupported token type: %d\n",
				 config_data->elems[i].token);
		}

	return 0;
}