1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  skl-topology.c - Implements Platform component ALSA controls/widget
4  *  handlers.
5  *
6  *  Copyright (C) 2014-2015 Intel Corp
7  *  Author: Jeeja KP <jeeja.kp@intel.com>
8  *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  */
10 
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 #include <linux/firmware.h>
14 #include <linux/uuid.h>
15 #include <sound/intel-nhlt.h>
16 #include <sound/soc.h>
17 #include <sound/soc-acpi.h>
18 #include <sound/soc-topology.h>
19 #include <uapi/sound/snd_sst_tokens.h>
20 #include <uapi/sound/skl-tplg-interface.h>
21 #include "skl-sst-dsp.h"
22 #include "skl-sst-ipc.h"
23 #include "skl-topology.h"
24 #include "skl.h"
25 #include "../common/sst-dsp.h"
26 #include "../common/sst-dsp-priv.h"
27 
28 #define SKL_CH_FIXUP_MASK		(1 << 0)
29 #define SKL_RATE_FIXUP_MASK		(1 << 1)
30 #define SKL_FMT_FIXUP_MASK		(1 << 2)
31 #define SKL_IN_DIR_BIT_MASK		BIT(0)
32 #define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
33 
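/*
 * Candidate DMIC channel selections used by the mic-select control below:
 * for each output channel count (mono/stereo/trio/quatro) the lists
 * enumerate which input mic channels may be routed to the output.
 */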
34 static const int mic_mono_list[] = {
35 	0, 1, 2, 3,
36 };
37 static const int mic_stereo_list[][SKL_CH_STEREO] = {
38 	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
39 };
40 static const int mic_trio_list[][SKL_CH_TRIO] = {
41 	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
42 };
43 static const int mic_quatro_list[][SKL_CH_QUATRO] = {
44 	{0, 1, 2, 3},
45 };
46 
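/*
 * Returns true only if the requested channel count, sample rate and bit
 * depth all match the values advertised by a pipe configuration.
 */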
47 #define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
48 	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))
49 
50 void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
51 {
52 	struct skl_d0i3_data *d0i3 =  &skl->d0i3;
53 
54 	switch (caps) {
55 	case SKL_D0I3_NONE:
56 		d0i3->non_d0i3++;
57 		break;
58 
59 	case SKL_D0I3_STREAMING:
60 		d0i3->streaming++;
61 		break;
62 
63 	case SKL_D0I3_NON_STREAMING:
64 		d0i3->non_streaming++;
65 		break;
66 	}
67 }
68 
69 void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
70 {
71 	struct skl_d0i3_data *d0i3 =  &skl->d0i3;
72 
73 	switch (caps) {
74 	case SKL_D0I3_NONE:
75 		d0i3->non_d0i3--;
76 		break;
77 
78 	case SKL_D0I3_STREAMING:
79 		d0i3->streaming--;
80 		break;
81 
82 	case SKL_D0I3_NON_STREAMING:
83 		d0i3->non_streaming--;
84 		break;
85 	}
86 }
87 
88 /*
89  * The SKL DSP driver models only a few DAPM widget types and ignores the
90  * rest. This helper checks whether the SKL driver handles a given widget.
91  */
92 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
93 				  struct device *dev)
94 {
95 	if (w->dapm->dev != dev)
96 		return false;
97 
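	/* the widget types below are not backed by an SKL DSP module */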
98 	switch (w->id) {
99 	case snd_soc_dapm_dai_link:
100 	case snd_soc_dapm_dai_in:
101 	case snd_soc_dapm_aif_in:
102 	case snd_soc_dapm_aif_out:
103 	case snd_soc_dapm_dai_out:
104 	case snd_soc_dapm_switch:
105 	case snd_soc_dapm_output:
106 	case snd_soc_dapm_mux:
107 
108 		return false;
109 	default:
110 		return true;
111 	}
112 }
113 
114 static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
115 {
116 	struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];
117 
118 	dev_dbg(skl->dev, "Dumping config\n");
119 	dev_dbg(skl->dev, "Input Format:\n");
120 	dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
121 	dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
122 	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
123 	dev_dbg(skl->dev, "valid bit depth = %d\n",
124 				iface->inputs[0].fmt.valid_bit_depth);
125 	dev_dbg(skl->dev, "Output Format:\n");
126 	dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
127 	dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
128 	dev_dbg(skl->dev, "valid bit depth = %d\n",
129 				iface->outputs[0].fmt.valid_bit_depth);
130 	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
131 }
132 
133 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
134 {
135 	int slot_map = 0xFFFFFFFF;
136 	int start_slot = 0;
137 	int i;
138 
139 	for (i = 0; i < chs; i++) {
140 		/*
141 		 * For 2 channels with starting slot as 0, slot map will
142 		 * look like 0xFFFFFF10.
143 		 */
144 		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
145 		start_slot++;
146 	}
147 	fmt->ch_map = slot_map;
148 }
149 
150 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
151 			struct skl_pipe_params *params, int fixup)
152 {
153 	if (fixup & SKL_RATE_FIXUP_MASK)
154 		fmt->s_freq = params->s_freq;
155 	if (fixup & SKL_CH_FIXUP_MASK) {
156 		fmt->channels = params->ch;
157 		skl_tplg_update_chmap(fmt, fmt->channels);
158 	}
159 	if (fixup & SKL_FMT_FIXUP_MASK) {
160 		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
161 
162 		/*
163 		 * A 16-bit sample uses a 16-bit container whereas a 24-bit sample
164 		 * is in a 32-bit container, so update the bit depth accordingly
165 		 */
166 		switch (fmt->valid_bit_depth) {
167 		case SKL_DEPTH_16BIT:
168 			fmt->bit_depth = fmt->valid_bit_depth;
169 			break;
170 
171 		default:
172 			fmt->bit_depth = SKL_DEPTH_32BIT;
173 			break;
174 		}
175 	}
176 
177 }
178 
179 /*
180  * A pipeline may have modules which impact the pcm parameters, like SRC,
181  * channel converter, format converter.
182  * We need to calculate the output params by applying the 'fixup'
183  * Topology will tell driver which type of fixup is to be applied by
184  * supplying the fixup mask, so based on that we calculate the output
185  *
186  * Now In FE the pcm hw_params is source/target format. Same is applicable
187  * for BE with its hw_params invoked.
188  * here based on FE, BE pipeline and direction we calculate the input and
189  * outfix and then apply that for a module
190  */
191 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
192 		struct skl_pipe_params *params, bool is_fe)
193 {
194 	int in_fixup, out_fixup;
195 	struct skl_module_fmt *in_fmt, *out_fmt;
196 
197 	/* Fixups will be applied to pin 0 only */
198 	in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
199 	out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;
200 
201 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
202 		if (is_fe) {
203 			in_fixup = m_cfg->params_fixup;
204 			out_fixup = (~m_cfg->converter) &
205 					m_cfg->params_fixup;
206 		} else {
207 			out_fixup = m_cfg->params_fixup;
208 			in_fixup = (~m_cfg->converter) &
209 					m_cfg->params_fixup;
210 		}
211 	} else {
212 		if (is_fe) {
213 			out_fixup = m_cfg->params_fixup;
214 			in_fixup = (~m_cfg->converter) &
215 					m_cfg->params_fixup;
216 		} else {
217 			in_fixup = m_cfg->params_fixup;
218 			out_fixup = (~m_cfg->converter) &
219 					m_cfg->params_fixup;
220 		}
221 	}
222 
223 	skl_tplg_update_params(in_fmt, params, in_fixup);
224 	skl_tplg_update_params(out_fmt, params, out_fixup);
225 }
226 
227 /*
228  * A module needs input and output buffers, which are dependent upon pcm
229  * params, so once the params are calculated, the buffer sizes need to be
230  * recalculated as well.
231  */
232 static void skl_tplg_update_buffer_size(struct skl_dev *skl,
233 				struct skl_module_cfg *mcfg)
234 {
235 	int multiplier = 1;
236 	struct skl_module_fmt *in_fmt, *out_fmt;
237 	struct skl_module_res *res;
238 
239 	/* Since the fixup is applied to pin 0 only, ibs and obs need to
240 	 * change for pin 0 only
241 	 */
242 	res = &mcfg->module->resources[mcfg->res_idx];
243 	in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
244 	out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;
245 
246 	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
247 		multiplier = 5;
248 
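	/* size ibs/obs for 1ms of audio: samples per ms * channels * bytes per sample (x5 for SRC) */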
249 	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
250 			in_fmt->channels * (in_fmt->bit_depth >> 3) *
251 			multiplier;
252 
253 	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
254 			out_fmt->channels * (out_fmt->bit_depth >> 3) *
255 			multiplier;
256 }
257 
258 static u8 skl_tplg_be_dev_type(int dev_type)
259 {
260 	int ret;
261 
262 	switch (dev_type) {
263 	case SKL_DEVICE_BT:
264 		ret = NHLT_DEVICE_BT;
265 		break;
266 
267 	case SKL_DEVICE_DMIC:
268 		ret = NHLT_DEVICE_DMIC;
269 		break;
270 
271 	case SKL_DEVICE_I2S:
272 		ret = NHLT_DEVICE_I2S;
273 		break;
274 
275 	default:
276 		ret = NHLT_DEVICE_INVALID;
277 		break;
278 	}
279 
280 	return ret;
281 }
282 
283 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
284 						struct skl_dev *skl)
285 {
286 	struct skl_module_cfg *m_cfg = w->priv;
287 	int link_type, dir;
288 	u32 ch, s_freq, s_fmt, s_cont;
289 	struct nhlt_specific_cfg *cfg;
290 	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
291 	int fmt_idx = m_cfg->fmt_idx;
292 	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];
293 
294 	/* check if we already have blob */
295 	if (m_cfg->formats_config[SKL_PARAM_INIT].caps_size > 0)
296 		return 0;
297 
298 	dev_dbg(skl->dev, "Applying default cfg blob\n");
299 	switch (m_cfg->dev_type) {
300 	case SKL_DEVICE_DMIC:
301 		link_type = NHLT_LINK_DMIC;
302 		dir = SNDRV_PCM_STREAM_CAPTURE;
303 		s_freq = m_iface->inputs[0].fmt.s_freq;
304 		s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
305 		s_cont = m_iface->inputs[0].fmt.bit_depth;
306 		ch = m_iface->inputs[0].fmt.channels;
307 		break;
308 
309 	case SKL_DEVICE_I2S:
310 		link_type = NHLT_LINK_SSP;
311 		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
312 			dir = SNDRV_PCM_STREAM_PLAYBACK;
313 			s_freq = m_iface->outputs[0].fmt.s_freq;
314 			s_fmt = m_iface->outputs[0].fmt.valid_bit_depth;
315 			s_cont = m_iface->outputs[0].fmt.bit_depth;
316 			ch = m_iface->outputs[0].fmt.channels;
317 		} else {
318 			dir = SNDRV_PCM_STREAM_CAPTURE;
319 			s_freq = m_iface->inputs[0].fmt.s_freq;
320 			s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
321 			s_cont = m_iface->inputs[0].fmt.bit_depth;
322 			ch = m_iface->inputs[0].fmt.channels;
323 		}
324 		break;
325 
326 	default:
327 		return -EINVAL;
328 	}
329 
330 	/* update the blob based on virtual bus_id and default params */
331 	cfg = intel_nhlt_get_endpoint_blob(skl->dev, skl->nhlt, m_cfg->vbus_id,
332 					   link_type, s_fmt, s_cont, ch,
333 					   s_freq, dir, dev_type);
334 	if (cfg) {
335 		m_cfg->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
336 		m_cfg->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
337 	} else {
338 		dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
339 					m_cfg->vbus_id, link_type, dir);
340 		dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d/%d\n",
341 					ch, s_freq, s_fmt, s_cont);
342 		return -EIO;
343 	}
344 
345 	return 0;
346 }
347 
348 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
349 							struct skl_dev *skl)
350 {
351 	struct skl_module_cfg *m_cfg = w->priv;
352 	struct skl_pipe_params *params = m_cfg->pipe->p_params;
353 	int p_conn_type = m_cfg->pipe->conn_type;
354 	bool is_fe;
355 
356 	if (!m_cfg->params_fixup)
357 		return;
358 
359 	dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE update\n",
360 				w->name);
361 
362 	skl_dump_mconfig(skl, m_cfg);
363 
364 	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
365 		is_fe = true;
366 	else
367 		is_fe = false;
368 
369 	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
370 	skl_tplg_update_buffer_size(skl, m_cfg);
371 
372 	dev_dbg(skl->dev, "Mconfig for widget=%s AFTER update\n",
373 				w->name);
374 
375 	skl_dump_mconfig(skl, m_cfg);
376 }
377 
378 /*
379  * Some modules can have multiple params set from user controls which
380  * need to be applied after the module is initialized. If the set_params
381  * flag is SKL_PARAM_SET, the module params are sent after module init.
382  */
383 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
384 						struct skl_dev *skl)
385 {
386 	int i, ret;
387 	struct skl_module_cfg *mconfig = w->priv;
388 	const struct snd_kcontrol_new *k;
389 	struct soc_bytes_ext *sb;
390 	struct skl_algo_data *bc;
391 	struct skl_specific_cfg *sp_cfg;
392 
393 	if (mconfig->formats_config[SKL_PARAM_SET].caps_size > 0 &&
394 	    mconfig->formats_config[SKL_PARAM_SET].set_params == SKL_PARAM_SET) {
395 		sp_cfg = &mconfig->formats_config[SKL_PARAM_SET];
396 		ret = skl_set_module_params(skl, sp_cfg->caps,
397 					sp_cfg->caps_size,
398 					sp_cfg->param_id, mconfig);
399 		if (ret < 0)
400 			return ret;
401 	}
402 
403 	for (i = 0; i < w->num_kcontrols; i++) {
404 		k = &w->kcontrol_news[i];
405 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
406 			sb = (void *) k->private_value;
407 			bc = (struct skl_algo_data *)sb->dobj.private;
408 
409 			if (bc->set_params == SKL_PARAM_SET) {
410 				ret = skl_set_module_params(skl,
411 						(u32 *)bc->params, bc->size,
412 						bc->param_id, mconfig);
413 				if (ret < 0)
414 					return ret;
415 			}
416 		}
417 	}
418 
419 	return 0;
420 }
421 
422 /*
423  * Some module params can be set from user controls and are required at
424  * module init time. Such a param is identified by its set_params flag
425  * being SKL_PARAM_INIT; in that case the parameter is sent as part of
426  * the module init data.
427  */
428 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
429 {
430 	const struct snd_kcontrol_new *k;
431 	struct soc_bytes_ext *sb;
432 	struct skl_algo_data *bc;
433 	struct skl_module_cfg *mconfig = w->priv;
434 	int i;
435 
436 	for (i = 0; i < w->num_kcontrols; i++) {
437 		k = &w->kcontrol_news[i];
438 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
439 			sb = (struct soc_bytes_ext *)k->private_value;
440 			bc = (struct skl_algo_data *)sb->dobj.private;
441 
442 			if (bc->set_params != SKL_PARAM_INIT)
443 				continue;
444 
445 			mconfig->formats_config[SKL_PARAM_INIT].caps =
446 							(u32 *)bc->params;
447 			mconfig->formats_config[SKL_PARAM_INIT].caps_size =
448 								bc->size;
449 
450 			break;
451 		}
452 	}
453 
454 	return 0;
455 }
456 
457 static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
458 		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
459 {
460 	switch (mcfg->dev_type) {
461 	case SKL_DEVICE_HDAHOST:
462 		return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);
463 
464 	case SKL_DEVICE_HDALINK:
465 		return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
466 	}
467 
468 	return 0;
469 }
470 
471 /*
472  * Inside a pipe instance, we can have various modules. These modules need
473  * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
474  * done by skl_init_module(), so invoke that for all modules in a pipeline
475  */
476 static int
477 skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
478 {
479 	struct skl_pipe_module *w_module;
480 	struct snd_soc_dapm_widget *w;
481 	struct skl_module_cfg *mconfig;
482 	u8 cfg_idx;
483 	int ret = 0;
484 
485 	list_for_each_entry(w_module, &pipe->w_list, node) {
486 		guid_t *uuid_mod;
487 		w = w_module->w;
488 		mconfig = w->priv;
489 
490 		/* check if module ids are populated */
491 		if (mconfig->id.module_id < 0) {
492 			dev_err(skl->dev,
493 					"module %pUL id not populated\n",
494 					(guid_t *)mconfig->guid);
495 			return -EIO;
496 		}
497 
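		/* pick the format/resource entries that match the selected pipe config */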
498 		cfg_idx = mconfig->pipe->cur_config_idx;
499 		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
500 		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
501 
502 		if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
503 			ret = skl->dsp->fw_ops.load_mod(skl->dsp,
504 				mconfig->id.module_id, mconfig->guid);
505 			if (ret < 0)
506 				return ret;
507 		}
508 
509 		/* prepare the DMA if the module is gateway cpr */
510 		ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
511 		if (ret < 0)
512 			return ret;
513 
514 		/* if the BE blob is NULL, update it with default values */
515 		skl_tplg_update_be_blob(w, skl);
516 
517 		/*
518 		 * apply fix/conversion to module params based on
519 		 * FE/BE params
520 		 */
521 		skl_tplg_update_module_params(w, skl);
522 		uuid_mod = (guid_t *)mconfig->guid;
523 		mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
524 						mconfig->id.instance_id);
525 		if (mconfig->id.pvt_id < 0)
526 			return mconfig->id.pvt_id;
527 		skl_tplg_set_module_init_data(w);
528 
529 		ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
530 		if (ret < 0) {
531 			dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
532 						mconfig->core_id, ret);
533 			return ret;
534 		}
535 
536 		ret = skl_init_module(skl, mconfig);
537 		if (ret < 0) {
538 			skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
539 			goto err;
540 		}
541 
542 		ret = skl_tplg_set_module_params(w, skl);
543 		if (ret < 0)
544 			goto err;
545 	}
546 
547 	return 0;
548 err:
549 	skl_dsp_put_core(skl->dsp, mconfig->core_id);
550 	return ret;
551 }
552 
553 static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
554 	 struct skl_pipe *pipe)
555 {
556 	int ret = 0;
557 	struct skl_pipe_module *w_module;
558 	struct skl_module_cfg *mconfig;
559 
560 	list_for_each_entry(w_module, &pipe->w_list, node) {
561 		guid_t *uuid_mod;
562 		mconfig  = w_module->w->priv;
563 		uuid_mod = (guid_t *)mconfig->guid;
564 
565 		if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod) {
566 			ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
567 						mconfig->id.module_id);
568 			if (ret < 0)
569 				return -EIO;
570 		}
571 		skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
572 
573 		ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
574 		if (ret < 0) {
575 			/* don't return; continue with other modules */
576 			dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
577 				mconfig->core_id, ret);
578 		}
579 	}
580 
581 	/* return the status of the last unload/put operation */
582 	return ret;
583 }
584 
585 static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
586 {
587 	struct skl_pipe_fmt *cur_fmt;
588 	struct skl_pipe_fmt *next_fmt;
589 	int i;
590 
591 	if (pipe->nr_cfgs <= 1)
592 		return false;
593 
594 	if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
595 		return true;
596 
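	/* an FE pipe is multi-format only if successive configs differ in channels, rate or bps */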
597 	for (i = 0; i < pipe->nr_cfgs - 1; i++) {
598 		if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
599 			cur_fmt = &pipe->configs[i].out_fmt;
600 			next_fmt = &pipe->configs[i + 1].out_fmt;
601 		} else {
602 			cur_fmt = &pipe->configs[i].in_fmt;
603 			next_fmt = &pipe->configs[i + 1].in_fmt;
604 		}
605 
606 		if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
607 				     cur_fmt->bps,
608 				     next_fmt->channels,
609 				     next_fmt->freq,
610 				     next_fmt->bps))
611 			return true;
612 	}
613 
614 	return false;
615 }
616 
617 /*
618  * Here, we select pipe format based on the pipe type and pipe
619  * direction to determine the current config index for the pipeline.
620  * The config index is then used to select proper module resources.
621  * Intermediate pipes currently have a fixed format, hence we select the
622  * 0th configuration by default for such pipes.
623  */
624 static int
625 skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
626 {
627 	struct skl_pipe *pipe = mconfig->pipe;
628 	struct skl_pipe_params *params = pipe->p_params;
629 	struct skl_path_config *pconfig = &pipe->configs[0];
630 	struct skl_pipe_fmt *fmt = NULL;
631 	bool in_fmt = false;
632 	int i;
633 
634 	if (pipe->nr_cfgs == 0) {
635 		pipe->cur_config_idx = 0;
636 		return 0;
637 	}
638 
639 	if (skl_tplg_is_multi_fmt(skl, pipe)) {
640 		pipe->cur_config_idx = pipe->pipe_config_idx;
641 		pipe->memory_pages = pconfig->mem_pages;
642 		dev_dbg(skl->dev, "found pipe config idx:%d\n",
643 			pipe->cur_config_idx);
644 		return 0;
645 	}
646 
647 	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
648 		dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
649 			pipe->ppl_id);
650 		pipe->cur_config_idx = 0;
651 		pipe->memory_pages = pconfig->mem_pages;
652 
653 		return 0;
654 	}
655 
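	/*
	 * For FE playback and BE capture the DAI params are matched against
	 * the pipe input format; otherwise against the output format.
	 */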
656 	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
657 	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
658 	     (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
659 	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
660 		in_fmt = true;
661 
662 	for (i = 0; i < pipe->nr_cfgs; i++) {
663 		pconfig = &pipe->configs[i];
664 		if (in_fmt)
665 			fmt = &pconfig->in_fmt;
666 		else
667 			fmt = &pconfig->out_fmt;
668 
669 		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
670 				    fmt->channels, fmt->freq, fmt->bps)) {
671 			pipe->cur_config_idx = i;
672 			pipe->memory_pages = pconfig->mem_pages;
673 			dev_dbg(skl->dev, "Using pipe config: %d\n", i);
674 
675 			return 0;
676 		}
677 	}
678 
679 	dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
680 		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
681 	return -EINVAL;
682 }
683 
684 /*
685  * A mixer module represents a pipeline. So in the Pre-PMU event of the
686  * mixer we need to create the pipeline; we do the following:
687  *   - Create the pipeline
688  *   - Initialize the modules in the pipeline
689  *   - Finally bind all modules together
690  */
691 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
692 							struct skl_dev *skl)
693 {
694 	int ret;
695 	struct skl_module_cfg *mconfig = w->priv;
696 	struct skl_pipe_module *w_module;
697 	struct skl_pipe *s_pipe = mconfig->pipe;
698 	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
699 	struct skl_module_deferred_bind *modules;
700 
701 	ret = skl_tplg_get_pipe_config(skl, mconfig);
702 	if (ret < 0)
703 		return ret;
704 
705 	/*
706 	 * Create a list of modules for pipe.
707 	 * This list contains modules from source to sink
708 	 */
709 	ret = skl_create_pipeline(skl, mconfig->pipe);
710 	if (ret < 0)
711 		return ret;
712 
713 	/* Init all pipe modules from source to sink */
714 	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
715 	if (ret < 0)
716 		return ret;
717 
718 	/* Bind modules from source to sink */
719 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
720 		dst_module = w_module->w->priv;
721 
722 		if (src_module == NULL) {
723 			src_module = dst_module;
724 			continue;
725 		}
726 
727 		ret = skl_bind_modules(skl, src_module, dst_module);
728 		if (ret < 0)
729 			return ret;
730 
731 		src_module = dst_module;
732 	}
733 
734 	/*
735 	 * When the destination module is initialized, check for these modules
736 	 * in deferred bind list. If found, bind them.
737 	 */
738 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
739 		if (list_empty(&skl->bind_list))
740 			break;
741 
742 		list_for_each_entry(modules, &skl->bind_list, node) {
743 			module = w_module->w->priv;
744 			if (modules->dst == module)
745 				skl_bind_modules(skl, modules->src,
746 							modules->dst);
747 		}
748 	}
749 
750 	return 0;
751 }
752 
753 static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
754 				int size, struct skl_module_cfg *mcfg)
755 {
756 	int i, pvt_id;
757 
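	/*
	 * KPB post-bind params carry (module id, instance id) pairs; rewrite
	 * the topology instance ids with the driver's private instance ids.
	 */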
758 	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
759 		struct skl_kpb_params *kpb_params =
760 				(struct skl_kpb_params *)params;
761 		struct skl_mod_inst_map *inst = kpb_params->u.map;
762 
763 		for (i = 0; i < kpb_params->num_modules; i++) {
764 			pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
765 								inst->inst_id);
766 			if (pvt_id < 0)
767 				return -EINVAL;
768 
769 			inst->inst_id = pvt_id;
770 			inst++;
771 		}
772 	}
773 
774 	return 0;
775 }
776 /*
777  * Some modules require params to be set after the module is bound to
778  * all of its connected pins.
779  *
780  * The module provider initializes the set_params flag for such modules
781  * and we send the params after binding
782  */
783 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
784 			struct skl_module_cfg *mcfg, struct skl_dev *skl)
785 {
786 	int i, ret;
787 	struct skl_module_cfg *mconfig = w->priv;
788 	const struct snd_kcontrol_new *k;
789 	struct soc_bytes_ext *sb;
790 	struct skl_algo_data *bc;
791 	struct skl_specific_cfg *sp_cfg;
792 	u32 *params;
793 
794 	/*
795 	 * check that all out/in pins are in the bind state;
796 	 * only then set the module params
797 	 */
798 	for (i = 0; i < mcfg->module->max_output_pins; i++) {
799 		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
800 			return 0;
801 	}
802 
803 	for (i = 0; i < mcfg->module->max_input_pins; i++) {
804 		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
805 			return 0;
806 	}
807 
808 	if (mconfig->formats_config[SKL_PARAM_BIND].caps_size > 0 &&
809 	    mconfig->formats_config[SKL_PARAM_BIND].set_params ==
810 								SKL_PARAM_BIND) {
811 		sp_cfg = &mconfig->formats_config[SKL_PARAM_BIND];
812 		ret = skl_set_module_params(skl, sp_cfg->caps,
813 					sp_cfg->caps_size,
814 					sp_cfg->param_id, mconfig);
815 		if (ret < 0)
816 			return ret;
817 	}
818 
819 	for (i = 0; i < w->num_kcontrols; i++) {
820 		k = &w->kcontrol_news[i];
821 		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
822 			sb = (void *) k->private_value;
823 			bc = (struct skl_algo_data *)sb->dobj.private;
824 
825 			if (bc->set_params == SKL_PARAM_BIND) {
826 				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
827 				if (!params)
828 					return -ENOMEM;
829 
830 				skl_fill_sink_instance_id(skl, params, bc->max,
831 								mconfig);
832 
833 				ret = skl_set_module_params(skl, params,
834 						bc->max, bc->param_id, mconfig);
835 				kfree(params);
836 
837 				if (ret < 0)
838 					return ret;
839 			}
840 		}
841 	}
842 
843 	return 0;
844 }
845 
846 static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
847 {
848 	struct uuid_module *module;
849 
850 	list_for_each_entry(module, &skl->uuid_list, list) {
851 		if (guid_equal(uuid, &module->uuid))
852 			return module->id;
853 	}
854 
855 	return -EINVAL;
856 }
857 
858 static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
859 					const struct snd_kcontrol_new *k)
860 {
861 	struct soc_bytes_ext *sb = (void *) k->private_value;
862 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
863 	struct skl_kpb_params *uuid_params, *params;
864 	struct hdac_bus *bus = skl_to_bus(skl);
865 	int i, size, module_id;
866 
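	/*
	 * Rebuild the KPB bind params: replace the UUID-keyed map from the
	 * topology with a module-id-keyed map resolved via the uuid list.
	 */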
867 	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
868 		uuid_params = (struct skl_kpb_params *)bc->params;
869 		size = struct_size(params, u.map, uuid_params->num_modules);
870 
871 		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
872 		if (!params)
873 			return -ENOMEM;
874 
875 		params->num_modules = uuid_params->num_modules;
876 
877 		for (i = 0; i < uuid_params->num_modules; i++) {
878 			module_id = skl_get_module_id(skl,
879 				&uuid_params->u.map_uuid[i].mod_uuid);
880 			if (module_id < 0) {
881 				devm_kfree(bus->dev, params);
882 				return -EINVAL;
883 			}
884 
885 			params->u.map[i].mod_id = module_id;
886 			params->u.map[i].inst_id =
887 				uuid_params->u.map_uuid[i].inst_id;
888 		}
889 
890 		devm_kfree(bus->dev, bc->params);
891 		bc->params = (char *)params;
892 		bc->max = size;
893 	}
894 
895 	return 0;
896 }
897 
898 /*
899  * Retrieve the module id from UUID mentioned in the
900  * post bind params
901  */
902 void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
903 				struct snd_soc_dapm_widget *w)
904 {
905 	struct skl_module_cfg *mconfig = w->priv;
906 	int i;
907 
908 	/*
909 	 * Post bind params are used only for KPB,
910 	 * to set the copier instances that drain the data
911 	 * in fast mode
912 	 */
913 	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
914 		return;
915 
916 	for (i = 0; i < w->num_kcontrols; i++)
917 		if ((w->kcontrol_news[i].access &
918 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
919 			(skl_tplg_find_moduleid_from_uuid(skl,
920 			&w->kcontrol_news[i]) < 0))
921 			dev_err(skl->dev,
922 				"%s: invalid kpb post bind params\n",
923 				__func__);
924 }
925 
926 static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
927 	struct skl_module_cfg *src, struct skl_module_cfg *dst)
928 {
929 	struct skl_module_deferred_bind *m_list, *modules;
930 	int i;
931 
932 	/* only supported for modules with static pin connections */
933 	for (i = 0; i < dst->module->max_input_pins; i++) {
934 		struct skl_module_pin *pin = &dst->m_in_pin[i];
935 
936 		if (pin->is_dynamic)
937 			continue;
938 
939 		if ((pin->id.module_id  == src->id.module_id) &&
940 			(pin->id.instance_id  == src->id.instance_id)) {
941 
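			/* skip if this src/dst pair is already on the deferred bind list */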
942 			if (!list_empty(&skl->bind_list)) {
943 				list_for_each_entry(modules, &skl->bind_list, node) {
944 					if (modules->src == src && modules->dst == dst)
945 						return 0;
946 				}
947 			}
948 
949 			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
950 			if (!m_list)
951 				return -ENOMEM;
952 
953 			m_list->src = src;
954 			m_list->dst = dst;
955 
956 			list_add(&m_list->node, &skl->bind_list);
957 		}
958 	}
959 
960 	return 0;
961 }
962 
963 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
964 				struct skl_dev *skl,
965 				struct snd_soc_dapm_widget *src_w,
966 				struct skl_module_cfg *src_mconfig)
967 {
968 	struct snd_soc_dapm_path *p;
969 	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
970 	struct skl_module_cfg *sink_mconfig;
971 	int ret;
972 
973 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
974 		if (!p->connect)
975 			continue;
976 
977 		dev_dbg(skl->dev,
978 			"%s: src widget=%s\n", __func__, w->name);
979 		dev_dbg(skl->dev,
980 			"%s: sink widget=%s\n", __func__, p->sink->name);
981 
982 		next_sink = p->sink;
983 
984 		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
985 			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
986 
987 		/*
988 		 * Here we check widgets in the sink pipelines; these can be
989 		 * of any widget type and we are only interested in the ones
990 		 * handled by the SKL driver, so check that first
991 		 */
992 		if ((p->sink->priv != NULL) &&
993 				is_skl_dsp_widget_type(p->sink, skl->dev)) {
994 
995 			sink = p->sink;
996 			sink_mconfig = sink->priv;
997 
998 			/*
999 			 * Modules other than the PGA leaf can be connected
1000 			 * directly or via a switch to a module in another
1001 			 * pipeline, e.g. a reference path.
1002 			 * When the path is enabled, the dst module that needs
1003 			 * to be bound may not be initialized. If the module is
1004 			 * not initialized, add the pair to the deferred
1005 			 * bind list and, when the dst module is initialized,
1006 			 * bind this module to the dst module from that list.
1007 			 */
1008 			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
1009 				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
1010 
1011 				ret = skl_tplg_module_add_deferred_bind(skl,
1012 						src_mconfig, sink_mconfig);
1013 
1014 				if (ret < 0)
1015 					return ret;
1016 
1017 			}
1018 
1019 
1020 			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
1021 				sink_mconfig->m_state == SKL_MODULE_UNINIT)
1022 				continue;
1023 
1024 			/* Bind source to sink, mixin is always source */
1025 			ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
1026 			if (ret)
1027 				return ret;
1028 
1029 			/* set module params after bind */
1030 			skl_tplg_set_module_bind_params(src_w,
1031 					src_mconfig, skl);
1032 			skl_tplg_set_module_bind_params(sink,
1033 					sink_mconfig, skl);
1034 
1035 			/* Start sinks pipe first */
1036 			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
1037 				if (sink_mconfig->pipe->conn_type !=
1038 							SKL_PIPE_CONN_TYPE_FE)
1039 					ret = skl_run_pipe(skl,
1040 							sink_mconfig->pipe);
1041 				if (ret)
1042 					return ret;
1043 			}
1044 		}
1045 	}
1046 
1047 	if (!sink && next_sink)
1048 		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
1049 
1050 	return 0;
1051 }
1052 
1053 /*
1054  * A PGA represents a module in a pipeline. So in the Pre-PMU event of the
1055  * PGA we need to do the following:
1056  *   - Bind to the sink pipeline
1057  *      Since the sink pipes can be running and we don't get a mixer event
1058  *      on connect for an already running mixer, we need to find the sink
1059  *      pipes here and bind to them. This way dynamic connect works.
1060  *   - Start the sink pipeline, if not running
1061  *   - Then run the current pipe
1062  */
1063 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
1064 							struct skl_dev *skl)
1065 {
1066 	struct skl_module_cfg *src_mconfig;
1067 	int ret = 0;
1068 
1069 	src_mconfig = w->priv;
1070 
1071 	/*
1072 	 * find which sink it is connected to, bind with the sink,
1073 	 * if sink is not started, start sink pipe first, then start
1074 	 * this pipe
1075 	 */
1076 	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
1077 	if (ret)
1078 		return ret;
1079 
1080 	/* Start source pipe last after starting all sinks */
1081 	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1082 		return skl_run_pipe(skl, src_mconfig->pipe);
1083 
1084 	return 0;
1085 }
1086 
1087 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
1088 		struct snd_soc_dapm_widget *w, struct skl_dev *skl)
1089 {
1090 	struct snd_soc_dapm_path *p;
1091 	struct snd_soc_dapm_widget *src_w = NULL;
1092 
1093 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1094 		src_w = p->source;
1095 		if (!p->connect)
1096 			continue;
1097 
1098 		dev_dbg(skl->dev, "sink widget=%s\n", w->name);
1099 		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);
1100 
1101 		/*
1102 		 * Here we check widgets in the source pipelines; these can
1103 		 * be of any widget type and we are only interested in the
1104 		 * ones handled by the SKL driver, so check that first
1105 		 */
1106 		if ((p->source->priv != NULL) &&
1107 				is_skl_dsp_widget_type(p->source, skl->dev)) {
1108 			return p->source;
1109 		}
1110 	}
1111 
1112 	if (src_w != NULL)
1113 		return skl_get_src_dsp_widget(src_w, skl);
1114 
1115 	return NULL;
1116 }
1117 
1118 /*
1119  * In the Post-PMU event of the mixer we need to do the following:
1120  *   - Check if this pipe is running
1121  *   - if not, then
1122  *	- bind this pipeline to its source pipeline
1123  *	  if source pipe is already running, this means it is a dynamic
1124  *	  connection and we need to bind only to that pipe
1125  *	- start this pipeline
1126  */
1127 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
1128 							struct skl_dev *skl)
1129 {
1130 	int ret = 0;
1131 	struct snd_soc_dapm_widget *source, *sink;
1132 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1133 	int src_pipe_started = 0;
1134 
1135 	sink = w;
1136 	sink_mconfig = sink->priv;
1137 
1138 	/*
1139 	 * If the source pipe is already started, the source is driving one
1140 	 * more sink before this sink got connected. Since the source is
1141 	 * started, bind this sink to the source and start this pipe.
1142 	 */
1143 	source = skl_get_src_dsp_widget(w, skl);
1144 	if (source != NULL) {
1145 		src_mconfig = source->priv;
1146 		sink_mconfig = sink->priv;
1147 		src_pipe_started = 1;
1148 
1149 		/*
1150 		 * check the pipe state; if it is not started, there is no
1151 		 * need to bind or start the pipe here
1152 		 */
1153 		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
1154 			src_pipe_started = 0;
1155 	}
1156 
1157 	if (src_pipe_started) {
1158 		ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
1159 		if (ret)
1160 			return ret;
1161 
1162 		/* set module params after bind */
1163 		skl_tplg_set_module_bind_params(source, src_mconfig, skl);
1164 		skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);
1165 
1166 		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1167 			ret = skl_run_pipe(skl, sink_mconfig->pipe);
1168 	}
1169 
1170 	return ret;
1171 }
1172 
1173 /*
1174  * In the Pre-PMD event of the mixer we need to do the following:
1175  *   - Stop the pipe
1176  *   - Find the source connections and remove them from the dapm_path_list
1177  *   - Unbind from the source pipelines if still connected
1178  */
1179 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1180 							struct skl_dev *skl)
1181 {
1182 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1183 	int ret = 0, i;
1184 
1185 	sink_mconfig = w->priv;
1186 
1187 	/* Stop the pipe */
1188 	ret = skl_stop_pipe(skl, sink_mconfig->pipe);
1189 	if (ret)
1190 		return ret;
1191 
1192 	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
1193 		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1194 			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1195 			if (!src_mconfig)
1196 				continue;
1197 
1198 			ret = skl_unbind_modules(skl,
1199 						src_mconfig, sink_mconfig);
1200 		}
1201 	}
1202 
1203 	return ret;
1204 }
1205 
1206 /*
1207  * In the Post-PMD event of the mixer we need to do the following:
1208  *   - Unbind the modules within the pipeline
1209  *   - Delete the pipeline (modules are not required to be explicitly
1210  *     deleted, deleting the pipeline is enough here)
1211  */
1212 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1213 							struct skl_dev *skl)
1214 {
1215 	struct skl_module_cfg *mconfig = w->priv;
1216 	struct skl_pipe_module *w_module;
1217 	struct skl_module_cfg *src_module = NULL, *dst_module;
1218 	struct skl_pipe *s_pipe = mconfig->pipe;
1219 	struct skl_module_deferred_bind *modules, *tmp;
1220 
1221 	if (s_pipe->state == SKL_PIPE_INVALID)
1222 		return -EINVAL;
1223 
1224 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1225 		if (list_empty(&skl->bind_list))
1226 			break;
1227 
1228 		src_module = w_module->w->priv;
1229 
1230 		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1231 			/*
1232 			 * When the destination module is deleted, Unbind the
1233 			 * modules from deferred bind list.
1234 			 */
1235 			if (modules->dst == src_module) {
1236 				skl_unbind_modules(skl, modules->src,
1237 						modules->dst);
1238 			}
1239 
1240 			/*
1241 			 * When the source module is deleted, remove this entry
1242 			 * from the deferred bind list.
1243 			 */
1244 			if (modules->src == src_module) {
1245 				list_del(&modules->node);
1246 				modules->src = NULL;
1247 				modules->dst = NULL;
1248 				kfree(modules);
1249 			}
1250 		}
1251 	}
1252 
1253 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1254 		dst_module = w_module->w->priv;
1255 
1256 		if (src_module == NULL) {
1257 			src_module = dst_module;
1258 			continue;
1259 		}
1260 
1261 		skl_unbind_modules(skl, src_module, dst_module);
1262 		src_module = dst_module;
1263 	}
1264 
1265 	skl_delete_pipe(skl, mconfig->pipe);
1266 
1267 	list_for_each_entry(w_module, &s_pipe->w_list, node) {
1268 		src_module = w_module->w->priv;
1269 		src_module->m_state = SKL_MODULE_UNINIT;
1270 	}
1271 
1272 	return skl_tplg_unload_pipe_modules(skl, s_pipe);
1273 }
1274 
1275 /*
1276  * In the Post-PMD event of the PGA we need to do the following:
1277  *   - Stop the pipeline
1278  *   - If the source pipe is connected, unbind from the source pipelines
1279  */
1280 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1281 							struct skl_dev *skl)
1282 {
1283 	struct skl_module_cfg *src_mconfig, *sink_mconfig;
1284 	int ret = 0, i;
1285 
1286 	src_mconfig = w->priv;
1287 
1288 	/* Stop the pipe since this is a mixin module */
1289 	ret = skl_stop_pipe(skl, src_mconfig->pipe);
1290 	if (ret)
1291 		return ret;
1292 
1293 	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
1294 		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1295 			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1296 			if (!sink_mconfig)
1297 				continue;
1298 			/*
1299 			 * This is a connector and if a path is found it means
1300 			 * the unbind between source and sink has not happened yet
1301 			 */
1302 			ret = skl_unbind_modules(skl, src_mconfig,
1303 							sink_mconfig);
1304 		}
1305 	}
1306 
1307 	return ret;
1308 }
1309 
1310 /*
1311  * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1312  * second one is required, it is created as another pipe entity.
1313  * The mixer is responsible for pipe management and represents a pipeline
1314  * instance
1315  */
1316 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1317 				struct snd_kcontrol *k, int event)
1318 {
1319 	struct snd_soc_dapm_context *dapm = w->dapm;
1320 	struct skl_dev *skl = get_skl_ctx(dapm->dev);
1321 
1322 	switch (event) {
1323 	case SND_SOC_DAPM_PRE_PMU:
1324 		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1325 
1326 	case SND_SOC_DAPM_POST_PMU:
1327 		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1328 
1329 	case SND_SOC_DAPM_PRE_PMD:
1330 		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1331 
1332 	case SND_SOC_DAPM_POST_PMD:
1333 		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 /*
1340  * In modelling, we assume the rest of the modules in a pipeline are PGAs.
1341  * But we are only interested in the last PGA (leaf PGA) in a pipeline, to
1342  * disconnect from the sink while it is running (two FEs to one BE or one
1343  * FE to two BEs scenarios)
1344  */
1345 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1346 			struct snd_kcontrol *k, int event)
1347 
1348 {
1349 	struct snd_soc_dapm_context *dapm = w->dapm;
1350 	struct skl_dev *skl = get_skl_ctx(dapm->dev);
1351 
1352 	switch (event) {
1353 	case SND_SOC_DAPM_PRE_PMU:
1354 		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1355 
1356 	case SND_SOC_DAPM_POST_PMD:
1357 		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1358 	}
1359 
1360 	return 0;
1361 }
1362 
1363 static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
1364 					 struct snd_ctl_elem_value *ucontrol,
1365 					 bool is_set)
1366 {
1367 	struct snd_soc_component *component =
1368 		snd_soc_kcontrol_component(kcontrol);
1369 	struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
1370 	struct skl_dev *skl = bus_to_skl(bus);
1371 	struct skl_pipeline *ppl;
1372 	struct skl_pipe *pipe = NULL;
1373 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1374 	u32 *pipe_id;
1375 
1376 	if (!ec)
1377 		return -EINVAL;
1378 
1379 	if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
1380 		return -EINVAL;
1381 
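	/* the enum control's dobj.private holds the id of the pipe it controls */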
1382 	pipe_id = ec->dobj.private;
1383 
1384 	list_for_each_entry(ppl, &skl->ppl_list, node) {
1385 		if (ppl->pipe->ppl_id == *pipe_id) {
1386 			pipe = ppl->pipe;
1387 			break;
1388 		}
1389 	}
1390 	if (!pipe)
1391 		return -EIO;
1392 
1393 	if (is_set)
1394 		pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
1395 	else
1396 		ucontrol->value.enumerated.item[0]  =  pipe->pipe_config_idx;
1397 
1398 	return 0;
1399 }
1400 
1401 static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
1402 				     struct snd_ctl_elem_value *ucontrol)
1403 {
1404 	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
1405 }
1406 
1407 static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
1408 				     struct snd_ctl_elem_value *ucontrol)
1409 {
1410 	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
1411 }
1412 
1413 static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
1414 					  struct snd_ctl_elem_value *ucontrol)
1415 {
1416 	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
1417 }
1418 
1419 static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
1420 					  struct snd_ctl_elem_value *ucontrol)
1421 {
1422 	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
1423 }
1424 
1425 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1426 			unsigned int __user *data, unsigned int size)
1427 {
1428 	struct soc_bytes_ext *sb =
1429 			(struct soc_bytes_ext *)kcontrol->private_value;
1430 	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1431 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1432 	struct skl_module_cfg *mconfig = w->priv;
1433 	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
1434 
1435 	if (w->power)
1436 		skl_get_module_params(skl, (u32 *)bc->params,
1437 				      bc->size, bc->param_id, mconfig);
1438 
1439 	/* decrement size for TLV header */
1440 	size -= 2 * sizeof(u32);
1441 
1442 	/* check size as we don't want to send kernel data */
1443 	if (size > bc->max)
1444 		size = bc->max;
1445 
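	/* TLV payload layout: param id, payload size, then the parameter data */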
1446 	if (bc->params) {
1447 		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1448 			return -EFAULT;
1449 		if (copy_to_user(data + 1, &size, sizeof(u32)))
1450 			return -EFAULT;
1451 		if (copy_to_user(data + 2, bc->params, size))
1452 			return -EFAULT;
1453 	}
1454 
1455 	return 0;
1456 }
1457 
1458 #define SKL_PARAM_VENDOR_ID 0xff
1459 
1460 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1461 			const unsigned int __user *data, unsigned int size)
1462 {
1463 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1464 	struct skl_module_cfg *mconfig = w->priv;
1465 	struct soc_bytes_ext *sb =
1466 			(struct soc_bytes_ext *)kcontrol->private_value;
1467 	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1468 	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
1469 
1470 	if (ac->params) {
1471 		if (size > ac->max)
1472 			return -EINVAL;
1473 		ac->size = size;
1474 
1475 		if (copy_from_user(ac->params, data, size))
1476 			return -EFAULT;
1477 
1478 		if (w->power)
1479 			return skl_set_module_params(skl,
1480 						(u32 *)ac->params, ac->size,
1481 						ac->param_id, mconfig);
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
1488 		struct snd_ctl_elem_value *ucontrol)
1489 {
1490 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1491 	struct skl_module_cfg *mconfig = w->priv;
1492 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1493 	u32 ch_type = *((u32 *)ec->dobj.private);
1494 
1495 	if (mconfig->dmic_ch_type == ch_type)
1496 		ucontrol->value.enumerated.item[0] =
1497 					mconfig->dmic_ch_combo_index;
1498 	else
1499 		ucontrol->value.enumerated.item[0] = 0;
1500 
1501 	return 0;
1502 }
1503 
1504 static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
1505 	struct skl_mic_sel_config *mic_cfg, struct device *dev)
1506 {
1507 	struct skl_specific_cfg *sp_cfg =
1508 				&mconfig->formats_config[SKL_PARAM_INIT];
1509 
1510 	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
1511 	sp_cfg->set_params = SKL_PARAM_SET;
1512 	sp_cfg->param_id = 0x00;
1513 	if (!sp_cfg->caps) {
1514 		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
1515 		if (!sp_cfg->caps)
1516 			return -ENOMEM;
1517 	}
1518 
1519 	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
1520 	mic_cfg->flags = 0;
1521 	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
1522 
1523 	return 0;
1524 }
1525 
1526 static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
1527 			struct snd_ctl_elem_value *ucontrol)
1528 {
1529 	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1530 	struct skl_module_cfg *mconfig = w->priv;
1531 	struct skl_mic_sel_config mic_cfg = {0};
1532 	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1533 	u32 ch_type = *((u32 *)ec->dobj.private);
1534 	const int *list;
1535 	u8 in_ch, out_ch, index;
1536 
1537 	mconfig->dmic_ch_type = ch_type;
1538 	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
1539 
1540 	/* enum control index 0 is INVALID, so no channels to be set */
1541 	if (mconfig->dmic_ch_combo_index == 0)
1542 		return 0;
1543 
1544 	/* No valid channel selection map for index 0, so offset by 1 */
1545 	index = mconfig->dmic_ch_combo_index - 1;
1546 
1547 	switch (ch_type) {
1548 	case SKL_CH_MONO:
1549 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
1550 			return -EINVAL;
1551 
1552 		list = &mic_mono_list[index];
1553 		break;
1554 
1555 	case SKL_CH_STEREO:
1556 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
1557 			return -EINVAL;
1558 
1559 		list = mic_stereo_list[index];
1560 		break;
1561 
1562 	case SKL_CH_TRIO:
1563 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
1564 			return -EINVAL;
1565 
1566 		list = mic_trio_list[index];
1567 		break;
1568 
1569 	case SKL_CH_QUATRO:
1570 		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
1571 			return -EINVAL;
1572 
1573 		list = mic_quatro_list[index];
1574 		break;
1575 
1576 	default:
1577 		dev_err(w->dapm->dev,
1578 				"Invalid channel %d for mic_select module\n",
1579 				ch_type);
1580 		return -EINVAL;
1581 
1582 	}
1583 
1584 	/* the channel type enum value maps to the number of channels for that type */
1585 	for (out_ch = 0; out_ch < ch_type; out_ch++) {
1586 		in_ch = list[out_ch];
1587 		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
1588 	}
1589 
1590 	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
1591 }
1592 
1593 /*
1594  * Fill the dma id for host and link. In the case of a passthrough
1595  * pipeline, both the host and the link are in the same pipeline,
1596  * so we need to copy the link or host params based on the dev_type
1597  */
1598 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1599 				struct skl_pipe_params *params)
1600 {
1601 	struct skl_pipe *pipe = mcfg->pipe;
1602 
1603 	if (pipe->passthru) {
1604 		switch (mcfg->dev_type) {
1605 		case SKL_DEVICE_HDALINK:
1606 			pipe->p_params->link_dma_id = params->link_dma_id;
1607 			pipe->p_params->link_index = params->link_index;
1608 			pipe->p_params->link_bps = params->link_bps;
1609 			break;
1610 
1611 		case SKL_DEVICE_HDAHOST:
1612 			pipe->p_params->host_dma_id = params->host_dma_id;
1613 			pipe->p_params->host_bps = params->host_bps;
1614 			break;
1615 
1616 		default:
1617 			break;
1618 		}
1619 		pipe->p_params->s_fmt = params->s_fmt;
1620 		pipe->p_params->ch = params->ch;
1621 		pipe->p_params->s_freq = params->s_freq;
1622 		pipe->p_params->stream = params->stream;
1623 		pipe->p_params->format = params->format;
1624 
1625 	} else {
1626 		memcpy(pipe->p_params, params, sizeof(*params));
1627 	}
1628 }
1629 
1630 /*
1631  * The FE params are passed by hw_params of the DAI.
1632  * On hw_params, the params are stored in the gateway module of the FE and
1633  * we need to convert them into the DSP module configuration format; that
1634  * conversion is done here
1635  */
1636 int skl_tplg_update_pipe_params(struct device *dev,
1637 			struct skl_module_cfg *mconfig,
1638 			struct skl_pipe_params *params)
1639 {
1640 	struct skl_module_res *res;
1641 	struct skl_dev *skl = get_skl_ctx(dev);
1642 	struct skl_module_fmt *format = NULL;
1643 	u8 cfg_idx = mconfig->pipe->cur_config_idx;
1644 
1645 	res = &mconfig->module->resources[mconfig->res_idx];
1646 	skl_tplg_fill_dma_id(mconfig, params);
1647 	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
1648 	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
1649 
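	/* if nr_modules is set, the module formats are pre-defined and are not overridden here */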
1650 	if (skl->nr_modules)
1651 		return 0;
1652 
1653 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1654 		format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
1655 	else
1656 		format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
1657 
1658 	/* set the hw_params */
1659 	format->s_freq = params->s_freq;
1660 	format->channels = params->ch;
1661 	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1662 
1663 	/*
1664 	 * A 16-bit sample uses a 16-bit container whereas a 24-bit sample
1665 	 * is in a 32-bit container, so update the bit depth accordingly
1666 	 */
1667 	switch (format->valid_bit_depth) {
1668 	case SKL_DEPTH_16BIT:
1669 		format->bit_depth = format->valid_bit_depth;
1670 		break;
1671 
1672 	case SKL_DEPTH_24BIT:
1673 	case SKL_DEPTH_32BIT:
1674 		format->bit_depth = SKL_DEPTH_32BIT;
1675 		break;
1676 
1677 	default:
1678 		dev_err(dev, "Invalid bit depth %x for pipe\n",
1679 				format->valid_bit_depth);
1680 		return -EINVAL;
1681 	}
1682 
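	/* host-side buffer size: 1ms of audio at the FE format (samples per ms * channels * bytes) */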
1683 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1684 		res->ibs = (format->s_freq / 1000) *
1685 				(format->channels) *
1686 				(format->bit_depth >> 3);
1687 	} else {
1688 		res->obs = (format->s_freq / 1000) *
1689 				(format->channels) *
1690 				(format->bit_depth >> 3);
1691 	}
1692 
1693 	return 0;
1694 }
1695 
1696 /*
1697  * Query the module config for the FE DAI
1698  * This is used to find the hw_params set for that DAI and apply them to
1699  * the FE pipeline
1700  */
1701 struct skl_module_cfg *
1702 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1703 {
1704 	struct snd_soc_dapm_widget *w;
1705 	struct snd_soc_dapm_path *p = NULL;
1706 
1707 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1708 		w = dai->playback_widget;
1709 		snd_soc_dapm_widget_for_each_sink_path(w, p) {
1710 			if (p->connect && p->sink->power &&
1711 				!is_skl_dsp_widget_type(p->sink, dai->dev))
1712 				continue;
1713 
1714 			if (p->sink->priv) {
1715 				dev_dbg(dai->dev, "set params for %s\n",
1716 						p->sink->name);
1717 				return p->sink->priv;
1718 			}
1719 		}
1720 	} else {
1721 		w = dai->capture_widget;
1722 		snd_soc_dapm_widget_for_each_source_path(w, p) {
1723 			if (p->connect && p->source->power &&
1724 				!is_skl_dsp_widget_type(p->source, dai->dev))
1725 				continue;
1726 
1727 			if (p->source->priv) {
1728 				dev_dbg(dai->dev, "set params for %s\n",
1729 						p->source->name);
1730 				return p->source->priv;
1731 			}
1732 		}
1733 	}
1734 
1735 	return NULL;
1736 }
1737 
1738 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1739 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1740 {
1741 	struct snd_soc_dapm_path *p;
1742 	struct skl_module_cfg *mconfig = NULL;
1743 
1744 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1745 		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1746 			if (p->connect &&
1747 				    (p->sink->id == snd_soc_dapm_aif_out) &&
1748 				    p->source->priv) {
1749 				mconfig = p->source->priv;
1750 				return mconfig;
1751 			}
1752 			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1753 			if (mconfig)
1754 				return mconfig;
1755 		}
1756 	}
1757 	return mconfig;
1758 }
1759 
1760 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1761 		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1762 {
1763 	struct snd_soc_dapm_path *p;
1764 	struct skl_module_cfg *mconfig = NULL;
1765 
1766 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1767 		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1768 			if (p->connect &&
1769 				    (p->source->id == snd_soc_dapm_aif_in) &&
1770 				    p->sink->priv) {
1771 				mconfig = p->sink->priv;
1772 				return mconfig;
1773 			}
1774 			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1775 			if (mconfig)
1776 				return mconfig;
1777 		}
1778 	}
1779 	return mconfig;
1780 }
1781 
1782 struct skl_module_cfg *
1783 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1784 {
1785 	struct snd_soc_dapm_widget *w;
1786 	struct skl_module_cfg *mconfig;
1787 
1788 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1789 		w = dai->playback_widget;
1790 		mconfig = skl_get_mconfig_pb_cpr(dai, w);
1791 	} else {
1792 		w = dai->capture_widget;
1793 		mconfig = skl_get_mconfig_cap_cpr(dai, w);
1794 	}
1795 	return mconfig;
1796 }
1797 
1798 static u8 skl_tplg_be_link_type(int dev_type)
1799 {
1800 	int ret;
1801 
1802 	switch (dev_type) {
1803 	case SKL_DEVICE_BT:
1804 		ret = NHLT_LINK_SSP;
1805 		break;
1806 
1807 	case SKL_DEVICE_DMIC:
1808 		ret = NHLT_LINK_DMIC;
1809 		break;
1810 
1811 	case SKL_DEVICE_I2S:
1812 		ret = NHLT_LINK_SSP;
1813 		break;
1814 
1815 	case SKL_DEVICE_HDALINK:
1816 		ret = NHLT_LINK_HDA;
1817 		break;
1818 
1819 	default:
1820 		ret = NHLT_LINK_INVALID;
1821 		break;
1822 	}
1823 
1824 	return ret;
1825 }
1826 
1827 /*
1828  * Fill the BE gateway parameters
1829  * The BE gateway expects a blob of parameters which is kept in the ACPI
1830  * NHLT table, so query it for the interface type (i2s/pdm) and instance.
1831  * The port can have multiple settings, so pick one based on the pipeline
1832  * parameters
1833  */
1834 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1835 				struct skl_module_cfg *mconfig,
1836 				struct skl_pipe_params *params)
1837 {
1838 	struct nhlt_specific_cfg *cfg;
1839 	struct skl_pipe *pipe = mconfig->pipe;
1840 	struct skl_pipe_fmt *pipe_fmt;
1841 	struct skl_dev *skl = get_skl_ctx(dai->dev);
1842 	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1843 	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
1844 
1845 	skl_tplg_fill_dma_id(mconfig, params);
1846 
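	/*
	 * HDA BE links are configured through the link DMA/converter; the
	 * NHLT table only describes SSP and DMIC/PDM endpoints, so no blob
	 * lookup is needed for them.
	 */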
1847 	if (link_type == NHLT_LINK_HDA)
1848 		return 0;
1849 
1850 	if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
1851 		pipe_fmt = &pipe->configs[pipe->pipe_config_idx].out_fmt;
1852 	else
1853 		pipe_fmt = &pipe->configs[pipe->pipe_config_idx].in_fmt;
1854 
1855 	/* update the blob based on the virtual bus_id */
1856 	cfg = intel_nhlt_get_endpoint_blob(dai->dev, skl->nhlt,
1857 					mconfig->vbus_id, link_type,
1858 					pipe_fmt->bps, params->s_cont,
1859 					pipe_fmt->channels, pipe_fmt->freq,
1860 					pipe->direction, dev_type);
1861 	if (cfg) {
1862 		mconfig->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
1863 		mconfig->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
1864 	} else {
1865 		dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
1866 			mconfig->vbus_id, link_type, params->stream,
1867 			params->ch, params->s_freq, params->s_fmt);
1868 		return -EINVAL;
1869 	}
1870 
1871 	return 0;
1872 }
1873 
1874 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1875 				struct snd_soc_dapm_widget *w,
1876 				struct skl_pipe_params *params)
1877 {
1878 	struct snd_soc_dapm_path *p;
1879 	int ret = -EIO;
1880 
1881 	snd_soc_dapm_widget_for_each_source_path(w, p) {
1882 		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
1883 						p->source->priv) {
1884 
1885 			ret = skl_tplg_be_fill_pipe_params(dai,
1886 						p->source->priv, params);
1887 			if (ret < 0)
1888 				return ret;
1889 		} else {
1890 			ret = skl_tplg_be_set_src_pipe_params(dai,
1891 						p->source, params);
1892 			if (ret < 0)
1893 				return ret;
1894 		}
1895 	}
1896 
1897 	return ret;
1898 }
1899 
1900 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1901 	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1902 {
1903 	struct snd_soc_dapm_path *p;
1904 	int ret = -EIO;
1905 
1906 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
1907 		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
1908 						p->sink->priv) {
1909 
1910 			ret = skl_tplg_be_fill_pipe_params(dai,
1911 						p->sink->priv, params);
1912 			if (ret < 0)
1913 				return ret;
1914 		} else {
1915 			ret = skl_tplg_be_set_sink_pipe_params(
1916 						dai, p->sink, params);
1917 			if (ret < 0)
1918 				return ret;
1919 		}
1920 	}
1921 
1922 	return ret;
1923 }
1924 
1925 /*
1926  * BE hw_params can be source parameters (capture) or sink parameters
1927  * (playback). Based on the direction we need to walk either the source
1928  * list or the sink list and set the pipeline parameters
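 *
 * For example, the BE playback DAI widget is fed by the DSP link copier,
 * so its source paths are walked; on capture the DAI widget feeds the
 * DSP, so the sink paths are walked instead.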
1929  */
1930 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1931 				struct skl_pipe_params *params)
1932 {
1933 	struct snd_soc_dapm_widget *w;
1934 
1935 	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1936 		w = dai->playback_widget;
1937 
1938 		return skl_tplg_be_set_src_pipe_params(dai, w, params);
1939 
1940 	} else {
1941 		w = dai->capture_widget;
1942 
1943 		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1944 	}
1945 
1946 	return 0;
1947 }
1948 
1949 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1950 	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
1951 	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
1952 	{SKL_PGA_EVENT, skl_tplg_pga_event},
1953 };
1954 
1955 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1956 	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1957 					skl_tplg_tlv_control_set},
1958 };
1959 
1960 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
1961 	{
1962 		.id = SKL_CONTROL_TYPE_MIC_SELECT,
1963 		.get = skl_tplg_mic_control_get,
1964 		.put = skl_tplg_mic_control_set,
1965 	},
1966 	{
1967 		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
1968 		.get = skl_tplg_multi_config_get,
1969 		.put = skl_tplg_multi_config_set,
1970 	},
1971 	{
1972 		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
1973 		.get = skl_tplg_multi_config_get_dmic,
1974 		.put = skl_tplg_multi_config_set_dmic,
1975 	}
1976 };
1977 
1978 static int skl_tplg_fill_pipe_cfg(struct device *dev,
1979 			struct skl_pipe *pipe, u32 tkn,
1980 			u32 tkn_val, int conf_idx, int dir)
1981 {
1982 	struct skl_pipe_fmt *fmt;
1983 	struct skl_path_config *config;
1984 
1985 	switch (dir) {
1986 	case SKL_DIR_IN:
1987 		fmt = &pipe->configs[conf_idx].in_fmt;
1988 		break;
1989 
1990 	case SKL_DIR_OUT:
1991 		fmt = &pipe->configs[conf_idx].out_fmt;
1992 		break;
1993 
1994 	default:
1995 		dev_err(dev, "Invalid direction: %d\n", dir);
1996 		return -EINVAL;
1997 	}
1998 
1999 	config = &pipe->configs[conf_idx];
2000 
2001 	switch (tkn) {
2002 	case SKL_TKN_U32_CFG_FREQ:
2003 		fmt->freq = tkn_val;
2004 		break;
2005 
2006 	case SKL_TKN_U8_CFG_CHAN:
2007 		fmt->channels = tkn_val;
2008 		break;
2009 
2010 	case SKL_TKN_U8_CFG_BPS:
2011 		fmt->bps = tkn_val;
2012 		break;
2013 
2014 	case SKL_TKN_U32_PATH_MEM_PGS:
2015 		config->mem_pages = tkn_val;
2016 		break;
2017 
2018 	default:
2019 		dev_err(dev, "Invalid token config: %d\n", tkn);
2020 		return -EINVAL;
2021 	}
2022 
2023 	return 0;
2024 }
2025 
2026 static int skl_tplg_fill_pipe_tkn(struct device *dev,
2027 			struct skl_pipe *pipe, u32 tkn,
2028 			u32 tkn_val)
2029 {
2030 
2031 	switch (tkn) {
2032 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2033 		pipe->conn_type = tkn_val;
2034 		break;
2035 
2036 	case SKL_TKN_U32_PIPE_PRIORITY:
2037 		pipe->pipe_priority = tkn_val;
2038 		break;
2039 
2040 	case SKL_TKN_U32_PIPE_MEM_PGS:
2041 		pipe->memory_pages = tkn_val;
2042 		break;
2043 
2044 	case SKL_TKN_U32_PMODE:
2045 		pipe->lp_mode = tkn_val;
2046 		break;
2047 
2048 	case SKL_TKN_U32_PIPE_DIRECTION:
2049 		pipe->direction = tkn_val;
2050 		break;
2051 
2052 	case SKL_TKN_U32_NUM_CONFIGS:
2053 		pipe->nr_cfgs = tkn_val;
2054 		break;
2055 
2056 	default:
2057 		dev_err(dev, "Token not handled %d\n", tkn);
2058 		return -EINVAL;
2059 	}
2060 
2061 	return 0;
2062 }
2063 
2064 /*
2065  * Add a pipeline by parsing the relevant tokens.
2066  * If the pipe already exists, attach it to mconfig and return -EEXIST.
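 *
 * Note: pipes are devm-allocated and stay on skl->ppl_list for the
 * driver's lifetime, so every widget carrying the same PIPE_ID token
 * ends up pointing at the pipe instance created here.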
2067  */
2068 static int skl_tplg_add_pipe(struct device *dev,
2069 		struct skl_module_cfg *mconfig, struct skl_dev *skl,
2070 		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
2071 {
2072 	struct skl_pipeline *ppl;
2073 	struct skl_pipe *pipe;
2074 	struct skl_pipe_params *params;
2075 
2076 	list_for_each_entry(ppl, &skl->ppl_list, node) {
2077 		if (ppl->pipe->ppl_id == tkn_elem->value) {
2078 			mconfig->pipe = ppl->pipe;
2079 			return -EEXIST;
2080 		}
2081 	}
2082 
2083 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2084 	if (!ppl)
2085 		return -ENOMEM;
2086 
2087 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2088 	if (!pipe)
2089 		return -ENOMEM;
2090 
2091 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2092 	if (!params)
2093 		return -ENOMEM;
2094 
2095 	pipe->p_params = params;
2096 	pipe->ppl_id = tkn_elem->value;
2097 	INIT_LIST_HEAD(&pipe->w_list);
2098 
2099 	ppl->pipe = pipe;
2100 	list_add(&ppl->node, &skl->ppl_list);
2101 
2102 	mconfig->pipe = pipe;
2103 	mconfig->pipe->state = SKL_PIPE_INVALID;
2104 
2105 	return 0;
2106 }
2107 
2108 static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
2109 	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
2110 {
2111 	if (uuid_tkn->token == SKL_TKN_UUID) {
2112 		import_guid(guid, uuid_tkn->uuid);
2113 		return 0;
2114 	}
2115 
2116 	dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
2117 
2118 	return -EINVAL;
2119 }
2120 
2121 static int skl_tplg_fill_pin(struct device *dev,
2122 			struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2123 			struct skl_module_pin *m_pin,
2124 			int pin_index)
2125 {
2126 	int ret;
2127 
2128 	switch (tkn_elem->token) {
2129 	case SKL_TKN_U32_PIN_MOD_ID:
2130 		m_pin[pin_index].id.module_id = tkn_elem->value;
2131 		break;
2132 
2133 	case SKL_TKN_U32_PIN_INST_ID:
2134 		m_pin[pin_index].id.instance_id = tkn_elem->value;
2135 		break;
2136 
2137 	case SKL_TKN_UUID:
2138 		ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
2139 			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
2140 		if (ret < 0)
2141 			return ret;
2142 
2143 		break;
2144 
2145 	default:
2146 		dev_err(dev, "%d is not a pin token\n", tkn_elem->token);
2147 		return -EINVAL;
2148 	}
2149 
2150 	return 0;
2151 }
2152 
2153 /*
2154  * Parse for pin config specific tokens to fill up the
2155  * module private data
2156  */
2157 static int skl_tplg_fill_pins_info(struct device *dev,
2158 		struct skl_module_cfg *mconfig,
2159 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2160 		int dir, int pin_count)
2161 {
2162 	int ret;
2163 	struct skl_module_pin *m_pin;
2164 
2165 	switch (dir) {
2166 	case SKL_DIR_IN:
2167 		m_pin = mconfig->m_in_pin;
2168 		break;
2169 
2170 	case SKL_DIR_OUT:
2171 		m_pin = mconfig->m_out_pin;
2172 		break;
2173 
2174 	default:
2175 		dev_err(dev, "Invalid direction value\n");
2176 		return -EINVAL;
2177 	}
2178 
2179 	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
2180 	if (ret < 0)
2181 		return ret;
2182 
2183 	m_pin[pin_count].in_use = false;
2184 	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
2185 
2186 	return 0;
2187 }
2188 
2189 /*
2190  * Fill up input/output module config format based
2191  * on the direction
2192  */
2193 static int skl_tplg_fill_fmt(struct device *dev,
2194 		struct skl_module_fmt *dst_fmt,
2195 		u32 tkn, u32 value)
2196 {
2197 	switch (tkn) {
2198 	case SKL_TKN_U32_FMT_CH:
2199 		dst_fmt->channels  = value;
2200 		break;
2201 
2202 	case SKL_TKN_U32_FMT_FREQ:
2203 		dst_fmt->s_freq = value;
2204 		break;
2205 
2206 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2207 		dst_fmt->bit_depth = value;
2208 		break;
2209 
2210 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2211 		dst_fmt->valid_bit_depth = value;
2212 		break;
2213 
2214 	case SKL_TKN_U32_FMT_CH_CONFIG:
2215 		dst_fmt->ch_cfg = value;
2216 		break;
2217 
2218 	case SKL_TKN_U32_FMT_INTERLEAVE:
2219 		dst_fmt->interleaving_style = value;
2220 		break;
2221 
2222 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2223 		dst_fmt->sample_type = value;
2224 		break;
2225 
2226 	case SKL_TKN_U32_FMT_CH_MAP:
2227 		dst_fmt->ch_map = value;
2228 		break;
2229 
2230 	default:
2231 		dev_err(dev, "Invalid token %d\n", tkn);
2232 		return -EINVAL;
2233 	}
2234 
2235 	return 0;
2236 }
2237 
2238 static int skl_tplg_widget_fill_fmt(struct device *dev,
2239 		struct skl_module_iface *fmt,
2240 		u32 tkn, u32 val, u32 dir, int fmt_idx)
2241 {
2242 	struct skl_module_fmt *dst_fmt;
2243 
2244 	if (!fmt)
2245 		return -EINVAL;
2246 
2247 	switch (dir) {
2248 	case SKL_DIR_IN:
2249 		dst_fmt = &fmt->inputs[fmt_idx].fmt;
2250 		break;
2251 
2252 	case SKL_DIR_OUT:
2253 		dst_fmt = &fmt->outputs[fmt_idx].fmt;
2254 		break;
2255 
2256 	default:
2257 		dev_err(dev, "Invalid direction: %d\n", dir);
2258 		return -EINVAL;
2259 	}
2260 
2261 	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
2262 }
2263 
2264 static void skl_tplg_fill_pin_dynamic_val(
2265 		struct skl_module_pin *mpin, u32 pin_count, u32 value)
2266 {
2267 	int i;
2268 
2269 	for (i = 0; i < pin_count; i++)
2270 		mpin[i].is_dynamic = value;
2271 }
2272 
2273 /*
2274  * The resource table in the manifest has pin specific resources,
2275  * like the pin index and pin buffer size
2276  */
2277 static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
2278 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2279 		struct skl_module_res *res, int pin_idx, int dir)
2280 {
2281 	struct skl_module_pin_resources *m_pin;
2282 
2283 	switch (dir) {
2284 	case SKL_DIR_IN:
2285 		m_pin = &res->input[pin_idx];
2286 		break;
2287 
2288 	case SKL_DIR_OUT:
2289 		m_pin = &res->output[pin_idx];
2290 		break;
2291 
2292 	default:
2293 		dev_err(dev, "Invalid pin direction: %d\n", dir);
2294 		return -EINVAL;
2295 	}
2296 
2297 	switch (tkn_elem->token) {
2298 	case SKL_TKN_MM_U32_RES_PIN_ID:
2299 		m_pin->pin_index = tkn_elem->value;
2300 		break;
2301 
2302 	case SKL_TKN_MM_U32_PIN_BUF:
2303 		m_pin->buf_size = tkn_elem->value;
2304 		break;
2305 
2306 	default:
2307 		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
2308 		return -EINVAL;
2309 	}
2310 
2311 	return 0;
2312 }
2313 
2314 /*
2315  * Fill module specific resources from the manifest's resource
2316  * table like CPS, DMA size, mem_pages.
2317  */
2318 static int skl_tplg_fill_res_tkn(struct device *dev,
2319 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2320 		struct skl_module_res *res,
2321 		int pin_idx, int dir)
2322 {
2323 	int ret, tkn_count = 0;
2324 
2325 	if (!res)
2326 		return -EINVAL;
2327 
2328 	switch (tkn_elem->token) {
2329 	case SKL_TKN_MM_U32_DMA_SIZE:
2330 		res->dma_buffer_size = tkn_elem->value;
2331 		break;
2332 
2333 	case SKL_TKN_MM_U32_CPC:
2334 		res->cpc = tkn_elem->value;
2335 		break;
2336 
2337 	case SKL_TKN_U32_MEM_PAGES:
2338 		res->is_pages = tkn_elem->value;
2339 		break;
2340 
2341 	case SKL_TKN_U32_OBS:
2342 		res->obs = tkn_elem->value;
2343 		break;
2344 
2345 	case SKL_TKN_U32_IBS:
2346 		res->ibs = tkn_elem->value;
2347 		break;
2348 
2349 	case SKL_TKN_MM_U32_RES_PIN_ID:
2350 	case SKL_TKN_MM_U32_PIN_BUF:
2351 		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
2352 						    pin_idx, dir);
2353 		if (ret < 0)
2354 			return ret;
2355 		break;
2356 
2357 	case SKL_TKN_MM_U32_CPS:
2358 	case SKL_TKN_U32_MAX_MCPS:
2359 		/* ignore unused tokens */
2360 		break;
2361 
2362 	default:
2363 		dev_err(dev, "Not a res type token: %d\n", tkn_elem->token);
2364 		return -EINVAL;
2365 
2366 	}
2367 	tkn_count++;
2368 
2369 	return tkn_count;
2370 }
2371 
2372 /*
2373  * Parse tokens to fill up the module private data
2374  */
2375 static int skl_tplg_get_token(struct device *dev,
2376 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2377 		struct skl_dev *skl, struct skl_module_cfg *mconfig)
2378 {
2379 	int tkn_count = 0;
2380 	int ret;
2381 	static int is_pipe_exists;
2382 	static int pin_index, dir, conf_idx;
2383 	struct skl_module_iface *iface = NULL;
2384 	struct skl_module_res *res = NULL;
2385 	int res_idx = mconfig->res_idx;
2386 	int fmt_idx = mconfig->fmt_idx;
2387 
2388 	/*
2389 	 * If the manifest structure contains no modules, fill all
2390 	 * the module data into the 0th index.
2391 	 * res_idx and fmt_idx default to 0.
2392 	 */
2393 	if (skl->nr_modules == 0) {
2394 		res = &mconfig->module->resources[res_idx];
2395 		iface = &mconfig->module->formats[fmt_idx];
2396 	}
2397 
2398 	if (tkn_elem->token > SKL_TKN_MAX)
2399 		return -EINVAL;
2400 
2401 	switch (tkn_elem->token) {
2402 	case SKL_TKN_U8_IN_QUEUE_COUNT:
2403 		mconfig->module->max_input_pins = tkn_elem->value;
2404 		break;
2405 
2406 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
2407 		mconfig->module->max_output_pins = tkn_elem->value;
2408 		break;
2409 
2410 	case SKL_TKN_U8_DYN_IN_PIN:
2411 		if (!mconfig->m_in_pin)
2412 			mconfig->m_in_pin =
2413 				devm_kcalloc(dev, MAX_IN_QUEUE,
2414 					     sizeof(*mconfig->m_in_pin),
2415 					     GFP_KERNEL);
2416 		if (!mconfig->m_in_pin)
2417 			return -ENOMEM;
2418 
2419 		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
2420 					      tkn_elem->value);
2421 		break;
2422 
2423 	case SKL_TKN_U8_DYN_OUT_PIN:
2424 		if (!mconfig->m_out_pin)
2425 			mconfig->m_out_pin =
2426 				devm_kcalloc(dev, MAX_OUT_QUEUE,
2427 					     sizeof(*mconfig->m_out_pin),
2428 					     GFP_KERNEL);
2429 		if (!mconfig->m_out_pin)
2430 			return -ENOMEM;
2431 
2432 		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
2433 					      tkn_elem->value);
2434 		break;
2435 
2436 	case SKL_TKN_U8_TIME_SLOT:
2437 		mconfig->time_slot = tkn_elem->value;
2438 		break;
2439 
2440 	case SKL_TKN_U8_CORE_ID:
2441 		mconfig->core_id = tkn_elem->value;
2442 		break;
2443 
2444 	case SKL_TKN_U8_MOD_TYPE:
2445 		mconfig->m_type = tkn_elem->value;
2446 		break;
2447 
2448 	case SKL_TKN_U8_DEV_TYPE:
2449 		mconfig->dev_type = tkn_elem->value;
2450 		break;
2451 
2452 	case SKL_TKN_U8_HW_CONN_TYPE:
2453 		mconfig->hw_conn_type = tkn_elem->value;
2454 		break;
2455 
2456 	case SKL_TKN_U16_MOD_INST_ID:
2457 		mconfig->id.instance_id =
2458 		tkn_elem->value;
2459 		break;
2460 
2461 	case SKL_TKN_U32_MEM_PAGES:
2462 	case SKL_TKN_U32_MAX_MCPS:
2463 	case SKL_TKN_U32_OBS:
2464 	case SKL_TKN_U32_IBS:
2465 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
2466 		if (ret < 0)
2467 			return ret;
2468 
2469 		break;
2470 
2471 	case SKL_TKN_U32_VBUS_ID:
2472 		mconfig->vbus_id = tkn_elem->value;
2473 		break;
2474 
2475 	case SKL_TKN_U32_PARAMS_FIXUP:
2476 		mconfig->params_fixup = tkn_elem->value;
2477 		break;
2478 
2479 	case SKL_TKN_U32_CONVERTER:
2480 		mconfig->converter = tkn_elem->value;
2481 		break;
2482 
2483 	case SKL_TKN_U32_D0I3_CAPS:
2484 		mconfig->d0i3_caps = tkn_elem->value;
2485 		break;
2486 
2487 	case SKL_TKN_U32_PIPE_ID:
2488 		ret = skl_tplg_add_pipe(dev,
2489 				mconfig, skl, tkn_elem);
2490 
2491 		if (ret < 0) {
2492 			if (ret == -EEXIST) {
2493 				is_pipe_exists = 1;
2494 				break;
2495 			}
2496 			return ret;
2497 		}
2498 
2499 		break;
2500 
2501 	case SKL_TKN_U32_PIPE_CONFIG_ID:
2502 		conf_idx = tkn_elem->value;
2503 		break;
2504 
2505 	case SKL_TKN_U32_PIPE_CONN_TYPE:
2506 	case SKL_TKN_U32_PIPE_PRIORITY:
2507 	case SKL_TKN_U32_PIPE_MEM_PGS:
2508 	case SKL_TKN_U32_PMODE:
2509 	case SKL_TKN_U32_PIPE_DIRECTION:
2510 	case SKL_TKN_U32_NUM_CONFIGS:
2511 		if (is_pipe_exists) {
2512 			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2513 					tkn_elem->token, tkn_elem->value);
2514 			if (ret < 0)
2515 				return ret;
2516 		}
2517 
2518 		break;
2519 
2520 	case SKL_TKN_U32_PATH_MEM_PGS:
2521 	case SKL_TKN_U32_CFG_FREQ:
2522 	case SKL_TKN_U8_CFG_CHAN:
2523 	case SKL_TKN_U8_CFG_BPS:
2524 		if (mconfig->pipe->nr_cfgs) {
2525 			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
2526 					tkn_elem->token, tkn_elem->value,
2527 					conf_idx, dir);
2528 			if (ret < 0)
2529 				return ret;
2530 		}
2531 		break;
2532 
2533 	case SKL_TKN_CFG_MOD_RES_ID:
2534 		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
2535 		break;
2536 
2537 	case SKL_TKN_CFG_MOD_FMT_ID:
2538 		mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
2539 		break;
2540 
2541 	/*
2542 	 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the direction
2543 	 * and the pin count: bit 0 holds the direction and bits 7:4 hold
2544 	 * the pin count.
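	 *
	 * For example (illustrative value only): 0x31 decodes to
	 * dir = 0x31 & BIT(0) = 1 (output) and
	 * pin_index = (0x31 & GENMASK(7, 4)) >> 4 = 3.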
2545 	 */
2546 	case SKL_TKN_U32_DIR_PIN_COUNT:
2547 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2548 		pin_index = (tkn_elem->value &
2549 			SKL_PIN_COUNT_MASK) >> 4;
2550 
2551 		break;
2552 
2553 	case SKL_TKN_U32_FMT_CH:
2554 	case SKL_TKN_U32_FMT_FREQ:
2555 	case SKL_TKN_U32_FMT_BIT_DEPTH:
2556 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2557 	case SKL_TKN_U32_FMT_CH_CONFIG:
2558 	case SKL_TKN_U32_FMT_INTERLEAVE:
2559 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2560 	case SKL_TKN_U32_FMT_CH_MAP:
2561 		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
2562 				tkn_elem->value, dir, pin_index);
2563 
2564 		if (ret < 0)
2565 			return ret;
2566 
2567 		break;
2568 
2569 	case SKL_TKN_U32_PIN_MOD_ID:
2570 	case SKL_TKN_U32_PIN_INST_ID:
2571 	case SKL_TKN_UUID:
2572 		ret = skl_tplg_fill_pins_info(dev,
2573 				mconfig, tkn_elem, dir,
2574 				pin_index);
2575 		if (ret < 0)
2576 			return ret;
2577 
2578 		break;
2579 
2580 	case SKL_TKN_U32_FMT_CFG_IDX:
2581 		if (tkn_elem->value > SKL_MAX_PARAMS_TYPES)
2582 			return -EINVAL;
2583 
2584 		mconfig->fmt_cfg_idx = tkn_elem->value;
2585 		break;
2586 
2587 	case SKL_TKN_U32_CAPS_SIZE:
2588 		mconfig->formats_config[mconfig->fmt_cfg_idx].caps_size =
2589 			tkn_elem->value;
2590 
2591 		break;
2592 
2593 	case SKL_TKN_U32_CAPS_SET_PARAMS:
2594 		mconfig->formats_config[mconfig->fmt_cfg_idx].set_params =
2595 				tkn_elem->value;
2596 		break;
2597 
2598 	case SKL_TKN_U32_CAPS_PARAMS_ID:
2599 		mconfig->formats_config[mconfig->fmt_cfg_idx].param_id =
2600 				tkn_elem->value;
2601 		break;
2602 
2603 	case SKL_TKN_U32_PROC_DOMAIN:
2604 		mconfig->domain =
2605 			tkn_elem->value;
2606 
2607 		break;
2608 
2609 	case SKL_TKN_U32_DMA_BUF_SIZE:
2610 		mconfig->dma_buffer_size = tkn_elem->value;
2611 		break;
2612 
2613 	case SKL_TKN_U8_IN_PIN_TYPE:
2614 	case SKL_TKN_U8_OUT_PIN_TYPE:
2615 	case SKL_TKN_U8_CONN_TYPE:
2616 		break;
2617 
2618 	default:
2619 		dev_err(dev, "Token %d not handled\n",
2620 				tkn_elem->token);
2621 		return -EINVAL;
2622 	}
2623 
2624 	tkn_count++;
2625 
2626 	return tkn_count;
2627 }
2628 
2629 /*
2630  * Parse the vendor array for specific tokens to construct
2631  * module private data
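 *
 * Each vendor array in the private data holds either string, uuid or
 * value elements: string tuples are not used here, the first uuid tuple
 * carries the module GUID (pin UUIDs may follow), and everything else is
 * parsed as token/value pairs.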
2632  */
2633 static int skl_tplg_get_tokens(struct device *dev,
2634 		char *pvt_data,	struct skl_dev *skl,
2635 		struct skl_module_cfg *mconfig, int block_size)
2636 {
2637 	struct snd_soc_tplg_vendor_array *array;
2638 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2639 	int tkn_count = 0, ret;
2640 	int off = 0, tuple_size = 0;
2641 	bool is_module_guid = true;
2642 
2643 	if (block_size <= 0)
2644 		return -EINVAL;
2645 
2646 	while (tuple_size < block_size) {
2647 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2648 
2649 		off += array->size;
2650 
2651 		switch (array->type) {
2652 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2653 			dev_warn(dev, "no string tokens expected for skl tplg\n");
2654 			continue;
2655 
2656 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2657 			if (is_module_guid) {
2658 				ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid,
2659 							array->uuid);
2660 				is_module_guid = false;
2661 			} else {
2662 				ret = skl_tplg_get_token(dev, array->value, skl,
2663 							 mconfig);
2664 			}
2665 
2666 			if (ret < 0)
2667 				return ret;
2668 
2669 			tuple_size += sizeof(*array->uuid);
2670 
2671 			continue;
2672 
2673 		default:
2674 			tkn_elem = array->value;
2675 			tkn_count = 0;
2676 			break;
2677 		}
2678 
2679 		while (tkn_count <= (array->num_elems - 1)) {
2680 			ret = skl_tplg_get_token(dev, tkn_elem,
2681 					skl, mconfig);
2682 
2683 			if (ret < 0)
2684 				return ret;
2685 
2686 			tkn_count = tkn_count + ret;
2687 			tkn_elem++;
2688 		}
2689 
2690 		tuple_size += tkn_count * sizeof(*tkn_elem);
2691 	}
2692 
2693 	return off;
2694 }
2695 
2696 /*
2697  * Every data block is preceded by descriptors holding the number
2698  * of data blocks, the type of each block and its size
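 *
 * Each call returns the value of whichever descriptor token it finds
 * (number of blocks, block type or block size); the caller is expected
 * to know which descriptor comes next in the private data.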
2699  */
2700 static int skl_tplg_get_desc_blocks(struct device *dev,
2701 		struct snd_soc_tplg_vendor_array *array)
2702 {
2703 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2704 
2705 	tkn_elem = array->value;
2706 
2707 	switch (tkn_elem->token) {
2708 	case SKL_TKN_U8_NUM_BLOCKS:
2709 	case SKL_TKN_U8_BLOCK_TYPE:
2710 	case SKL_TKN_U16_BLOCK_SIZE:
2711 		return tkn_elem->value;
2712 
2713 	default:
2714 		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2715 		break;
2716 	}
2717 
2718 	return -EINVAL;
2719 }
2720 
2721 /* Functions to parse private data from configuration file format v4 */
2722 
2723 /*
2724  * Add a pipeline from the topology binary to the driver pipeline list
2725  *
2726  * If it was already added, return that instance;
2727  * otherwise create a new instance and add it to the driver list.
2728  */
2729 static int skl_tplg_add_pipe_v4(struct device *dev,
2730 			struct skl_module_cfg *mconfig, struct skl_dev *skl,
2731 			struct skl_dfw_v4_pipe *dfw_pipe)
2732 {
2733 	struct skl_pipeline *ppl;
2734 	struct skl_pipe *pipe;
2735 	struct skl_pipe_params *params;
2736 
2737 	list_for_each_entry(ppl, &skl->ppl_list, node) {
2738 		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
2739 			mconfig->pipe = ppl->pipe;
2740 			return 0;
2741 		}
2742 	}
2743 
2744 	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2745 	if (!ppl)
2746 		return -ENOMEM;
2747 
2748 	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2749 	if (!pipe)
2750 		return -ENOMEM;
2751 
2752 	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2753 	if (!params)
2754 		return -ENOMEM;
2755 
2756 	pipe->ppl_id = dfw_pipe->pipe_id;
2757 	pipe->memory_pages = dfw_pipe->memory_pages;
2758 	pipe->pipe_priority = dfw_pipe->pipe_priority;
2759 	pipe->conn_type = dfw_pipe->conn_type;
2760 	pipe->state = SKL_PIPE_INVALID;
2761 	pipe->p_params = params;
2762 	INIT_LIST_HEAD(&pipe->w_list);
2763 
2764 	ppl->pipe = pipe;
2765 	list_add(&ppl->node, &skl->ppl_list);
2766 
2767 	mconfig->pipe = pipe;
2768 
2769 	return 0;
2770 }
2771 
2772 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
2773 					struct skl_module_pin *m_pin,
2774 					bool is_dynamic, int max_pin)
2775 {
2776 	int i;
2777 
2778 	for (i = 0; i < max_pin; i++) {
2779 		m_pin[i].id.module_id = dfw_pin[i].module_id;
2780 		m_pin[i].id.instance_id = dfw_pin[i].instance_id;
2781 		m_pin[i].in_use = false;
2782 		m_pin[i].is_dynamic = is_dynamic;
2783 		m_pin[i].pin_state = SKL_PIN_UNBIND;
2784 	}
2785 }
2786 
2787 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
2788 				 struct skl_dfw_v4_module_fmt *src_fmt,
2789 				 int pins)
2790 {
2791 	int i;
2792 
2793 	for (i = 0; i < pins; i++) {
2794 		dst_fmt[i].fmt.channels  = src_fmt[i].channels;
2795 		dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
2796 		dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
2797 		dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
2798 		dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
2799 		dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
2800 		dst_fmt[i].fmt.interleaving_style =
2801 						src_fmt[i].interleaving_style;
2802 		dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
2803 	}
2804 }
2805 
2806 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
2807 				    struct skl_dev *skl, struct device *dev,
2808 				    struct skl_module_cfg *mconfig)
2809 {
2810 	struct skl_dfw_v4_module *dfw =
2811 				(struct skl_dfw_v4_module *)tplg_w->priv.data;
2812 	int ret;
2813 	int idx = mconfig->fmt_cfg_idx;
2814 
2815 	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");
2816 
2817 	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
2818 	if (ret)
2819 		return ret;
2820 	mconfig->id.module_id = -1;
2821 	mconfig->id.instance_id = dfw->instance_id;
2822 	mconfig->module->resources[0].cpc = dfw->max_mcps / 1000;
2823 	mconfig->module->resources[0].ibs = dfw->ibs;
2824 	mconfig->module->resources[0].obs = dfw->obs;
2825 	mconfig->core_id = dfw->core_id;
2826 	mconfig->module->max_input_pins = dfw->max_in_queue;
2827 	mconfig->module->max_output_pins = dfw->max_out_queue;
2828 	mconfig->module->loadable = dfw->is_loadable;
2829 	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
2830 			     MAX_IN_QUEUE);
2831 	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
2832 			     MAX_OUT_QUEUE);
2833 
2834 	mconfig->params_fixup = dfw->params_fixup;
2835 	mconfig->converter = dfw->converter;
2836 	mconfig->m_type = dfw->module_type;
2837 	mconfig->vbus_id = dfw->vbus_id;
2838 	mconfig->module->resources[0].is_pages = dfw->mem_pages;
2839 
2840 	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
2841 	if (ret)
2842 		return ret;
2843 
2844 	mconfig->dev_type = dfw->dev_type;
2845 	mconfig->hw_conn_type = dfw->hw_conn_type;
2846 	mconfig->time_slot = dfw->time_slot;
2847 	mconfig->formats_config[idx].caps_size = dfw->caps.caps_size;
2848 
2849 	mconfig->m_in_pin = devm_kcalloc(dev,
2850 				MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
2851 				GFP_KERNEL);
2852 	if (!mconfig->m_in_pin)
2853 		return -ENOMEM;
2854 
2855 	mconfig->m_out_pin = devm_kcalloc(dev,
2856 				MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
2857 				GFP_KERNEL);
2858 	if (!mconfig->m_out_pin)
2859 		return -ENOMEM;
2860 
2861 	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
2862 				    dfw->is_dynamic_in_pin,
2863 				    mconfig->module->max_input_pins);
2864 	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
2865 				    dfw->is_dynamic_out_pin,
2866 				    mconfig->module->max_output_pins);
2867 
2868 	if (mconfig->formats_config[idx].caps_size) {
2869 		mconfig->formats_config[idx].set_params = dfw->caps.set_params;
2870 		mconfig->formats_config[idx].param_id = dfw->caps.param_id;
2871 		mconfig->formats_config[idx].caps =
2872 		devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
2873 			     GFP_KERNEL);
2874 		if (!mconfig->formats_config[idx].caps)
2875 			return -ENOMEM;
2876 		memcpy(mconfig->formats_config[idx].caps, dfw->caps.caps,
2877 		       dfw->caps.caps_size);
2878 	}
2879 
2880 	return 0;
2881 }
2882 
2883 static int skl_tplg_get_caps_data(struct device *dev, char *data,
2884 				  struct skl_module_cfg *mconfig)
2885 {
2886 	int idx = mconfig->fmt_cfg_idx;
2887 
2888 	if (mconfig->formats_config[idx].caps_size > 0) {
2889 		mconfig->formats_config[idx].caps =
2890 			devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
2891 				     GFP_KERNEL);
2892 		if (!mconfig->formats_config[idx].caps)
2893 			return -ENOMEM;
2894 		memcpy(mconfig->formats_config[idx].caps, data,
2895 		       mconfig->formats_config[idx].caps_size);
2896 	}
2897 
2898 	return mconfig->formats_config[idx].caps_size;
2899 }
2900 
2901 /*
2902  * Parse the private data for tokens and their corresponding values.
2903  * The private data can have multiple data blocks, so a data block
2904  * is preceded by a descriptor for the number of blocks and descriptors
2905  * for the type and size of the succeeding data block.
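 *
 * Illustrative layout of the (v5) private data parsed below:
 *   [NUM_DATA_BLOCKS descriptor]
 *   [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block payload]
 *   ... repeated NUM_DATA_BLOCKS times
 * where a payload is either a tuple array (SKL_TYPE_TUPLE) or raw caps
 * data.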
2906  */
2907 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2908 				struct skl_dev *skl, struct device *dev,
2909 				struct skl_module_cfg *mconfig)
2910 {
2911 	struct snd_soc_tplg_vendor_array *array;
2912 	int num_blocks, block_size, block_type, off = 0;
2913 	char *data;
2914 	int ret;
2915 
2916 	/*
2917 	 * v4 configuration files have a valid UUID at the start of
2918 	 * the widget's private data.
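	 * uuid_is_valid() checks for the textual xxxxxxxx-xxxx-... form used
	 * by the v4 layout; v5+ private data starts with a vendor array
	 * header instead, so this check is enough to tell the two apart.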
2919 	 */
2920 	if (uuid_is_valid((char *)tplg_w->priv.data))
2921 		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);
2922 
2923 	/* Read the NUM_DATA_BLOCKS descriptor */
2924 	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2925 	ret = skl_tplg_get_desc_blocks(dev, array);
2926 	if (ret < 0)
2927 		return ret;
2928 	num_blocks = ret;
2929 
2930 	off += array->size;
2931 	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2932 	while (num_blocks > 0) {
2933 		array = (struct snd_soc_tplg_vendor_array *)
2934 				(tplg_w->priv.data + off);
2935 
2936 		ret = skl_tplg_get_desc_blocks(dev, array);
2937 
2938 		if (ret < 0)
2939 			return ret;
2940 		block_type = ret;
2941 		off += array->size;
2942 
2943 		array = (struct snd_soc_tplg_vendor_array *)
2944 			(tplg_w->priv.data + off);
2945 
2946 		ret = skl_tplg_get_desc_blocks(dev, array);
2947 
2948 		if (ret < 0)
2949 			return ret;
2950 		block_size = ret;
2951 		off += array->size;
2952 
2953 		data = (tplg_w->priv.data + off);
2954 
2955 		if (block_type == SKL_TYPE_TUPLE) {
2956 			ret = skl_tplg_get_tokens(dev, data,
2957 					skl, mconfig, block_size);
2958 		} else {
2959 			ret = skl_tplg_get_caps_data(dev, data, mconfig);
2960 		}
2961 
2962 		if (ret < 0)
2963 			return ret;
2964 
2965 		--num_blocks;
2966 		off += ret;
2967 	}
2968 
2969 	return 0;
2970 }
2971 
2972 static void skl_clear_pin_config(struct snd_soc_component *component,
2973 				struct snd_soc_dapm_widget *w)
2974 {
2975 	int i;
2976 	struct skl_module_cfg *mconfig;
2977 	struct skl_pipe *pipe;
2978 
2979 	if (!strncmp(w->dapm->component->name, component->name,
2980 					strlen(component->name))) {
2981 		mconfig = w->priv;
2982 		pipe = mconfig->pipe;
2983 		for (i = 0; i < mconfig->module->max_input_pins; i++) {
2984 			mconfig->m_in_pin[i].in_use = false;
2985 			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2986 		}
2987 		for (i = 0; i < mconfig->module->max_output_pins; i++) {
2988 			mconfig->m_out_pin[i].in_use = false;
2989 			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2990 		}
2991 		pipe->state = SKL_PIPE_INVALID;
2992 		mconfig->m_state = SKL_MODULE_UNINIT;
2993 	}
2994 }
2995 
2996 void skl_cleanup_resources(struct skl_dev *skl)
2997 {
2998 	struct snd_soc_component *soc_component = skl->component;
2999 	struct snd_soc_dapm_widget *w;
3000 	struct snd_soc_card *card;
3001 
3002 	if (soc_component == NULL)
3003 		return;
3004 
3005 	card = soc_component->card;
3006 	if (!card || !card->instantiated)
3007 		return;
3008 
3009 	list_for_each_entry(w, &card->widgets, list) {
3010 		if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL)
3011 			skl_clear_pin_config(soc_component, w);
3012 	}
3013 
3014 	skl_clear_module_cnt(skl->dsp);
3015 }
3016 
3017 /*
3018  * Topology core widget load callback
3019  *
3020  * This is used to save the private data for each widget, which gives the
3021  * driver information about the module and pipeline parameters that the
3022  * DSP FW expects, like ids, resource values, formats etc.
3023  */
3024 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
3025 				struct snd_soc_dapm_widget *w,
3026 				struct snd_soc_tplg_dapm_widget *tplg_w)
3027 {
3028 	int ret;
3029 	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3030 	struct skl_dev *skl = bus_to_skl(bus);
3031 	struct skl_module_cfg *mconfig;
3032 
3033 	if (!tplg_w->priv.size)
3034 		goto bind_event;
3035 
3036 	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
3037 
3038 	if (!mconfig)
3039 		return -ENOMEM;
3040 
3041 	if (skl->nr_modules == 0) {
3042 		mconfig->module = devm_kzalloc(bus->dev,
3043 				sizeof(*mconfig->module), GFP_KERNEL);
3044 		if (!mconfig->module)
3045 			return -ENOMEM;
3046 	}
3047 
3048 	w->priv = mconfig;
3049 
3050 	/*
3051 	 * the module binary can be loaded later, so set the module id to be
3052 	 * queried when the module is loaded for a use case
3053 	 */
3054 	mconfig->id.module_id = -1;
3055 
3056 	/* To provide backward compatibility, set default as SKL_PARAM_INIT */
3057 	mconfig->fmt_cfg_idx = SKL_PARAM_INIT;
3058 
3059 	/* Parse private data for tuples */
3060 	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
3061 	if (ret < 0)
3062 		return ret;
3063 
3064 	skl_debug_init_module(skl->debugfs, w, mconfig);
3065 
3066 bind_event:
3067 	if (tplg_w->event_type == 0) {
3068 		dev_dbg(bus->dev, "ASoC: No event handler required\n");
3069 		return 0;
3070 	}
3071 
3072 	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
3073 					ARRAY_SIZE(skl_tplg_widget_ops),
3074 					tplg_w->event_type);
3075 
3076 	if (ret) {
3077 		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
3078 					__func__, tplg_w->event_type);
3079 		return -EINVAL;
3080 	}
3081 
3082 	return 0;
3083 }
3084 
3085 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
3086 					struct snd_soc_tplg_bytes_control *bc)
3087 {
3088 	struct skl_algo_data *ac;
3089 	struct skl_dfw_algo_data *dfw_ac =
3090 				(struct skl_dfw_algo_data *)bc->priv.data;
3091 
3092 	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
3093 	if (!ac)
3094 		return -ENOMEM;
3095 
3096 	/* Fill private data */
3097 	ac->max = dfw_ac->max;
3098 	ac->param_id = dfw_ac->param_id;
3099 	ac->set_params = dfw_ac->set_params;
3100 	ac->size = dfw_ac->max;
3101 
3102 	if (ac->max) {
3103 		ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
3104 		if (!ac->params)
3105 			return -ENOMEM;
3106 
3107 		memcpy(ac->params, dfw_ac->params, ac->max);
3108 	}
3109 
3110 	be->dobj.private  = ac;
3111 	return 0;
3112 }
3113 
3114 static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
3115 				struct snd_soc_tplg_enum_control *ec)
3116 {
3117 
3118 	void *data;
3119 
3120 	if (ec->priv.size) {
3121 		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
3122 		if (!data)
3123 			return -ENOMEM;
3124 		memcpy(data, ec->priv.data, ec->priv.size);
3125 		se->dobj.private = data;
3126 	}
3127 
3128 	return 0;
3129 
3130 }
3131 
3132 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
3133 				int index,
3134 				struct snd_kcontrol_new *kctl,
3135 				struct snd_soc_tplg_ctl_hdr *hdr)
3136 {
3137 	struct soc_bytes_ext *sb;
3138 	struct snd_soc_tplg_bytes_control *tplg_bc;
3139 	struct snd_soc_tplg_enum_control *tplg_ec;
3140 	struct hdac_bus *bus  = snd_soc_component_get_drvdata(cmpnt);
3141 	struct soc_enum *se;
3142 
3143 	switch (hdr->ops.info) {
3144 	case SND_SOC_TPLG_CTL_BYTES:
3145 		tplg_bc = container_of(hdr,
3146 				struct snd_soc_tplg_bytes_control, hdr);
3147 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
3148 			sb = (struct soc_bytes_ext *)kctl->private_value;
3149 			if (tplg_bc->priv.size)
3150 				return skl_init_algo_data(
3151 						bus->dev, sb, tplg_bc);
3152 		}
3153 		break;
3154 
3155 	case SND_SOC_TPLG_CTL_ENUM:
3156 		tplg_ec = container_of(hdr,
3157 				struct snd_soc_tplg_enum_control, hdr);
3158 		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
3159 			se = (struct soc_enum *)kctl->private_value;
3160 			if (tplg_ec->priv.size)
3161 				skl_init_enum_data(bus->dev, se, tplg_ec);
3162 		}
3163 
3164 		/*
3165 		 * now that the control initializations are done, remove
3166 		 * write permission for the DMIC configuration enums to
3167 		 * avoid conflicts between NHLT settings and user interaction
3168 		 */
3169 
3170 		if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
3171 			kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;
3172 
3173 		break;
3174 
3175 	default:
3176 		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
3177 			hdr->ops.get, hdr->ops.put, hdr->ops.info);
3178 		break;
3179 	}
3180 
3181 	return 0;
3182 }
3183 
3184 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
3185 		struct snd_soc_tplg_vendor_string_elem *str_elem,
3186 		struct skl_dev *skl)
3187 {
3188 	int tkn_count = 0;
3189 	static int ref_count;
3190 
3191 	switch (str_elem->token) {
3192 	case SKL_TKN_STR_LIB_NAME:
3193 		if (ref_count > skl->lib_count - 1) {
3194 			ref_count = 0;
3195 			return -EINVAL;
3196 		}
3197 
3198 		strncpy(skl->lib_info[ref_count].name,
3199 			str_elem->string,
3200 			ARRAY_SIZE(skl->lib_info[ref_count].name));
3201 		ref_count++;
3202 		break;
3203 
3204 	default:
3205 		dev_err(dev, "Not a string token %d\n", str_elem->token);
3206 		break;
3207 	}
3208 	tkn_count++;
3209 
3210 	return tkn_count;
3211 }
3212 
3213 static int skl_tplg_get_str_tkn(struct device *dev,
3214 		struct snd_soc_tplg_vendor_array *array,
3215 		struct skl_dev *skl)
3216 {
3217 	int tkn_count = 0, ret;
3218 	struct snd_soc_tplg_vendor_string_elem *str_elem;
3219 
3220 	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
3221 	while (tkn_count < array->num_elems) {
3222 		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
3223 		str_elem++;
3224 
3225 		if (ret < 0)
3226 			return ret;
3227 
3228 		tkn_count = tkn_count + ret;
3229 	}
3230 
3231 	return tkn_count;
3232 }
3233 
3234 static int skl_tplg_manifest_fill_fmt(struct device *dev,
3235 		struct skl_module_iface *fmt,
3236 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3237 		u32 dir, int fmt_idx)
3238 {
3239 	struct skl_module_pin_fmt *dst_fmt;
3240 	struct skl_module_fmt *mod_fmt;
3241 	int ret;
3242 
3243 	if (!fmt)
3244 		return -EINVAL;
3245 
3246 	switch (dir) {
3247 	case SKL_DIR_IN:
3248 		dst_fmt = &fmt->inputs[fmt_idx];
3249 		break;
3250 
3251 	case SKL_DIR_OUT:
3252 		dst_fmt = &fmt->outputs[fmt_idx];
3253 		break;
3254 
3255 	default:
3256 		dev_err(dev, "Invalid direction: %d\n", dir);
3257 		return -EINVAL;
3258 	}
3259 
3260 	mod_fmt = &dst_fmt->fmt;
3261 
3262 	switch (tkn_elem->token) {
3263 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3264 		dst_fmt->id = tkn_elem->value;
3265 		break;
3266 
3267 	default:
3268 		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
3269 					tkn_elem->value);
3270 		if (ret < 0)
3271 			return ret;
3272 		break;
3273 	}
3274 
3275 	return 0;
3276 }
3277 
3278 static int skl_tplg_fill_mod_info(struct device *dev,
3279 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3280 		struct skl_module *mod)
3281 {
3282 
3283 	if (!mod)
3284 		return -EINVAL;
3285 
3286 	switch (tkn_elem->token) {
3287 	case SKL_TKN_U8_IN_PIN_TYPE:
3288 		mod->input_pin_type = tkn_elem->value;
3289 		break;
3290 
3291 	case SKL_TKN_U8_OUT_PIN_TYPE:
3292 		mod->output_pin_type = tkn_elem->value;
3293 		break;
3294 
3295 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3296 		mod->max_input_pins = tkn_elem->value;
3297 		break;
3298 
3299 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3300 		mod->max_output_pins = tkn_elem->value;
3301 		break;
3302 
3303 	case SKL_TKN_MM_U8_NUM_RES:
3304 		mod->nr_resources = tkn_elem->value;
3305 		break;
3306 
3307 	case SKL_TKN_MM_U8_NUM_INTF:
3308 		mod->nr_interfaces = tkn_elem->value;
3309 		break;
3310 
3311 	default:
3312 		dev_err(dev, "Invalid mod info token %d\n", tkn_elem->token);
3313 		return -EINVAL;
3314 	}
3315 
3316 	return 0;
3317 }
3318 
3319 
3320 static int skl_tplg_get_int_tkn(struct device *dev,
3321 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3322 		struct skl_dev *skl)
3323 {
3324 	int tkn_count = 0, ret;
3325 	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
3326 	struct skl_module_res *res = NULL;
3327 	struct skl_module_iface *fmt = NULL;
3328 	struct skl_module *mod = NULL;
3329 	static struct skl_astate_param *astate_table;
3330 	static int astate_cfg_idx, count;
3331 	int i;
3332 	size_t size;
3333 
3334 	if (skl->modules) {
3335 		mod = skl->modules[mod_idx];
3336 		res = &mod->resources[res_val_idx];
3337 		fmt = &mod->formats[intf_val_idx];
3338 	}
3339 
3340 	switch (tkn_elem->token) {
3341 	case SKL_TKN_U32_LIB_COUNT:
3342 		skl->lib_count = tkn_elem->value;
3343 		break;
3344 
3345 	case SKL_TKN_U8_NUM_MOD:
3346 		skl->nr_modules = tkn_elem->value;
3347 		skl->modules = devm_kcalloc(dev, skl->nr_modules,
3348 				sizeof(*skl->modules), GFP_KERNEL);
3349 		if (!skl->modules)
3350 			return -ENOMEM;
3351 
3352 		for (i = 0; i < skl->nr_modules; i++) {
3353 			skl->modules[i] = devm_kzalloc(dev,
3354 					sizeof(struct skl_module), GFP_KERNEL);
3355 			if (!skl->modules[i])
3356 				return -ENOMEM;
3357 		}
3358 		break;
3359 
3360 	case SKL_TKN_MM_U8_MOD_IDX:
3361 		mod_idx = tkn_elem->value;
3362 		break;
3363 
3364 	case SKL_TKN_U32_ASTATE_COUNT:
3365 		if (astate_table != NULL) {
3366 			dev_err(dev, "More than one entry for A-State count\n");
3367 			return -EINVAL;
3368 		}
3369 
3370 		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
3371 			dev_err(dev, "Invalid A-State count %d\n",
3372 				tkn_elem->value);
3373 			return -EINVAL;
3374 		}
3375 
3376 		size = struct_size(skl->cfg.astate_cfg, astate_table,
3377 				   tkn_elem->value);
3378 		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
3379 		if (!skl->cfg.astate_cfg)
3380 			return -ENOMEM;
3381 
3382 		astate_table = skl->cfg.astate_cfg->astate_table;
3383 		count = skl->cfg.astate_cfg->count = tkn_elem->value;
3384 		break;
3385 
3386 	case SKL_TKN_U32_ASTATE_IDX:
3387 		if (tkn_elem->value >= count) {
3388 			dev_err(dev, "Invalid A-State index %d\n",
3389 				tkn_elem->value);
3390 			return -EINVAL;
3391 		}
3392 
3393 		astate_cfg_idx = tkn_elem->value;
3394 		break;
3395 
3396 	case SKL_TKN_U32_ASTATE_KCPS:
3397 		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
3398 		break;
3399 
3400 	case SKL_TKN_U32_ASTATE_CLK_SRC:
3401 		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
3402 		break;
3403 
3404 	case SKL_TKN_U8_IN_PIN_TYPE:
3405 	case SKL_TKN_U8_OUT_PIN_TYPE:
3406 	case SKL_TKN_U8_IN_QUEUE_COUNT:
3407 	case SKL_TKN_U8_OUT_QUEUE_COUNT:
3408 	case SKL_TKN_MM_U8_NUM_RES:
3409 	case SKL_TKN_MM_U8_NUM_INTF:
3410 		ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
3411 		if (ret < 0)
3412 			return ret;
3413 		break;
3414 
3415 	case SKL_TKN_U32_DIR_PIN_COUNT:
3416 		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
3417 		pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
3418 		break;
3419 
3420 	case SKL_TKN_MM_U32_RES_ID:
3421 		if (!res)
3422 			return -EINVAL;
3423 
3424 		res->id = tkn_elem->value;
3425 		res_val_idx = tkn_elem->value;
3426 		break;
3427 
3428 	case SKL_TKN_MM_U32_FMT_ID:
3429 		if (!fmt)
3430 			return -EINVAL;
3431 
3432 		fmt->fmt_idx = tkn_elem->value;
3433 		intf_val_idx = tkn_elem->value;
3434 		break;
3435 
3436 	case SKL_TKN_MM_U32_CPS:
3437 	case SKL_TKN_MM_U32_DMA_SIZE:
3438 	case SKL_TKN_MM_U32_CPC:
3439 	case SKL_TKN_U32_MEM_PAGES:
3440 	case SKL_TKN_U32_OBS:
3441 	case SKL_TKN_U32_IBS:
3442 	case SKL_TKN_MM_U32_RES_PIN_ID:
3443 	case SKL_TKN_MM_U32_PIN_BUF:
3444 		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
3445 		if (ret < 0)
3446 			return ret;
3447 
3448 		break;
3449 
3450 	case SKL_TKN_MM_U32_NUM_IN_FMT:
3451 		if (!res)
3452 			return -EINVAL;
3453 
3454 		res->nr_input_pins = tkn_elem->value;
3455 		break;
3456 
3457 	case SKL_TKN_MM_U32_NUM_OUT_FMT:
3458 		if (!res)
3459 			return -EINVAL;
3460 
3461 		res->nr_output_pins = tkn_elem->value;
3462 		break;
3463 
3464 	case SKL_TKN_U32_FMT_CH:
3465 	case SKL_TKN_U32_FMT_FREQ:
3466 	case SKL_TKN_U32_FMT_BIT_DEPTH:
3467 	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
3468 	case SKL_TKN_U32_FMT_CH_CONFIG:
3469 	case SKL_TKN_U32_FMT_INTERLEAVE:
3470 	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
3471 	case SKL_TKN_U32_FMT_CH_MAP:
3472 	case SKL_TKN_MM_U32_INTF_PIN_ID:
3473 		ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
3474 						 dir, pin_idx);
3475 		if (ret < 0)
3476 			return ret;
3477 		break;
3478 
3479 	default:
3480 		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
3481 		return -EINVAL;
3482 	}
3483 	tkn_count++;
3484 
3485 	return tkn_count;
3486 }
3487 
3488 /*
3489  * Fill the manifest structure by parsing the tokens based on the
3490  * type.
3491  */
3492 static int skl_tplg_get_manifest_tkn(struct device *dev,
3493 		char *pvt_data, struct skl_dev *skl,
3494 		int block_size)
3495 {
3496 	int tkn_count = 0, ret;
3497 	int off = 0, tuple_size = 0;
3498 	u8 uuid_index = 0;
3499 	struct snd_soc_tplg_vendor_array *array;
3500 	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
3501 
3502 	if (block_size <= 0)
3503 		return -EINVAL;
3504 
3505 	while (tuple_size < block_size) {
3506 		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
3507 		off += array->size;
3508 		switch (array->type) {
3509 		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
3510 			ret = skl_tplg_get_str_tkn(dev, array, skl);
3511 
3512 			if (ret < 0)
3513 				return ret;
3514 			tkn_count = ret;
3515 
3516 			tuple_size += tkn_count *
3517 				sizeof(struct snd_soc_tplg_vendor_string_elem);
3518 			continue;
3519 
3520 		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
3521 			if (array->uuid->token != SKL_TKN_UUID) {
3522 				dev_err(dev, "Not a UUID token: %d\n",
3523 					array->uuid->token);
3524 				return -EINVAL;
3525 			}
3526 			if (uuid_index >= skl->nr_modules) {
3527 				dev_err(dev, "Too many UUID tokens\n");
3528 				return -EINVAL;
3529 			}
3530 			import_guid(&skl->modules[uuid_index++]->uuid,
3531 				    array->uuid->uuid);
3532 
3533 			tuple_size += sizeof(*array->uuid);
3534 			continue;
3535 
3536 		default:
3537 			tkn_elem = array->value;
3538 			tkn_count = 0;
3539 			break;
3540 		}
3541 
3542 		while (tkn_count <= array->num_elems - 1) {
3543 			ret = skl_tplg_get_int_tkn(dev,
3544 					tkn_elem, skl);
3545 			if (ret < 0)
3546 				return ret;
3547 
3548 			tkn_count = tkn_count + ret;
3549 			tkn_elem++;
3550 		}
3551 		tuple_size += (tkn_count * sizeof(*tkn_elem));
3552 		tkn_count = 0;
3553 	}
3554 
3555 	return off;
3556 }
3557 
3558 /*
3559  * Parse manifest private data for tokens. The private data block is
3560  * preceded by descriptors for type and size of data block.
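 *
 * Unlike the widget private data, only tuple blocks are accepted here; a
 * non-tuple block type makes the parser bail out with -EINVAL.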
3561  */
3562 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
3563 			struct device *dev, struct skl_dev *skl)
3564 {
3565 	struct snd_soc_tplg_vendor_array *array;
3566 	int num_blocks, block_size = 0, block_type, off = 0;
3567 	char *data;
3568 	int ret;
3569 
	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptors */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(manifest->priv.data + off);
		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		data = (manifest->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
					block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			return -EINVAL;
		}
		off += ret;
	}

	return 0;
}

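/*
 * Manifest load callback invoked by the ASoC topology core. Parse the
 * vendor private data attached to the manifest and sanity-check the
 * resulting library count.
 */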
static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
				struct snd_soc_tplg_manifest *manifest)
{
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct skl_dev *skl = bus_to_skl(bus);
	int ret;

	/* proceed only if we have private data defined */
	if (manifest->priv.size == 0)
		return 0;

	ret = skl_tplg_get_manifest_data(manifest, bus->dev, skl);
	if (ret < 0)
		return ret;

	if (skl->lib_count > SKL_MAX_LIB) {
		dev_err(bus->dev, "Exceeding max Library count. Got: %d\n",
					skl->lib_count);
		return -EINVAL;
	}

	return 0;
}

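/*
 * Topology complete callback. Once all objects are loaded, pick the DMIC
 * multi-config enum entry that matches the number of DMICs reported in the
 * machine parameters (e.g. the text containing "c2" for a 2-channel DMIC
 * array) and apply it through the control's put handler,
 * skl_tplg_multi_config_set_dmic().
 */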
static int skl_tplg_complete(struct snd_soc_component *component)
{
	struct snd_soc_dobj *dobj;
	struct snd_soc_acpi_mach *mach;
	struct snd_ctl_elem_value *val;
	int i;

	val = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	mach = dev_get_platdata(component->card->dev);
	list_for_each_entry(dobj, &component->dobj_list, list) {
		struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
		struct soc_enum *se;
		char **texts;
		char chan_text[4];

		if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
		    kcontrol->put != skl_tplg_multi_config_set_dmic)
			continue;

		se = (struct soc_enum *)kcontrol->private_value;
		texts = dobj->control.dtexts;
		sprintf(chan_text, "c%d", mach->mach_params.dmic_num);

		for (i = 0; i < se->items; i++) {
			if (strstr(texts[i], chan_text)) {
				memset(val, 0, sizeof(*val));
				val->value.enumerated.item[0] = i;
				kcontrol->put(kcontrol, val);
			}
		}
	}

	kfree(val);
	return 0;
}

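/*
 * Callback table handed to the ASoC topology core; it routes widget,
 * control, DAI and manifest load events back into this driver.
 */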
static struct snd_soc_tplg_ops skl_tplg_ops = {
	.widget_load = skl_tplg_widget_load,
	.control_load = skl_tplg_control_load,
	.bytes_ext_ops = skl_tlv_ops,
	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
	.io_ops = skl_tplg_kcontrol_ops,
	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
	.manifest = skl_manifest_load,
	.dai_load = skl_dai_load,
	.complete = skl_tplg_complete,
};

/*
 * A pipe can have multiple modules, each of which is also a DAPM widget.
 * While managing a pipeline we need the list of all widgets it contains,
 * so this helper - skl_tplg_create_pipe_widget_list() - collects the
 * SKL-type widgets of each pipeline.
 */
static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mcfg = NULL;
	struct skl_pipe_module *p_module = NULL;
	struct skl_pipe *pipe;

	list_for_each_entry(w, &component->card->widgets, list) {
		if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
			mcfg = w->priv;
			pipe = mcfg->pipe;

			p_module = devm_kzalloc(component->dev,
						sizeof(*p_module), GFP_KERNEL);
			if (!p_module)
				return -ENOMEM;

			p_module->w = w;
			list_add_tail(&p_module->node, &pipe->w_list);
		}
	}

	return 0;
}

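/*
 * A pipeline that contains both an HDA host DMA module and a link-side
 * module moves data straight from host to link, so mark it as passthru;
 * everything else is treated as a regular pipeline.
 */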
static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	bool host_found = false, link_found = false;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			host_found = true;
		else if (mconfig->dev_type != SKL_DEVICE_NONE)
			link_found = true;
	}

	pipe->passthru = host_found && link_found;
}

/*
 * SKL topology init routine
 */
int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
{
	int ret;
	const struct firmware *fw;
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;

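	/*
	 * Topology firmware lookup order: skl->tplg_name first, then an
	 * alternative "<machine driver name>-tplg.bin", and finally the
	 * generic "dfw_sst.bin" fallback.
	 */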
	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
	if (ret < 0) {
		char alt_tplg_name[64];

		snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin",
			 skl->mach->drv_name);
		dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s\n",
			 skl->tplg_name, ret, alt_tplg_name);

		ret = request_firmware(&fw, alt_tplg_name, bus->dev);
		if (!ret)
			goto component_load;

		dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin\n",
			 alt_tplg_name, ret);

		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
		if (ret < 0) {
			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
					"dfw_sst.bin", ret);
			return ret;
		}
	}

component_load:
	ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw);
	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed %d\n", ret);
		goto err;
	}

	ret = skl_tplg_create_pipe_widget_list(component);
	if (ret < 0) {
		dev_err(bus->dev, "tplg create pipe widget list failed %d\n",
				ret);
		goto err;
	}

	list_for_each_entry(ppl, &skl->ppl_list, node)
		skl_tplg_set_pipe_type(skl, ppl->pipe);

err:
	release_firmware(fw);
	return ret;
}

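/*
 * Tear down what skl_tplg_init() set up: drop the pipelines from the
 * driver's list and let the topology core free its dynamic objects.
 */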
void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus)
{
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl, *tmp;

	list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node)
		list_del(&ppl->node);

	/* clean up topology */
	snd_soc_tplg_component_remove(component);
}