1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021 Intel Corporation. All rights reserved.
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 //          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8 
9 #include <sound/intel-nhlt.h>
10 #include <sound/pcm_params.h>
11 #include <sound/soc.h>
12 #include "avs.h"
13 #include "control.h"
14 #include "path.h"
15 #include "topology.h"
16 
17 /* Must be called with adev->comp_list_mutex held. */
18 static struct avs_tplg *
avs_path_find_tplg(struct avs_dev * adev,const char * name)19 avs_path_find_tplg(struct avs_dev *adev, const char *name)
20 {
21 	struct avs_soc_component *acomp;
22 
23 	list_for_each_entry(acomp, &adev->comp_list, node)
24 		if (!strcmp(acomp->tplg->name, name))
25 			return acomp->tplg;
26 	return NULL;
27 }
28 
29 static struct avs_path_module *
avs_path_find_module(struct avs_path_pipeline * ppl,u32 template_id)30 avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
31 {
32 	struct avs_path_module *mod;
33 
34 	list_for_each_entry(mod, &ppl->mod_list, node)
35 		if (mod->template->id == template_id)
36 			return mod;
37 	return NULL;
38 }
39 
40 static struct avs_path_pipeline *
avs_path_find_pipeline(struct avs_path * path,u32 template_id)41 avs_path_find_pipeline(struct avs_path *path, u32 template_id)
42 {
43 	struct avs_path_pipeline *ppl;
44 
45 	list_for_each_entry(ppl, &path->ppl_list, node)
46 		if (ppl->template->id == template_id)
47 			return ppl;
48 	return NULL;
49 }
50 
51 static struct avs_path *
avs_path_find_path(struct avs_dev * adev,const char * name,u32 template_id)52 avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
53 {
54 	struct avs_tplg_path_template *pos, *template = NULL;
55 	struct avs_tplg *tplg;
56 	struct avs_path *path;
57 
58 	tplg = avs_path_find_tplg(adev, name);
59 	if (!tplg)
60 		return NULL;
61 
62 	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
63 		if (pos->id == template_id) {
64 			template = pos;
65 			break;
66 		}
67 	}
68 	if (!template)
69 		return NULL;
70 
71 	spin_lock(&adev->path_list_lock);
72 	/* Only one variant of given path template may be instantiated at a time. */
73 	list_for_each_entry(path, &adev->path_list, node) {
74 		if (path->template->owner == template) {
75 			spin_unlock(&adev->path_list_lock);
76 			return path;
77 		}
78 	}
79 
80 	spin_unlock(&adev->path_list_lock);
81 	return NULL;
82 }
83 
avs_test_hw_params(struct snd_pcm_hw_params * params,struct avs_audio_format * fmt)84 static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
85 			       struct avs_audio_format *fmt)
86 {
87 	return (params_rate(params) == fmt->sampling_freq &&
88 		params_channels(params) == fmt->num_channels &&
89 		params_physical_width(params) == fmt->bit_depth &&
90 		params_width(params) == fmt->valid_bit_depth);
91 }
92 
93 static struct avs_tplg_path *
avs_path_find_variant(struct avs_dev * adev,struct avs_tplg_path_template * template,struct snd_pcm_hw_params * fe_params,struct snd_pcm_hw_params * be_params)94 avs_path_find_variant(struct avs_dev *adev,
95 		      struct avs_tplg_path_template *template,
96 		      struct snd_pcm_hw_params *fe_params,
97 		      struct snd_pcm_hw_params *be_params)
98 {
99 	struct avs_tplg_path *variant;
100 
101 	list_for_each_entry(variant, &template->path_list, node) {
102 		dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
103 			variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
104 			variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
105 		dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
106 			variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
107 			variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);
108 
109 		if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
110 		    variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
111 			return variant;
112 	}
113 
114 	return NULL;
115 }
116 
117 __maybe_unused
avs_dma_type_is_host(u32 dma_type)118 static bool avs_dma_type_is_host(u32 dma_type)
119 {
120 	return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
121 	       dma_type == AVS_DMA_HDA_HOST_INPUT;
122 }
123 
__maybe_unused
/* Any gateway that is not a host gateway is a link gateway. */
static bool avs_dma_type_is_link(u32 dma_type)
{
	return !avs_dma_type_is_host(dma_type);
}
129 
130 __maybe_unused
avs_dma_type_is_output(u32 dma_type)131 static bool avs_dma_type_is_output(u32 dma_type)
132 {
133 	return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
134 	       dma_type == AVS_DMA_HDA_LINK_OUTPUT ||
135 	       dma_type == AVS_DMA_I2S_LINK_OUTPUT;
136 }
137 
__maybe_unused
/* Any gateway that is not an output gateway is an input (capture) one. */
static bool avs_dma_type_is_input(u32 dma_type)
{
	return !avs_dma_type_is_output(dma_type);
}
143 
/*
 * Create and initialize a Copier module instance on the DSP.
 *
 * Copiers implement gateways - the points where audio enters or leaves the
 * firmware. Depending on the DMA type selected by topology, the gateway
 * configuration BLOB is obtained from the NHLT ACPI table (I2S, DMIC) or
 * reduced to a node id carrying the stream's DMA channel (HDA host/link).
 *
 * Returns 0 on success or a negative error code otherwise.
 */
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct nhlt_acpi_table *nhlt = adev->nhlt;
	struct avs_tplg_module *t = mod->template;
	struct avs_copier_cfg *cfg;
	struct nhlt_specific_cfg *ep_blob;
	union avs_connector_node_id node_id = {0};
	size_t cfg_size, data_size = 0;
	void *data = NULL;
	u32 dma_type;
	int ret;

	dma_type = t->cfg_ext->copier.dma_type;
	node_id.dma_type = dma_type;

	switch (dma_type) {
		struct avs_audio_format *fmt;
		int direction;

	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		if (avs_dma_type_is_input(dma_type))
			direction = SNDRV_PCM_STREAM_CAPTURE;
		else
			direction = SNDRV_PCM_STREAM_PLAYBACK;

		/* Topology may override the format used for the NHLT lookup. */
		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else if (direction == SNDRV_PCM_STREAM_CAPTURE)
			fmt = t->in_fmt;
		else
			fmt = t->cfg_ext->copier.out_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(adev->dev,
			nhlt, t->cfg_ext->copier.vindex.i2s.instance,
			NHLT_LINK_SSP, fmt->valid_bit_depth, fmt->bit_depth,
			fmt->num_channels, fmt->sampling_freq, direction,
			NHLT_DEVICE_I2S);
		if (!ep_blob) {
			dev_err(adev->dev, "no I2S ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* I2S gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_DMIC_LINK_INPUT:
		/* DMIC gateways are capture-only. */
		direction = SNDRV_PCM_STREAM_CAPTURE;

		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else
			fmt = t->in_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, nhlt, 0,
				NHLT_LINK_DMIC, fmt->valid_bit_depth,
				fmt->bit_depth, fmt->num_channels,
				fmt->sampling_freq, direction, NHLT_DEVICE_DMIC);
		if (!ep_blob) {
			dev_err(adev->dev, "no DMIC ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* DMIC gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* HOST gateway's vindex is dynamically assigned with DMA id */
		node_id.vindex = mod->owner->owner->dma_id;
		break;

	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		/* Link gateways combine static topology data with the DMA id. */
		node_id.vindex = t->cfg_ext->copier.vindex.val |
				 mod->owner->owner->dma_id;
		break;

	case INVALID_OBJECT_ID:
	default:
		/* No gateway - plain data-processing copier. */
		node_id = INVALID_NODE_ID;
		break;
	}

	cfg_size = sizeof(*cfg) + data_size;
	/* Every config-BLOB contains gateway attributes. */
	if (data_size)
		cfg_size -= sizeof(cfg->gtw_cfg.config.attrs);

	cfg = kzalloc(cfg_size, GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *t->cfg_ext->copier.out_fmt;
	cfg->feature_mask = t->cfg_ext->copier.feature_mask;
	cfg->gtw_cfg.node_id = node_id;
	cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size;
	/* config_length in DWORDs */
	cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4);
	if (data)
		memcpy(&cfg->gtw_cfg.config, data, data_size);

	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	kfree(cfg);
	return ret;
}
267 
avs_get_module_control(struct avs_path_module * mod)268 static struct avs_control_data *avs_get_module_control(struct avs_path_module *mod)
269 {
270 	struct avs_tplg_module *t = mod->template;
271 	struct avs_tplg_path_template *path_tmpl;
272 	struct snd_soc_dapm_widget *w;
273 	int i;
274 
275 	path_tmpl = t->owner->owner->owner;
276 	w = path_tmpl->w;
277 
278 	for (i = 0; i < w->num_kcontrols; i++) {
279 		struct avs_control_data *ctl_data;
280 		struct soc_mixer_control *mc;
281 
282 		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
283 		ctl_data = (struct avs_control_data *)mc->dobj.private;
284 		if (ctl_data->id == t->ctl_id)
285 			return ctl_data;
286 	}
287 
288 	return NULL;
289 }
290 
avs_peakvol_create(struct avs_dev * adev,struct avs_path_module * mod)291 static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
292 {
293 	struct avs_tplg_module *t = mod->template;
294 	struct avs_control_data *ctl_data;
295 	struct avs_peakvol_cfg *cfg;
296 	int volume = S32_MAX;
297 	size_t size;
298 	int ret;
299 
300 	ctl_data = avs_get_module_control(mod);
301 	if (ctl_data)
302 		volume = ctl_data->volume;
303 
304 	/* As 2+ channels controls are unsupported, have a single block for all channels. */
305 	size = struct_size(cfg, vols, 1);
306 	cfg = kzalloc(size, GFP_KERNEL);
307 	if (!cfg)
308 		return -ENOMEM;
309 
310 	cfg->base.cpc = t->cfg_base->cpc;
311 	cfg->base.ibs = t->cfg_base->ibs;
312 	cfg->base.obs = t->cfg_base->obs;
313 	cfg->base.is_pages = t->cfg_base->is_pages;
314 	cfg->base.audio_fmt = *t->in_fmt;
315 	cfg->vols[0].target_volume = volume;
316 	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
317 	cfg->vols[0].curve_type = AVS_AUDIO_CURVE_NONE;
318 	cfg->vols[0].curve_duration = 0;
319 
320 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
321 				  t->domain, cfg, size, &mod->instance_id);
322 
323 	kfree(cfg);
324 	return ret;
325 }
326 
/*
 * Create and initialize an UpDownMixer module instance on the DSP.
 * All configuration, including the mixing coefficients, comes from topology.
 */
static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_updown_mixer_cfg cfg;
	int i;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
	/* Copy per-channel mixing coefficients provided by topology. */
	for (i = 0; i < AVS_CHANNELS_MAX; i++)
		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
348 
/*
 * Create and initialize an SRC (sample rate converter) module instance on
 * the DSP. Output frequency is dictated by topology.
 */
static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_src_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->src.out_freq;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
365 
avs_asrc_create(struct avs_dev * adev,struct avs_path_module * mod)366 static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
367 {
368 	struct avs_tplg_module *t = mod->template;
369 	struct avs_asrc_cfg cfg;
370 
371 	cfg.base.cpc = t->cfg_base->cpc;
372 	cfg.base.ibs = t->cfg_base->ibs;
373 	cfg.base.obs = t->cfg_base->obs;
374 	cfg.base.is_pages = t->cfg_base->is_pages;
375 	cfg.base.audio_fmt = *t->in_fmt;
376 	cfg.out_freq = t->cfg_ext->asrc.out_freq;
377 	cfg.mode = t->cfg_ext->asrc.mode;
378 	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;
379 
380 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
381 				   t->core_id, t->domain, &cfg, sizeof(cfg),
382 				   &mod->instance_id);
383 }
384 
/*
 * Create and initialize an AEC (acoustic echo cancellation) module instance
 * on the DSP. Reference and output formats come from topology.
 */
static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_aec_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
403 
/*
 * Create and initialize a MUX module instance on the DSP.
 * Reference and output formats come from topology.
 */
static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_mux_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
421 
/*
 * Create and initialize a WoV (wake-on-voice) module instance on the DSP.
 */
static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_wov_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
438 
/*
 * Create and initialize a MicSelect module instance on the DSP.
 * Output format comes from topology.
 */
static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_micsel_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
455 
/*
 * Create and initialize a module instance that needs only the base
 * configuration (e.g. mixin, mixout, keyphrase buffer).
 */
static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_modcfg_base cfg;

	cfg.cpc = t->cfg_base->cpc;
	cfg.ibs = t->cfg_base->ibs;
	cfg.obs = t->cfg_base->obs;
	cfg.is_pages = t->cfg_base->is_pages;
	cfg.audio_fmt = *t->in_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
471 
/*
 * Create and initialize a module instance using the generic extension
 * configuration: base config followed by per-pin format descriptors.
 * Used for all module types without a dedicated creation handler.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
	struct avs_modcfg_ext *cfg;
	size_t cfg_size, num_pins;
	int ret, i;

	/* Payload is variable-length: one pin format per input and output pin. */
	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
	cfg_size = struct_size(cfg, pin_fmts, num_pins);

	cfg = kzalloc(cfg_size, GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->num_input_pins = tcfg->generic.num_input_pins;
	cfg->num_output_pins = tcfg->generic.num_output_pins;

	/* configure pin formats */
	for (i = 0; i < num_pins; i++) {
		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
		struct avs_pin_format *pin = &cfg->pin_fmts[i];

		pin->pin_index = tpin->pin_index;
		pin->iobs = tpin->iobs;
		pin->audio_fmt = *tpin->fmt;
	}

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	kfree(cfg);
	return ret;
}
511 
avs_probe_create(struct avs_dev * adev,struct avs_path_module * mod)512 static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
513 {
514 	dev_err(adev->dev, "Probe module can't be instantiated by topology");
515 	return -EINVAL;
516 }
517 
/* Maps a module-type GUID to its dedicated creation routine. */
struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};

/*
 * Creation handlers for all module types known to the driver. Types not
 * listed here fall back to the generic extension-config routine, see
 * avs_path_module_type_create().
 */
static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
};
539 
avs_path_module_type_create(struct avs_dev * adev,struct avs_path_module * mod)540 static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
541 {
542 	const guid_t *type = &mod->template->cfg_ext->type;
543 
544 	for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
545 		if (guid_equal(type, avs_module_create[i].guid))
546 			return avs_module_create[i].create(adev, mod);
547 
548 	return avs_modext_create(adev, mod);
549 }
550 
/* Release memory of a path module; DSP-side teardown is done separately. */
static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}
555 
556 static struct avs_path_module *
avs_path_module_create(struct avs_dev * adev,struct avs_path_pipeline * owner,struct avs_tplg_module * template)557 avs_path_module_create(struct avs_dev *adev,
558 		       struct avs_path_pipeline *owner,
559 		       struct avs_tplg_module *template)
560 {
561 	struct avs_path_module *mod;
562 	int module_id, ret;
563 
564 	module_id = avs_get_module_id(adev, &template->cfg_ext->type);
565 	if (module_id < 0)
566 		return ERR_PTR(module_id);
567 
568 	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
569 	if (!mod)
570 		return ERR_PTR(-ENOMEM);
571 
572 	mod->template = template;
573 	mod->module_id = module_id;
574 	mod->owner = owner;
575 	INIT_LIST_HEAD(&mod->node);
576 
577 	ret = avs_path_module_type_create(adev, mod);
578 	if (ret) {
579 		dev_err(adev->dev, "module-type create failed: %d\n", ret);
580 		kfree(mod);
581 		return ERR_PTR(ret);
582 	}
583 
584 	return mod;
585 }
586 
/*
 * Resolve a binding's endpoints: find the local module and the target
 * module (possibly living in another, already instantiated path) and record
 * which side is source and which is sink along with their pin indices.
 *
 * Returns 0 on success or -EINVAL when any endpoint cannot be found.
 */
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
{
	struct avs_path_module *this_mod, *target_mod;
	struct avs_path_pipeline *target_ppl;
	struct avs_path *target_path;
	struct avs_tplg_binding *t;

	t = binding->template;
	this_mod = avs_path_find_module(binding->owner,
					t->mod_id);
	if (!this_mod) {
		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
		return -EINVAL;
	}

	/* update with target_tplg_name too */
	target_path = avs_path_find_path(adev, t->target_tplg_name,
					 t->target_path_tmpl_id);
	if (!target_path) {
		dev_err(adev->dev, "target path %s:%d not found\n",
			t->target_tplg_name, t->target_path_tmpl_id);
		return -EINVAL;
	}

	target_ppl = avs_path_find_pipeline(target_path,
					    t->target_ppl_id);
	if (!target_ppl) {
		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
		return -EINVAL;
	}

	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
	if (!target_mod) {
		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
		return -EINVAL;
	}

	/* Topology tells whether the local module is the sink of the binding. */
	if (t->is_sink) {
		binding->sink = this_mod;
		binding->sink_pin = t->mod_pin;
		binding->source = target_mod;
		binding->source_pin = t->target_mod_pin;
	} else {
		binding->sink = target_mod;
		binding->sink_pin = t->target_mod_pin;
		binding->source = this_mod;
		binding->source_pin = t->mod_pin;
	}

	return 0;
}
638 
/* Release memory of a path binding; endpoints are owned by their pipelines. */
static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}
643 
avs_path_binding_create(struct avs_dev * adev,struct avs_path_pipeline * owner,struct avs_tplg_binding * t)644 static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
645 							struct avs_path_pipeline *owner,
646 							struct avs_tplg_binding *t)
647 {
648 	struct avs_path_binding *binding;
649 
650 	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
651 	if (!binding)
652 		return ERR_PTR(-ENOMEM);
653 
654 	binding->template = t;
655 	binding->owner = owner;
656 	INIT_LIST_HEAD(&binding->node);
657 
658 	return binding;
659 }
660 
avs_path_pipeline_arm(struct avs_dev * adev,struct avs_path_pipeline * ppl)661 static int avs_path_pipeline_arm(struct avs_dev *adev,
662 				 struct avs_path_pipeline *ppl)
663 {
664 	struct avs_path_module *mod;
665 
666 	list_for_each_entry(mod, &ppl->mod_list, node) {
667 		struct avs_path_module *source, *sink;
668 		int ret;
669 
670 		/*
671 		 * Only one module (so it's implicitly last) or it is the last
672 		 * one, either way we don't have next module to bind it to.
673 		 */
674 		if (mod == list_last_entry(&ppl->mod_list,
675 					   struct avs_path_module, node))
676 			break;
677 
678 		/* bind current module to next module on list */
679 		source = mod;
680 		sink = list_next_entry(mod, node);
681 		if (!source || !sink)
682 			return -EINVAL;
683 
684 		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
685 				   sink->module_id, sink->instance_id, 0, 0);
686 		if (ret)
687 			return AVS_IPC_RET(ret);
688 	}
689 
690 	return 0;
691 }
692 
/*
 * Tear down a pipeline: free its bindings, delete the pipeline and all of
 * its module instances on the DSP, then release local memory. The pipeline
 * is also removed from its owner's list.
 */
static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}
717 
/*
 * Instantiate a pipeline for given topology template: create the DSP
 * pipeline, then all of its modules and bindings. On any failure, partially
 * created resources are released via avs_path_pipeline_free().
 *
 * Returns the new pipeline or an ERR_PTR() on failure.
 */
static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(-ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	INIT_LIST_HEAD(&ppl->binding_list);
	INIT_LIST_HEAD(&ppl->mod_list);
	INIT_LIST_HEAD(&ppl->node);

	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
				      cfg->lp, cfg->attributes,
				      &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n", ret);
		kfree(ppl);
		return ERR_PTR(ret);
	}

	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, ppl, tmod);
		if (IS_ERR(mod)) {
			ret = PTR_ERR(mod);
			dev_err(adev->dev, "error creating module %d\n", ret);
			goto init_err;
		}

		list_add_tail(&mod->node, &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
		if (IS_ERR(binding)) {
			ret = PTR_ERR(binding);
			dev_err(adev->dev, "error creating binding %d\n", ret);
			goto init_err;
		}

		list_add_tail(&binding->node, &ppl->binding_list);
	}

	return ppl;

init_err:
	/* Releases the DSP pipeline and every module created so far. */
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(ret);
}
778 
/*
 * Initialize a path: create all pipelines described by the template and add
 * the path to the device-wide list. On pipeline-creation failure the caller
 * is expected to clean up via avs_path_free_unlocked(); list heads are
 * initialized up front so that cleanup is safe at any point.
 */
static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
			 struct avs_tplg_path *template, u32 dma_id)
{
	struct avs_tplg_pipeline *tppl;

	path->owner = adev;
	path->template = template;
	path->dma_id = dma_id;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);

	/* create all the pipelines */
	list_for_each_entry(tppl, &template->ppl_list, node) {
		struct avs_path_pipeline *ppl;

		ppl = avs_path_pipeline_create(adev, path, tppl);
		if (IS_ERR(ppl))
			return PTR_ERR(ppl);

		list_add_tail(&ppl->node, &path->ppl_list);
	}

	spin_lock(&adev->path_list_lock);
	list_add_tail(&path->node, &adev->path_list);
	spin_unlock(&adev->path_list_lock);

	return 0;
}
807 
/*
 * Arm all pipelines of a path: resolve binding endpoints first (no IPC
 * traffic), then chain-bind each pipeline's internal modules over IPC.
 */
static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_path_binding *binding;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/*
		 * Arm all ppl bindings before binding internal modules
		 * as it costs no IPCs which isn't true for the latter.
		 */
		list_for_each_entry(binding, &ppl->binding_list, node) {
			ret = avs_path_binding_arm(adev, binding);
			if (ret < 0)
				return ret;
		}

		ret = avs_path_pipeline_arm(adev, ppl);
		if (ret < 0)
			return ret;
	}

	return 0;
}
832 
/*
 * Tear down a path and release its memory. Caller must hold
 * adev->path_mutex; only the path-list manipulation takes the spinlock.
 */
static void avs_path_free_unlocked(struct avs_path *path)
{
	struct avs_path_pipeline *ppl, *save;

	spin_lock(&path->owner->path_list_lock);
	list_del(&path->node);
	spin_unlock(&path->owner->path_list_lock);

	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
		avs_path_pipeline_free(path->owner, ppl);

	kfree(path);
}
846 
/*
 * Allocate, initialize and arm a path for given template variant. Caller
 * must hold adev->path_mutex. Returns the new path or an ERR_PTR(); any
 * partially constructed path is torn down on failure.
 */
static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
						 struct avs_tplg_path *template)
{
	struct avs_path *path;
	int ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = avs_path_init(adev, path, template, dma_id);
	if (ret < 0)
		goto err;

	ret = avs_path_arm(adev, path);
	if (ret < 0)
		goto err;

	/* State transitions to RESET/PAUSED/RUNNING happen later, on trigger. */
	path->state = AVS_PPL_STATE_INVALID;
	return path;
err:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}
871 
/* Tear down a path; serialized against path creation via path_mutex. */
void avs_path_free(struct avs_path *path)
{
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);
	avs_path_free_unlocked(path);
	mutex_unlock(&adev->path_mutex);
}
880 
/*
 * Create a path for given path template, selecting the variant that matches
 * the front-end and back-end hw_params. Returns the new path or an
 * ERR_PTR() on failure.
 */
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);

	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}
907 
/*
 * Prepare a binding's source module for the upcoming bind operation.
 * Only copier modules bound through an output pin other than 0 need this -
 * their per-pin sink format is configured over IPC before binding.
 *
 * Returns 0 on success or an AVS_IPC_RET() error code otherwise.
 */
static int avs_path_bind_prepare(struct avs_dev *adev,
				 struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * only copier modules about to be bound
	 * to output pin other than 0 need preparation
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id, binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}
938 
/*
 * Establish every binding of a path on the DSP: prepare the source module
 * (if needed) and issue a bind IPC for each source/sink pair.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
int avs_path_bind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_path_bind_prepare(adev, binding);
			if (ret < 0)
				return ret;

			ret = avs_ipc_bind(adev, source->module_id,
					   source->instance_id, sink->module_id,
					   sink->instance_id, binding->sink_pin,
					   binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "bind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}
971 
/*
 * Sever every binding of a path on the DSP - the reverse of
 * avs_path_bind(). Returns 0 on success or a negative error code otherwise.
 */
int avs_path_unbind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_ipc_unbind(adev, source->module_id,
					     source->instance_id, sink->module_id,
					     sink->instance_id, binding->sink_pin,
					     binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "unbind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}
1000 
avs_path_reset(struct avs_path * path)1001 int avs_path_reset(struct avs_path *path)
1002 {
1003 	struct avs_path_pipeline *ppl;
1004 	struct avs_dev *adev = path->owner;
1005 	int ret;
1006 
1007 	if (path->state == AVS_PPL_STATE_RESET)
1008 		return 0;
1009 
1010 	list_for_each_entry(ppl, &path->ppl_list, node) {
1011 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1012 						 AVS_PPL_STATE_RESET);
1013 		if (ret) {
1014 			dev_err(adev->dev, "reset path failed: %d\n", ret);
1015 			path->state = AVS_PPL_STATE_INVALID;
1016 			return AVS_IPC_RET(ret);
1017 		}
1018 	}
1019 
1020 	path->state = AVS_PPL_STATE_RESET;
1021 	return 0;
1022 }
1023 
avs_path_pause(struct avs_path * path)1024 int avs_path_pause(struct avs_path *path)
1025 {
1026 	struct avs_path_pipeline *ppl;
1027 	struct avs_dev *adev = path->owner;
1028 	int ret;
1029 
1030 	if (path->state == AVS_PPL_STATE_PAUSED)
1031 		return 0;
1032 
1033 	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
1034 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1035 						 AVS_PPL_STATE_PAUSED);
1036 		if (ret) {
1037 			dev_err(adev->dev, "pause path failed: %d\n", ret);
1038 			path->state = AVS_PPL_STATE_INVALID;
1039 			return AVS_IPC_RET(ret);
1040 		}
1041 	}
1042 
1043 	path->state = AVS_PPL_STATE_PAUSED;
1044 	return 0;
1045 }
1046 
/*
 * Transition pipelines of a path to the RUNNING state. Only pipelines whose
 * topology trigger matches @trigger are started; with AVS_TPLG_TRIGGER_AUTO
 * an already-running path is a no-op. On IPC failure the path is marked
 * INVALID.
 */
int avs_path_run(struct avs_path *path, int trigger)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/* Skip pipelines armed for a different trigger. */
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RUNNING;
	return 0;
}
1072