1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
4 *
5 * lpass-cpu.c -- ALSA SoC CPU DAI driver for QTi LPASS
6 */
7
8 #include <linux/clk.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_device.h>
13 #include <linux/platform_device.h>
14 #include <sound/pcm.h>
15 #include <sound/pcm_params.h>
16 #include <linux/regmap.h>
17 #include <sound/soc.h>
18 #include <sound/soc-dai.h>
19 #include "lpass-lpaif-reg.h"
20 #include "lpass.h"
21
22 #define LPASS_CPU_MAX_MI2S_LINES 4
23 #define LPASS_CPU_I2S_SD0_MASK BIT(0)
24 #define LPASS_CPU_I2S_SD1_MASK BIT(1)
25 #define LPASS_CPU_I2S_SD2_MASK BIT(2)
26 #define LPASS_CPU_I2S_SD3_MASK BIT(3)
27 #define LPASS_CPU_I2S_SD0_1_MASK GENMASK(1, 0)
28 #define LPASS_CPU_I2S_SD2_3_MASK GENMASK(3, 2)
29 #define LPASS_CPU_I2S_SD0_1_2_MASK GENMASK(2, 0)
30 #define LPASS_CPU_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
31 #define LPASS_REG_READ 1
32 #define LPASS_REG_WRITE 0
33
34 /*
35 * Channel maps for Quad channel playbacks on MI2S Secondary
36 */
static struct snd_pcm_chmap_elem lpass_quad_chmaps[] = {
		/* interleaved slot order on the two SD lines: FL, RL, FR, RR */
		{ .channels = 4,
		  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_RL,
				SNDRV_CHMAP_FR, SNDRV_CHMAP_RR } },
		{ } /* terminator */
};
lpass_cpu_init_i2sctl_bitfields(struct device * dev,struct lpaif_i2sctl * i2sctl,struct regmap * map)43 static int lpass_cpu_init_i2sctl_bitfields(struct device *dev,
44 struct lpaif_i2sctl *i2sctl, struct regmap *map)
45 {
46 struct lpass_data *drvdata = dev_get_drvdata(dev);
47 struct lpass_variant *v = drvdata->variant;
48
49 i2sctl->loopback = devm_regmap_field_alloc(dev, map, v->loopback);
50 i2sctl->spken = devm_regmap_field_alloc(dev, map, v->spken);
51 i2sctl->spkmode = devm_regmap_field_alloc(dev, map, v->spkmode);
52 i2sctl->spkmono = devm_regmap_field_alloc(dev, map, v->spkmono);
53 i2sctl->micen = devm_regmap_field_alloc(dev, map, v->micen);
54 i2sctl->micmode = devm_regmap_field_alloc(dev, map, v->micmode);
55 i2sctl->micmono = devm_regmap_field_alloc(dev, map, v->micmono);
56 i2sctl->wssrc = devm_regmap_field_alloc(dev, map, v->wssrc);
57 i2sctl->bitwidth = devm_regmap_field_alloc(dev, map, v->bitwidth);
58
59 if (IS_ERR(i2sctl->loopback) || IS_ERR(i2sctl->spken) ||
60 IS_ERR(i2sctl->spkmode) || IS_ERR(i2sctl->spkmono) ||
61 IS_ERR(i2sctl->micen) || IS_ERR(i2sctl->micmode) ||
62 IS_ERR(i2sctl->micmono) || IS_ERR(i2sctl->wssrc) ||
63 IS_ERR(i2sctl->bitwidth))
64 return -EINVAL;
65
66 return 0;
67 }
68
lpass_cpu_daiops_set_sysclk(struct snd_soc_dai * dai,int clk_id,unsigned int freq,int dir)69 static int lpass_cpu_daiops_set_sysclk(struct snd_soc_dai *dai, int clk_id,
70 unsigned int freq, int dir)
71 {
72 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
73 int ret;
74
75 ret = clk_set_rate(drvdata->mi2s_osr_clk[dai->driver->id], freq);
76 if (ret)
77 dev_err(dai->dev, "error setting mi2s osrclk to %u: %d\n",
78 freq, ret);
79
80 return ret;
81 }
82
lpass_cpu_daiops_startup(struct snd_pcm_substream * substream,struct snd_soc_dai * dai)83 static int lpass_cpu_daiops_startup(struct snd_pcm_substream *substream,
84 struct snd_soc_dai *dai)
85 {
86 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
87 int ret;
88
89 ret = clk_prepare_enable(drvdata->mi2s_osr_clk[dai->driver->id]);
90 if (ret) {
91 dev_err(dai->dev, "error in enabling mi2s osr clk: %d\n", ret);
92 return ret;
93 }
94 ret = clk_prepare(drvdata->mi2s_bit_clk[dai->driver->id]);
95 if (ret) {
96 dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
97 clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
98 return ret;
99 }
100 return 0;
101 }
102
lpass_cpu_daiops_shutdown(struct snd_pcm_substream * substream,struct snd_soc_dai * dai)103 static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
104 struct snd_soc_dai *dai)
105 {
106 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
107 struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
108 unsigned int id = dai->driver->id;
109
110 clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
111 /*
112 * Ensure LRCLK is disabled even in device node validation.
113 * Will not impact if disabled in lpass_cpu_daiops_trigger()
114 * suspend.
115 */
116 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
117 regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
118 else
119 regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
120
121 /*
122 * BCLK may not be enabled if lpass_cpu_daiops_prepare is called before
123 * lpass_cpu_daiops_shutdown. It's paired with the clk_enable in
124 * lpass_cpu_daiops_prepare.
125 */
126 if (drvdata->mi2s_was_prepared[dai->driver->id]) {
127 drvdata->mi2s_was_prepared[dai->driver->id] = false;
128 clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
129 }
130
131 clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
132 }
133
/*
 * Program the I2S control register fields and the bit clock for the
 * requested stream parameters (sample format, channel count, rate).
 * Returns 0 on success or a negative error code.
 */
static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;
	snd_pcm_format_t format = params_format(params);
	unsigned int channels = params_channels(params);
	unsigned int rate = params_rate(params);
	unsigned int mode;
	unsigned int regval;
	int bitwidth, ret;

	bitwidth = snd_pcm_format_width(format);
	if (bitwidth < 0) {
		dev_err(dai->dev, "invalid bit width given: %d\n", bitwidth);
		return bitwidth;
	}

	/* normal playback/capture: make sure loopback mode is off */
	ret = regmap_fields_write(i2sctl->loopback, id,
				 LPAIF_I2SCTL_LOOPBACK_DISABLE);
	if (ret) {
		dev_err(dai->dev, "error updating loopback field: %d\n", ret);
		return ret;
	}

	/* word-select (LRCLK) is generated internally by the controller */
	ret = regmap_fields_write(i2sctl->wssrc, id,
				 LPAIF_I2SCTL_WSSRC_INTERNAL);
	if (ret) {
		dev_err(dai->dev, "error updating wssrc field: %d\n", ret);
		return ret;
	}

	/* only 16/24/32-bit sample widths are supported by the hardware field */
	switch (bitwidth) {
	case 16:
		regval = LPAIF_I2SCTL_BITWIDTH_16;
		break;
	case 24:
		regval = LPAIF_I2SCTL_BITWIDTH_24;
		break;
	case 32:
		regval = LPAIF_I2SCTL_BITWIDTH_32;
		break;
	default:
		dev_err(dai->dev, "invalid bitwidth given: %d\n", bitwidth);
		return -EINVAL;
	}

	ret = regmap_fields_write(i2sctl->bitwidth, id, regval);
	if (ret) {
		dev_err(dai->dev, "error updating bitwidth field: %d\n", ret);
		return ret;
	}

	/* SD-line mode was derived from the device tree at probe time */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		mode = drvdata->mi2s_playback_sd_mode[id];
	else
		mode = drvdata->mi2s_capture_sd_mode[id];

	if (!mode) {
		dev_err(dai->dev, "no line is assigned\n");
		return -EINVAL;
	}

	/*
	 * Narrow the configured SD-line mode down to the smallest mode
	 * that carries the requested channel count, and reject channel
	 * counts the configured lines cannot carry.
	 */
	switch (channels) {
	case 1:
	case 2:
		switch (mode) {
		case LPAIF_I2SCTL_MODE_QUAD01:
		case LPAIF_I2SCTL_MODE_6CH:
		case LPAIF_I2SCTL_MODE_8CH:
			/* mono/stereo fits on the first line of the group */
			mode = LPAIF_I2SCTL_MODE_SD0;
			break;
		case LPAIF_I2SCTL_MODE_QUAD23:
			mode = LPAIF_I2SCTL_MODE_SD2;
			break;
		}

		break;
	case 4:
		if (mode < LPAIF_I2SCTL_MODE_QUAD01) {
			dev_err(dai->dev, "cannot configure 4 channels with mode %d\n",
				mode);
			return -EINVAL;
		}

		switch (mode) {
		case LPAIF_I2SCTL_MODE_6CH:
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_QUAD01;
			break;
		}
		break;
	case 6:
		if (mode < LPAIF_I2SCTL_MODE_6CH) {
			dev_err(dai->dev, "cannot configure 6 channels with mode %d\n",
				mode);
			return -EINVAL;
		}

		switch (mode) {
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_6CH;
			break;
		}
		break;
	case 8:
		if (mode < LPAIF_I2SCTL_MODE_8CH) {
			dev_err(dai->dev, "cannot configure 8 channels with mode %d\n",
				mode);
			return -EINVAL;
		}
		break;
	default:
		dev_err(dai->dev, "invalid channels given: %u\n", channels);
		return -EINVAL;
	}

	/* program the resolved mode and mono/stereo flag for the direction */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ret = regmap_fields_write(i2sctl->spkmode, id,
					 LPAIF_I2SCTL_SPKMODE(mode));
		if (ret) {
			dev_err(dai->dev, "error writing to i2sctl spkr mode: %d\n",
				ret);
			return ret;
		}
		if (channels >= 2)
			ret = regmap_fields_write(i2sctl->spkmono, id,
						 LPAIF_I2SCTL_SPKMONO_STEREO);
		else
			ret = regmap_fields_write(i2sctl->spkmono, id,
						 LPAIF_I2SCTL_SPKMONO_MONO);
	} else {
		ret = regmap_fields_write(i2sctl->micmode, id,
					 LPAIF_I2SCTL_MICMODE(mode));
		if (ret) {
			dev_err(dai->dev, "error writing to i2sctl mic mode: %d\n",
				ret);
			return ret;
		}
		if (channels >= 2)
			ret = regmap_fields_write(i2sctl->micmono, id,
						 LPAIF_I2SCTL_MICMONO_STEREO);
		else
			ret = regmap_fields_write(i2sctl->micmono, id,
						 LPAIF_I2SCTL_MICMONO_MONO);
	}

	if (ret) {
		dev_err(dai->dev, "error writing to i2sctl channels mode: %d\n",
			ret);
		return ret;
	}

	/* bit clock = sample rate * sample width * 2 (one slot per LRCLK edge) */
	ret = clk_set_rate(drvdata->mi2s_bit_clk[id],
			   rate * bitwidth * 2);
	if (ret) {
		dev_err(dai->dev, "error setting mi2s bitclk to %u: %d\n",
			rate * bitwidth * 2, ret);
		return ret;
	}

	return 0;
}
298
/*
 * Start/stop the LRCLK generator bit and the bit clock for trigger
 * events.  Only clk_enable()/clk_disable() are used here (the prepare
 * step happened in startup()) — presumably because trigger may run in
 * atomic context; NOTE(review): confirm against the dai_link settings.
 */
static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
		int cmd, struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;
	/* trigger commands not handled below return -EINVAL unchanged */
	int ret = -EINVAL;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/*
		 * Ensure lpass BCLK/LRCLK is enabled during
		 * device resume as lpass_cpu_daiops_prepare() is not called
		 * after the device resumes. We don't check mi2s_was_prepared before
		 * enable/disable BCLK in trigger events because:
		 * 1. These trigger events are paired, so the BCLK
		 *    enable_count is balanced.
		 * 2. the BCLK can be shared (ex: headset and headset mic),
		 *    we need to increase the enable_count so that we don't
		 *    turn off the shared BCLK while other devices are using
		 *    it.
		 */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			ret = regmap_fields_write(i2sctl->spken, id,
						 LPAIF_I2SCTL_SPKEN_ENABLE);
		} else {
			ret = regmap_fields_write(i2sctl->micen, id,
						 LPAIF_I2SCTL_MICEN_ENABLE);
		}
		if (ret)
			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
				ret);

		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
		if (ret) {
			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
			/*
			 * NOTE(review): this drops an OSR clock reference that
			 * was taken in startup(), not in this trigger — verify
			 * the enable counts stay balanced on this error path.
			 */
			clk_disable(drvdata->mi2s_osr_clk[id]);
			return ret;
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/*
		 * To ensure lpass BCLK/LRCLK is disabled during
		 * device suspend.
		 */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			ret = regmap_fields_write(i2sctl->spken, id,
						 LPAIF_I2SCTL_SPKEN_DISABLE);
		} else {
			ret = regmap_fields_write(i2sctl->micen, id,
						 LPAIF_I2SCTL_MICEN_DISABLE);
		}
		if (ret)
			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
				ret);

		/* pairs with the clk_enable() in the start cases above */
		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);

		break;
	}

	return ret;
}
366
lpass_cpu_daiops_prepare(struct snd_pcm_substream * substream,struct snd_soc_dai * dai)367 static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
368 struct snd_soc_dai *dai)
369 {
370 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
371 struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
372 unsigned int id = dai->driver->id;
373 int ret;
374
375 /*
376 * Ensure lpass BCLK/LRCLK is enabled bit before playback/capture
377 * data flow starts. This allows other codec to have some delay before
378 * the data flow.
379 * (ex: to drop start up pop noise before capture starts).
380 */
381 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
382 ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
383 else
384 ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
385
386 if (ret) {
387 dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
388 return ret;
389 }
390
391 /*
392 * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
393 * be called multiple times. It's paired with the clk_disable in
394 * lpass_cpu_daiops_shutdown.
395 */
396 if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
397 ret = clk_enable(drvdata->mi2s_bit_clk[id]);
398 if (ret) {
399 dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
400 return ret;
401 }
402 drvdata->mi2s_was_prepared[dai->driver->id] = true;
403 }
404 return 0;
405 }
406
/* DAI callbacks shared by the MI2S CPU DAIs; exported for variant drivers. */
const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
	.startup	= lpass_cpu_daiops_startup,
	.shutdown	= lpass_cpu_daiops_shutdown,
	.hw_params	= lpass_cpu_daiops_hw_params,
	.trigger	= lpass_cpu_daiops_trigger,
	.prepare	= lpass_cpu_daiops_prepare,
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
416
lpass_cpu_pcm_new(struct snd_soc_pcm_runtime * rtd,struct snd_soc_dai * dai)417 int lpass_cpu_pcm_new(struct snd_soc_pcm_runtime *rtd,
418 struct snd_soc_dai *dai)
419 {
420 int ret;
421 struct snd_soc_dai_driver *drv = dai->driver;
422 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
423
424 if (drvdata->mi2s_playback_sd_mode[dai->id] == LPAIF_I2SCTL_MODE_QUAD01) {
425 ret = snd_pcm_add_chmap_ctls(rtd->pcm, SNDRV_PCM_STREAM_PLAYBACK,
426 lpass_quad_chmaps, drv->playback.channels_max, 0,
427 NULL);
428 if (ret < 0)
429 return ret;
430 }
431
432 return 0;
433 }
434 EXPORT_SYMBOL_GPL(lpass_cpu_pcm_new);
435
asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai * dai)436 int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai)
437 {
438 struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
439 int ret;
440
441 /* ensure audio hardware is disabled */
442 ret = regmap_write(drvdata->lpaif_map,
443 LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), 0);
444 if (ret)
445 dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
446
447 return ret;
448 }
449 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_probe);
450
asoc_qcom_of_xlate_dai_name(struct snd_soc_component * component,const struct of_phandle_args * args,const char ** dai_name)451 static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
452 const struct of_phandle_args *args,
453 const char **dai_name)
454 {
455 struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
456 struct lpass_variant *variant = drvdata->variant;
457 int id = args->args[0];
458 int ret = -EINVAL;
459 int i;
460
461 for (i = 0; i < variant->num_dai; i++) {
462 if (variant->dai_driver[i].id == id) {
463 *dai_name = variant->dai_driver[i].name;
464 ret = 0;
465 break;
466 }
467 }
468
469 return ret;
470 }
471
/* Component driver: only provides DT DAI-name translation. */
static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
	.name = "lpass-cpu",
	.of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
};
476
lpass_cpu_regmap_writeable(struct device * dev,unsigned int reg)477 static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
478 {
479 struct lpass_data *drvdata = dev_get_drvdata(dev);
480 struct lpass_variant *v = drvdata->variant;
481 int i;
482
483 for (i = 0; i < v->i2s_ports; ++i)
484 if (reg == LPAIF_I2SCTL_REG(v, i))
485 return true;
486
487 for (i = 0; i < v->irq_ports; ++i) {
488 if (reg == LPAIF_IRQEN_REG(v, i))
489 return true;
490 if (reg == LPAIF_IRQCLEAR_REG(v, i))
491 return true;
492 }
493
494 for (i = 0; i < v->rdma_channels; ++i) {
495 if (reg == LPAIF_RDMACTL_REG(v, i))
496 return true;
497 if (reg == LPAIF_RDMABASE_REG(v, i))
498 return true;
499 if (reg == LPAIF_RDMABUFF_REG(v, i))
500 return true;
501 if (reg == LPAIF_RDMAPER_REG(v, i))
502 return true;
503 }
504
505 for (i = 0; i < v->wrdma_channels; ++i) {
506 if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
507 return true;
508 if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
509 return true;
510 if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
511 return true;
512 if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
513 return true;
514 }
515
516 return false;
517 }
518
lpass_cpu_regmap_readable(struct device * dev,unsigned int reg)519 static bool lpass_cpu_regmap_readable(struct device *dev, unsigned int reg)
520 {
521 struct lpass_data *drvdata = dev_get_drvdata(dev);
522 struct lpass_variant *v = drvdata->variant;
523 int i;
524
525 for (i = 0; i < v->i2s_ports; ++i)
526 if (reg == LPAIF_I2SCTL_REG(v, i))
527 return true;
528
529 for (i = 0; i < v->irq_ports; ++i) {
530 if (reg == LPAIF_IRQCLEAR_REG(v, i))
531 return true;
532 if (reg == LPAIF_IRQEN_REG(v, i))
533 return true;
534 if (reg == LPAIF_IRQSTAT_REG(v, i))
535 return true;
536 }
537
538 for (i = 0; i < v->rdma_channels; ++i) {
539 if (reg == LPAIF_RDMACTL_REG(v, i))
540 return true;
541 if (reg == LPAIF_RDMABASE_REG(v, i))
542 return true;
543 if (reg == LPAIF_RDMABUFF_REG(v, i))
544 return true;
545 if (reg == LPAIF_RDMACURR_REG(v, i))
546 return true;
547 if (reg == LPAIF_RDMAPER_REG(v, i))
548 return true;
549 }
550
551 for (i = 0; i < v->wrdma_channels; ++i) {
552 if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
553 return true;
554 if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
555 return true;
556 if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
557 return true;
558 if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
559 return true;
560 if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
561 return true;
562 }
563
564 return false;
565 }
566
lpass_cpu_regmap_volatile(struct device * dev,unsigned int reg)567 static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
568 {
569 struct lpass_data *drvdata = dev_get_drvdata(dev);
570 struct lpass_variant *v = drvdata->variant;
571 int i;
572
573 for (i = 0; i < v->irq_ports; ++i) {
574 if (reg == LPAIF_IRQCLEAR_REG(v, i))
575 return true;
576 if (reg == LPAIF_IRQSTAT_REG(v, i))
577 return true;
578 }
579
580 for (i = 0; i < v->rdma_channels; ++i)
581 if (reg == LPAIF_RDMACURR_REG(v, i))
582 return true;
583
584 for (i = 0; i < v->wrdma_channels; ++i)
585 if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
586 return true;
587
588 return false;
589 }
590
/*
 * regmap for the LPAIF/MI2S register block; access is limited to the
 * registers enumerated by the callbacks above, and non-volatile
 * registers are cached in a flat cache.
 */
static struct regmap_config lpass_cpu_regmap_config = {
	.name = "lpass_cpu",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_cpu_regmap_writeable,
	.readable_reg = lpass_cpu_regmap_readable,
	.volatile_reg = lpass_cpu_regmap_volatile,
	.cache_type = REGCACHE_FLAT,
};
601
/*
 * Allocate regmap fields for the HDMI TX register blocks described by
 * the variant data and stash them in drvdata for use by the HDMI DAI.
 *
 * QCOM_REGMAP_FIELD_ALLOC is presumed to devm-allocate a regmap field
 * and return from this function on allocation failure — it is defined
 * in lpass.h; confirm its error semantics there.
 */
static int lpass_hdmi_init_bitfields(struct device *dev, struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	unsigned int i;
	struct lpass_hdmi_tx_ctl *tx_ctl;
	struct regmap_field *legacy_en;
	struct lpass_vbit_ctrl *vbit_ctl;
	struct regmap_field *tx_parity;
	struct lpass_dp_metadata_ctl *meta_ctl;
	struct lpass_sstream_ctl *sstream_ctl;
	struct regmap_field *ch_msb;
	struct regmap_field *ch_lsb;
	struct lpass_hdmitx_dmactl *tx_dmactl;
	int rval;

	tx_ctl = devm_kzalloc(dev, sizeof(*tx_ctl), GFP_KERNEL);
	if (!tx_ctl)
		return -ENOMEM;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->soft_reset, tx_ctl->soft_reset);
	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->force_reset, tx_ctl->force_reset);
	drvdata->tx_ctl = tx_ctl;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->legacy_en, legacy_en);
	drvdata->hdmitx_legacy_en = legacy_en;

	vbit_ctl = devm_kzalloc(dev, sizeof(*vbit_ctl), GFP_KERNEL);
	if (!vbit_ctl)
		return -ENOMEM;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->replace_vbit, vbit_ctl->replace_vbit);
	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->vbit_stream, vbit_ctl->vbit_stream);
	drvdata->vbit_ctl = vbit_ctl;


	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->calc_en, tx_parity);
	drvdata->hdmitx_parity_calc_en = tx_parity;

	meta_ctl = devm_kzalloc(dev, sizeof(*meta_ctl), GFP_KERNEL);
	if (!meta_ctl)
		return -ENOMEM;

	/*
	 * Bulk-allocates 7 fields starting at v->mute; relies on the
	 * metadata-ctl reg_fields being laid out contiguously in
	 * struct lpass_variant — TODO confirm against lpass.h.
	 */
	rval = devm_regmap_field_bulk_alloc(dev, map, &meta_ctl->mute, &v->mute, 7);
	if (rval)
		return rval;
	drvdata->meta_ctl = meta_ctl;

	sstream_ctl = devm_kzalloc(dev, sizeof(*sstream_ctl), GFP_KERNEL);
	if (!sstream_ctl)
		return -ENOMEM;

	/* same contiguity assumption: 9 fields starting at v->sstream_en */
	rval = devm_regmap_field_bulk_alloc(dev, map, &sstream_ctl->sstream_en, &v->sstream_en, 9);
	if (rval)
		return rval;

	drvdata->sstream_ctl = sstream_ctl;

	/* per-DMA-channel MSB/LSB and DMA-control fields */
	for (i = 0; i < LPASS_MAX_HDMI_DMA_CHANNELS; i++) {
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->msb_bits, ch_msb);
		drvdata->hdmitx_ch_msb[i] = ch_msb;

		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->lsb_bits, ch_lsb);
		drvdata->hdmitx_ch_lsb[i] = ch_lsb;

		tx_dmactl = devm_kzalloc(dev, sizeof(*tx_dmactl), GFP_KERNEL);
		if (!tx_dmactl)
			return -ENOMEM;

		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_chs, tx_dmactl->use_hw_chs);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_usr, tx_dmactl->use_hw_usr);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_chs_sel, tx_dmactl->hw_chs_sel);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_usr_sel, tx_dmactl->hw_usr_sel);
		drvdata->hdmi_tx_dmactl[i] = tx_dmactl;
	}
	return 0;
}
679
lpass_hdmi_regmap_writeable(struct device * dev,unsigned int reg)680 static bool lpass_hdmi_regmap_writeable(struct device *dev, unsigned int reg)
681 {
682 struct lpass_data *drvdata = dev_get_drvdata(dev);
683 struct lpass_variant *v = drvdata->variant;
684 int i;
685
686 if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
687 return true;
688 if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
689 return true;
690 if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
691 return true;
692 if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
693 return true;
694 if (reg == LPASS_HDMI_TX_DP_ADDR(v))
695 return true;
696 if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
697 return true;
698 if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
699 return true;
700 if (reg == LPASS_HDMITX_APP_IRQCLEAR_REG(v))
701 return true;
702
703 for (i = 0; i < v->hdmi_rdma_channels; i++) {
704 if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
705 return true;
706 if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
707 return true;
708 if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
709 return true;
710 }
711
712 for (i = 0; i < v->hdmi_rdma_channels; ++i) {
713 if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
714 return true;
715 if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
716 return true;
717 if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
718 return true;
719 if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
720 return true;
721 }
722 return false;
723 }
724
lpass_hdmi_regmap_readable(struct device * dev,unsigned int reg)725 static bool lpass_hdmi_regmap_readable(struct device *dev, unsigned int reg)
726 {
727 struct lpass_data *drvdata = dev_get_drvdata(dev);
728 struct lpass_variant *v = drvdata->variant;
729 int i;
730
731 if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
732 return true;
733 if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
734 return true;
735 if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
736 return true;
737
738 for (i = 0; i < v->hdmi_rdma_channels; i++) {
739 if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
740 return true;
741 if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
742 return true;
743 if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
744 return true;
745 }
746
747 if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
748 return true;
749 if (reg == LPASS_HDMI_TX_DP_ADDR(v))
750 return true;
751 if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
752 return true;
753 if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
754 return true;
755 if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
756 return true;
757
758 for (i = 0; i < v->hdmi_rdma_channels; ++i) {
759 if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
760 return true;
761 if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
762 return true;
763 if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
764 return true;
765 if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
766 return true;
767 if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
768 return true;
769 }
770
771 return false;
772 }
773
lpass_hdmi_regmap_volatile(struct device * dev,unsigned int reg)774 static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
775 {
776 struct lpass_data *drvdata = dev_get_drvdata(dev);
777 struct lpass_variant *v = drvdata->variant;
778 int i;
779
780 if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
781 return true;
782 if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
783 return true;
784
785 for (i = 0; i < v->hdmi_rdma_channels; ++i) {
786 if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
787 return true;
788 }
789 return false;
790 }
791
/* regmap for the HDMI TX register block; flat cache, gated by callbacks. */
static struct regmap_config lpass_hdmi_regmap_config = {
	.name = "lpass_hdmi",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_hdmi_regmap_writeable,
	.readable_reg = lpass_hdmi_regmap_readable,
	.volatile_reg = lpass_hdmi_regmap_volatile,
	.cache_type = REGCACHE_FLAT,
};
802
/*
 * Shared helper for the RX/TX codec-DMA regmap access callbacks.
 * @rw selects the access direction (LPASS_REG_READ/LPASS_REG_WRITE);
 * CURR registers are only valid for reads.
 *
 * Fix: the WRDMACURR read check used the raw loop index with the
 * LPASS_CDC_DMA_RX0 interface; every other WRDMA check here — and the
 * matching check in lpass_rxtx_regmap_volatile() — offsets the channel
 * by rxtx_wrdma_channel_start and uses LPASS_CDC_DMA_TX3.
 */
static bool __lpass_rxtx_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	for (i = 0; i < v->rxtx_irq_ports; ++i) {
		if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
			return true;
		if (reg == LPAIF_RXTX_IRQEN_REG(v, i))
			return true;
		if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
			return true;
	}

	for (i = 0; i < v->rxtx_rdma_channels; ++i) {
		if (reg == LPAIF_CDC_RXTX_RDMACTL_REG(v, i, LPASS_CDC_DMA_RX0))
			return true;
		if (reg == LPAIF_CDC_RXTX_RDMABASE_REG(v, i, LPASS_CDC_DMA_RX0))
			return true;
		if (reg == LPAIF_CDC_RXTX_RDMABUFF_REG(v, i, LPASS_CDC_DMA_RX0))
			return true;
		if (rw == LPASS_REG_READ) {
			if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
				return true;
		}
		if (reg == LPAIF_CDC_RXTX_RDMAPER_REG(v, i, LPASS_CDC_DMA_RX0))
			return true;
		if (reg == LPAIF_CDC_RXTX_RDMA_INTF_REG(v, i, LPASS_CDC_DMA_RX0))
			return true;
	}

	for (i = 0; i < v->rxtx_wrdma_channels; ++i) {
		if (reg == LPAIF_CDC_RXTX_WRDMACTL_REG(v, i + v->rxtx_wrdma_channel_start,
							LPASS_CDC_DMA_TX3))
			return true;
		if (reg == LPAIF_CDC_RXTX_WRDMABASE_REG(v, i + v->rxtx_wrdma_channel_start,
							LPASS_CDC_DMA_TX3))
			return true;
		if (reg == LPAIF_CDC_RXTX_WRDMABUFF_REG(v, i + v->rxtx_wrdma_channel_start,
							LPASS_CDC_DMA_TX3))
			return true;
		if (rw == LPASS_REG_READ) {
			if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
								LPASS_CDC_DMA_TX3))
				return true;
		}
		if (reg == LPAIF_CDC_RXTX_WRDMAPER_REG(v, i + v->rxtx_wrdma_channel_start,
							LPASS_CDC_DMA_TX3))
			return true;
		if (reg == LPAIF_CDC_RXTX_WRDMA_INTF_REG(v, i + v->rxtx_wrdma_channel_start,
							LPASS_CDC_DMA_TX3))
			return true;
	}
	return false;
}
858
/* regmap writeable callback: delegates with write-access semantics. */
static bool lpass_rxtx_regmap_writeable(struct device *dev, unsigned int reg)
{
	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_WRITE);
}
863
/* regmap readable callback: delegates with read-access semantics. */
static bool lpass_rxtx_regmap_readable(struct device *dev, unsigned int reg)
{
	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_READ);
}
868
lpass_rxtx_regmap_volatile(struct device * dev,unsigned int reg)869 static bool lpass_rxtx_regmap_volatile(struct device *dev, unsigned int reg)
870 {
871 struct lpass_data *drvdata = dev_get_drvdata(dev);
872 struct lpass_variant *v = drvdata->variant;
873 int i;
874
875 for (i = 0; i < v->rxtx_irq_ports; ++i) {
876 if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
877 return true;
878 if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
879 return true;
880 }
881
882 for (i = 0; i < v->rxtx_rdma_channels; ++i)
883 if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
884 return true;
885
886 for (i = 0; i < v->rxtx_wrdma_channels; ++i)
887 if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
888 LPASS_CDC_DMA_TX3))
889 return true;
890
891 return false;
892 }
893
/*
 * Shared helper for the VA codec-DMA regmap access callbacks.
 * @rw selects the access direction; the WRDMACURR register is only
 * valid for reads.
 */
static bool __lpass_va_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	for (i = 0; i < v->va_irq_ports; ++i)
		if (reg == LPAIF_VA_IRQCLEAR_REG(v, i) ||
		    reg == LPAIF_VA_IRQEN_REG(v, i) ||
		    reg == LPAIF_VA_IRQSTAT_REG(v, i))
			return true;

	for (i = 0; i < v->va_wrdma_channels; ++i) {
		int ch = i + v->va_wrdma_channel_start;

		if (reg == LPAIF_CDC_VA_WRDMACTL_REG(v, ch, LPASS_CDC_DMA_VA_TX0) ||
		    reg == LPAIF_CDC_VA_WRDMABASE_REG(v, ch, LPASS_CDC_DMA_VA_TX0) ||
		    reg == LPAIF_CDC_VA_WRDMABUFF_REG(v, ch, LPASS_CDC_DMA_VA_TX0) ||
		    reg == LPAIF_CDC_VA_WRDMAPER_REG(v, ch, LPASS_CDC_DMA_VA_TX0) ||
		    reg == LPAIF_CDC_VA_WRDMA_INTF_REG(v, ch, LPASS_CDC_DMA_VA_TX0))
			return true;

		if (rw == LPASS_REG_READ &&
		    reg == LPAIF_CDC_VA_WRDMACURR_REG(v, ch, LPASS_CDC_DMA_VA_TX0))
			return true;
	}
	return false;
}
933
/* regmap writeable callback: delegates with write-access semantics. */
static bool lpass_va_regmap_writeable(struct device *dev, unsigned int reg)
{
	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_WRITE);
}
938
/* regmap readable callback: delegates with read-access semantics. */
static bool lpass_va_regmap_readable(struct device *dev, unsigned int reg)
{
	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_READ);
}
943
lpass_va_regmap_volatile(struct device * dev,unsigned int reg)944 static bool lpass_va_regmap_volatile(struct device *dev, unsigned int reg)
945 {
946 struct lpass_data *drvdata = dev_get_drvdata(dev);
947 struct lpass_variant *v = drvdata->variant;
948 int i;
949
950 for (i = 0; i < v->va_irq_ports; ++i) {
951 if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
952 return true;
953 if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
954 return true;
955 }
956
957 for (i = 0; i < v->va_wrdma_channels; ++i) {
958 if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
959 LPASS_CDC_DMA_VA_TX0))
960 return true;
961 }
962
963 return false;
964 }
965
/* regmap for the RX/TX codec-DMA register block; flat cache, gated above. */
static struct regmap_config lpass_rxtx_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_rxtx_regmap_writeable,
	.readable_reg = lpass_rxtx_regmap_readable,
	.volatile_reg = lpass_rxtx_regmap_volatile,
	.cache_type = REGCACHE_FLAT,
};
975
/* regmap for the VA codec-DMA register block; flat cache, gated above. */
static struct regmap_config lpass_va_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_va_regmap_writeable,
	.readable_reg = lpass_va_regmap_readable,
	.volatile_reg = lpass_va_regmap_volatile,
	.cache_type = REGCACHE_FLAT,
};
985
of_lpass_cpu_parse_sd_lines(struct device * dev,struct device_node * node,const char * name)986 static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
987 struct device_node *node,
988 const char *name)
989 {
990 unsigned int lines[LPASS_CPU_MAX_MI2S_LINES];
991 unsigned int sd_line_mask = 0;
992 int num_lines, i;
993
994 num_lines = of_property_read_variable_u32_array(node, name, lines, 0,
995 LPASS_CPU_MAX_MI2S_LINES);
996 if (num_lines < 0)
997 return LPAIF_I2SCTL_MODE_NONE;
998
999 for (i = 0; i < num_lines; i++)
1000 sd_line_mask |= BIT(lines[i]);
1001
1002 switch (sd_line_mask) {
1003 case LPASS_CPU_I2S_SD0_MASK:
1004 return LPAIF_I2SCTL_MODE_SD0;
1005 case LPASS_CPU_I2S_SD1_MASK:
1006 return LPAIF_I2SCTL_MODE_SD1;
1007 case LPASS_CPU_I2S_SD2_MASK:
1008 return LPAIF_I2SCTL_MODE_SD2;
1009 case LPASS_CPU_I2S_SD3_MASK:
1010 return LPAIF_I2SCTL_MODE_SD3;
1011 case LPASS_CPU_I2S_SD0_1_MASK:
1012 return LPAIF_I2SCTL_MODE_QUAD01;
1013 case LPASS_CPU_I2S_SD2_3_MASK:
1014 return LPAIF_I2SCTL_MODE_QUAD23;
1015 case LPASS_CPU_I2S_SD0_1_2_MASK:
1016 return LPAIF_I2SCTL_MODE_6CH;
1017 case LPASS_CPU_I2S_SD0_1_2_3_MASK:
1018 return LPAIF_I2SCTL_MODE_8CH;
1019 default:
1020 dev_err(dev, "Unsupported SD line mask: %#x\n", sd_line_mask);
1021 return LPAIF_I2SCTL_MODE_NONE;
1022 }
1023 }
1024
of_lpass_cpu_parse_dai_data(struct device * dev,struct lpass_data * data)1025 static void of_lpass_cpu_parse_dai_data(struct device *dev,
1026 struct lpass_data *data)
1027 {
1028 struct device_node *node;
1029 int ret, id;
1030
1031 /* Allow all channels by default for backwards compatibility */
1032 for (id = 0; id < data->variant->num_dai; id++) {
1033 data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
1034 data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
1035 }
1036
1037 for_each_child_of_node(dev->of_node, node) {
1038 ret = of_property_read_u32(node, "reg", &id);
1039 if (ret || id < 0) {
1040 dev_err(dev, "valid dai id not found: %d\n", ret);
1041 continue;
1042 }
1043 if (id == LPASS_DP_RX) {
1044 data->hdmi_port_enable = 1;
1045 } else if (is_cdc_dma_port(id)) {
1046 data->codec_dma_enable = 1;
1047 } else {
1048 data->mi2s_playback_sd_mode[id] =
1049 of_lpass_cpu_parse_sd_lines(dev, node,
1050 "qcom,playback-sd-lines");
1051 data->mi2s_capture_sd_mode[id] =
1052 of_lpass_cpu_parse_sd_lines(dev, node,
1053 "qcom,capture-sd-lines");
1054 }
1055 }
1056 }
1057
of_lpass_cdc_dma_clks_parse(struct device * dev,struct lpass_data * data)1058 static int of_lpass_cdc_dma_clks_parse(struct device *dev,
1059 struct lpass_data *data)
1060 {
1061 data->codec_mem0 = devm_clk_get(dev, "audio_cc_codec_mem0");
1062 if (IS_ERR(data->codec_mem0))
1063 return PTR_ERR(data->codec_mem0);
1064
1065 data->codec_mem1 = devm_clk_get(dev, "audio_cc_codec_mem1");
1066 if (IS_ERR(data->codec_mem1))
1067 return PTR_ERR(data->codec_mem1);
1068
1069 data->codec_mem2 = devm_clk_get(dev, "audio_cc_codec_mem2");
1070 if (IS_ERR(data->codec_mem2))
1071 return PTR_ERR(data->codec_mem2);
1072
1073 data->va_mem0 = devm_clk_get(dev, "aon_cc_va_mem0");
1074 if (IS_ERR(data->va_mem0))
1075 return PTR_ERR(data->va_mem0);
1076
1077 return 0;
1078 }
1079
asoc_qcom_lpass_cpu_platform_probe(struct platform_device * pdev)1080 int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
1081 {
1082 struct lpass_data *drvdata;
1083 struct device_node *dsp_of_node;
1084 struct resource *res;
1085 struct lpass_variant *variant;
1086 struct device *dev = &pdev->dev;
1087 const struct of_device_id *match;
1088 int ret, i, dai_id;
1089
1090 dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
1091 if (dsp_of_node) {
1092 dev_err(dev, "DSP exists and holds audio resources\n");
1093 of_node_put(dsp_of_node);
1094 return -EBUSY;
1095 }
1096
1097 drvdata = devm_kzalloc(dev, sizeof(struct lpass_data), GFP_KERNEL);
1098 if (!drvdata)
1099 return -ENOMEM;
1100 platform_set_drvdata(pdev, drvdata);
1101
1102 match = of_match_device(dev->driver->of_match_table, dev);
1103 if (!match || !match->data)
1104 return -EINVAL;
1105
1106 drvdata->variant = (struct lpass_variant *)match->data;
1107 variant = drvdata->variant;
1108
1109 of_lpass_cpu_parse_dai_data(dev, drvdata);
1110
1111 if (drvdata->codec_dma_enable) {
1112 drvdata->rxtx_lpaif =
1113 devm_platform_ioremap_resource_byname(pdev, "lpass-rxtx-lpaif");
1114 if (IS_ERR(drvdata->rxtx_lpaif))
1115 return PTR_ERR(drvdata->rxtx_lpaif);
1116
1117 drvdata->va_lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-va-lpaif");
1118 if (IS_ERR(drvdata->va_lpaif))
1119 return PTR_ERR(drvdata->va_lpaif);
1120
1121 lpass_rxtx_regmap_config.max_register = LPAIF_CDC_RXTX_WRDMAPER_REG(variant,
1122 variant->rxtx_wrdma_channels +
1123 variant->rxtx_wrdma_channel_start, LPASS_CDC_DMA_TX3);
1124
1125 drvdata->rxtx_lpaif_map = devm_regmap_init_mmio(dev, drvdata->rxtx_lpaif,
1126 &lpass_rxtx_regmap_config);
1127 if (IS_ERR(drvdata->rxtx_lpaif_map))
1128 return PTR_ERR(drvdata->rxtx_lpaif_map);
1129
1130 lpass_va_regmap_config.max_register = LPAIF_CDC_VA_WRDMAPER_REG(variant,
1131 variant->va_wrdma_channels +
1132 variant->va_wrdma_channel_start, LPASS_CDC_DMA_VA_TX0);
1133
1134 drvdata->va_lpaif_map = devm_regmap_init_mmio(dev, drvdata->va_lpaif,
1135 &lpass_va_regmap_config);
1136 if (IS_ERR(drvdata->va_lpaif_map))
1137 return PTR_ERR(drvdata->va_lpaif_map);
1138
1139 ret = of_lpass_cdc_dma_clks_parse(dev, drvdata);
1140 if (ret) {
1141 dev_err(dev, "failed to get cdc dma clocks %d\n", ret);
1142 return ret;
1143 }
1144
1145 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-rxtx-cdc-dma-lpm");
1146 drvdata->rxtx_cdc_dma_lpm_buf = res->start;
1147
1148 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-va-cdc-dma-lpm");
1149 drvdata->va_cdc_dma_lpm_buf = res->start;
1150 }
1151
1152 drvdata->lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-lpaif");
1153 if (IS_ERR(drvdata->lpaif))
1154 return PTR_ERR(drvdata->lpaif);
1155
1156 lpass_cpu_regmap_config.max_register = LPAIF_WRDMAPER_REG(variant,
1157 variant->wrdma_channels +
1158 variant->wrdma_channel_start);
1159
1160 drvdata->lpaif_map = devm_regmap_init_mmio(dev, drvdata->lpaif,
1161 &lpass_cpu_regmap_config);
1162 if (IS_ERR(drvdata->lpaif_map)) {
1163 dev_err(dev, "error initializing regmap: %ld\n",
1164 PTR_ERR(drvdata->lpaif_map));
1165 return PTR_ERR(drvdata->lpaif_map);
1166 }
1167
1168 if (drvdata->hdmi_port_enable) {
1169 drvdata->hdmiif = devm_platform_ioremap_resource_byname(pdev, "lpass-hdmiif");
1170 if (IS_ERR(drvdata->hdmiif))
1171 return PTR_ERR(drvdata->hdmiif);
1172
1173 lpass_hdmi_regmap_config.max_register = LPAIF_HDMI_RDMAPER_REG(variant,
1174 variant->hdmi_rdma_channels - 1);
1175 drvdata->hdmiif_map = devm_regmap_init_mmio(dev, drvdata->hdmiif,
1176 &lpass_hdmi_regmap_config);
1177 if (IS_ERR(drvdata->hdmiif_map)) {
1178 dev_err(dev, "error initializing regmap: %ld\n",
1179 PTR_ERR(drvdata->hdmiif_map));
1180 return PTR_ERR(drvdata->hdmiif_map);
1181 }
1182 }
1183
1184 if (variant->init) {
1185 ret = variant->init(pdev);
1186 if (ret) {
1187 dev_err(dev, "error initializing variant: %d\n", ret);
1188 return ret;
1189 }
1190 }
1191
1192 for (i = 0; i < variant->num_dai; i++) {
1193 dai_id = variant->dai_driver[i].id;
1194 if (dai_id == LPASS_DP_RX || is_cdc_dma_port(dai_id))
1195 continue;
1196
1197 drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
1198 variant->dai_osr_clk_names[i]);
1199 drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
1200 variant->dai_bit_clk_names[i]);
1201 if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
1202 dev_err(dev,
1203 "error getting %s: %ld\n",
1204 variant->dai_bit_clk_names[i],
1205 PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
1206 return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
1207 }
1208 if (drvdata->mi2s_playback_sd_mode[dai_id] ==
1209 LPAIF_I2SCTL_MODE_QUAD01) {
1210 variant->dai_driver[dai_id].playback.channels_min = 4;
1211 variant->dai_driver[dai_id].playback.channels_max = 4;
1212 }
1213 }
1214
1215 /* Allocation for i2sctl regmap fields */
1216 drvdata->i2sctl = devm_kzalloc(&pdev->dev, sizeof(struct lpaif_i2sctl),
1217 GFP_KERNEL);
1218
1219 /* Initialize bitfields for dai I2SCTL register */
1220 ret = lpass_cpu_init_i2sctl_bitfields(dev, drvdata->i2sctl,
1221 drvdata->lpaif_map);
1222 if (ret) {
1223 dev_err(dev, "error init i2sctl field: %d\n", ret);
1224 return ret;
1225 }
1226
1227 if (drvdata->hdmi_port_enable) {
1228 ret = lpass_hdmi_init_bitfields(dev, drvdata->hdmiif_map);
1229 if (ret) {
1230 dev_err(dev, "%s error hdmi init failed\n", __func__);
1231 return ret;
1232 }
1233 }
1234 ret = devm_snd_soc_register_component(dev,
1235 &lpass_cpu_comp_driver,
1236 variant->dai_driver,
1237 variant->num_dai);
1238 if (ret) {
1239 dev_err(dev, "error registering cpu driver: %d\n", ret);
1240 goto err;
1241 }
1242
1243 ret = asoc_qcom_lpass_platform_register(pdev);
1244 if (ret) {
1245 dev_err(dev, "error registering platform driver: %d\n", ret);
1246 goto err;
1247 }
1248
1249 err:
1250 return ret;
1251 }
1252 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_probe);
1253
asoc_qcom_lpass_cpu_platform_remove(struct platform_device * pdev)1254 int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
1255 {
1256 struct lpass_data *drvdata = platform_get_drvdata(pdev);
1257
1258 if (drvdata->variant->exit)
1259 drvdata->variant->exit(pdev);
1260
1261
1262 return 0;
1263 }
1264 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
1265
asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device * pdev)1266 void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev)
1267 {
1268 struct lpass_data *drvdata = platform_get_drvdata(pdev);
1269
1270 if (drvdata->variant->exit)
1271 drvdata->variant->exit(pdev);
1272
1273 }
1274 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_shutdown);
1275
1276 MODULE_DESCRIPTION("QTi LPASS CPU Driver");
1277 MODULE_LICENSE("GPL v2");
1278