// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of FIFO is very
 * similar, but some differences exist.
 * Those differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE |
		 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_BURST,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

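/* Helpers to retrieve the fifo context attached to the substream's cpu DAI */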
static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return asoc_rtd_to_cpu(rtd, 0);
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}

int axg_fifo_pcm_trigger(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);

snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
				       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

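	/*
	 * STATUS2 is set up in axg_fifo_pcm_open() to report the current
	 * DDR address of the DMA, so the offset within the runtime buffer
	 * is simply the difference with the buffer start address.
	 */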
	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);

int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
			   struct snd_pcm_substream *ss,
			   struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int burst_num, period, threshold, irq_en;
	dma_addr_t end_ptr;

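	/* Period bytes are a multiple of AXG_FIFO_BURST (see axg_fifo_pcm_open()) */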
	period = params_period_bytes(params);

	/*
	 * Setup dma memory pointers. The finish address points to the
	 * start of the last burst of the buffer.
	 */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity: one block interrupt per period */
	burst_num = period / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/*
	 * Start the fifo request on the smallest of the following:
	 * - Half the fifo size
	 * - Half the period size
	 */
	threshold = min(period / 2, fifo->depth / 2);

	/*
	 * With the threshold in bytes, the register value is:
	 * V = (threshold / burst) - 1
	 */
	threshold /= AXG_FIFO_BURST;
	regmap_field_write(fifo->field_threshold,
			   threshold ? threshold - 1 : 0);

	/* Enable the block interrupt unless period wakeups are disabled */
	irq_en = runtime->no_period_wakeup ? 0 : FIFO_INT_COUNT_REPEAT;
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(irq_en));

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);

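/*
 * On top of the common axg setup, the g12a fifo has an init address
 * register pointing to the start of the DMA buffer.
 */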
int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
			    struct snd_pcm_substream *ss,
			    struct snd_pcm_hw_params *params)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	int ret;

	ret = axg_fifo_pcm_hw_params(component, ss, params);
	if (ret)
		return ret;

	/* Set the initial memory address of the DMA */
	regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);

	return 0;
}
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);

int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable the block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);

static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* The clear bits must also be cleared back to zero */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}

static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

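	/* IRQ_RETVAL() reports IRQ_HANDLED if at least one status bit was set */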
	return IRQ_RETVAL(status);
}

int axg_fifo_pcm_open(struct snd_soc_component *component,
		      struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are multiples of the FIFO
	 * burst size
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

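	/*
	 * The substream is used as the irq cookie so the handler can call
	 * snd_pcm_period_elapsed() on it directly.
	 */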
	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
			  dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		goto free_irq;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL_MASK,
			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_MASK), 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		goto free_clk;

	return 0;

free_clk:
	clk_disable_unprepare(fifo->pclk);
free_irq:
	free_irq(fifo->irq, ss);
	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);

int axg_fifo_pcm_close(struct snd_soc_component *component,
		       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* Remove the IRQ */
	free_irq(fifo->irq, ss);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);

int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

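	/*
	 * Preallocate the maximum buffer size up front so the ALSA core
	 * never needs to enlarge the buffer in hw_params.
	 */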
	snd_pcm_set_managed_buffer(rtd->pcm->streams[type].substream,
				   SNDRV_DMA_TYPE_DEV, card->dev,
				   size, size);
	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FIFO_CTRL2,
};

int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	void __iomem *regs;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk))
		return dev_err_probe(dev, PTR_ERR(fifo->pclk), "failed to get pclk\n");

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb))
		return dev_err_probe(dev, PTR_ERR(fifo->arb), "failed to get arb reset\n");

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

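	/* The threshold register field layout is SoC specific and comes from the match data */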
	fifo->field_threshold =
		devm_regmap_field_alloc(dev, fifo->map, data->field_threshold);
	if (IS_ERR(fifo->field_threshold))
		return PTR_ERR(fifo->field_threshold);

	ret = of_property_read_u32(dev->of_node, "amlogic,fifo-depth",
				   &fifo->depth);
	if (ret) {
		/* Error out for anything but a missing property */
		if (ret != -EINVAL)
			return ret;
		/*
		 * If the property is missing, it might be because of an old
		 * DT. In this case, assume the smallest known fifo depth.
		 */
		fifo->depth = 256;
		dev_warn(dev, "fifo depth not found, assume %u bytes\n",
			 fifo->depth);
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");