// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/bitfield.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"

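/*
 * Channel control word bits: CCS and LLE are written to the per-channel
 * ch_control1 register, while CB, TCB, LLP, LIE and RIE go into the
 * control field of each linked-list element.
 */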
enum dw_edma_control {
	DW_EDMA_V0_CB		= BIT(0),
	DW_EDMA_V0_TCB		= BIT(1),
	DW_EDMA_V0_LLP		= BIT(2),
	DW_EDMA_V0_LIE		= BIT(3),
	DW_EDMA_V0_RIE		= BIT(4),
	DW_EDMA_V0_CCS		= BIT(8),
	DW_EDMA_V0_LLE		= BIT(9),
};

static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return dw->chip->reg_base;
}

#define SET_32(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET_32(dw, name)				\
	readl(&(__dw_regs(dw)->name))

#define SET_RW_32(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_32(dw, wr_##name, value);	\
		else					\
			SET_32(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_32(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_32(dw, wr_##name)			\
	  : GET_32(dw, rd_##name))

#define SET_BOTH_32(dw, name, value)			\
	do {						\
		SET_32(dw, wr_##name, value);		\
		SET_32(dw, rd_##name, value);		\
	} while (0)

#ifdef CONFIG_64BIT

#define SET_64(dw, name, value)				\
	writeq(value, &(__dw_regs(dw)->name))

#define GET_64(dw, name)				\
	readq(&(__dw_regs(dw)->name))

#define SET_RW_64(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_64(dw, wr_##name, value);	\
		else					\
			SET_64(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_64(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_64(dw, wr_##name)			\
	  : GET_64(dw, rd_##name))

#define SET_BOTH_64(dw, name, value)			\
	do {						\
		SET_64(dw, wr_##name, value);		\
		SET_64(dw, rd_##name, value);		\
	} while (0)

#endif /* CONFIG_64BIT */

#define SET_COMPAT(dw, name, value)			\
	writel(value, &(__dw_regs(dw)->type.unroll.name))

#define SET_RW_COMPAT(dw, dir, name, value)		\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_COMPAT(dw, wr_##name, value); \
		else					\
			SET_COMPAT(dw, rd_##name, value); \
	} while (0)

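/*
 * The legacy register map exposes a single channel register window that
 * is multiplexed through the viewport; the unrolled map gives every
 * write and read channel its own register block.
 */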
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
		return &(__dw_regs(dw)->type.legacy.ch);

	if (dir == EDMA_DIR_WRITE)
		return &__dw_regs(dw)->type.unroll.ch[ch].wr;

	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}

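/*
 * writel_ch()/readl_ch() serialize legacy accesses under dw->lock: the
 * viewport_sel register must select the target channel (BIT(31) picks
 * the read bank) before the shared window is touched.  Unrolled
 * mappings need no selection and access the register directly.
 */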
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writel(value, addr);
	}
}

static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}

#define SET_CH_32(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_32(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

#define SET_LL_32(ll, value) \
	writel(value, ll)

#ifdef CONFIG_64BIT

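/*
 * 64-bit variants of the viewport accessors, used for naturally aligned
 * 64-bit registers and linked-list fields when the architecture
 * provides readq()/writeq().
 */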
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u64 value, void __iomem *addr)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writeq(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writeq(value, addr);
	}
}

static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u64 value;

	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readq(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readq(addr);
	}

	return value;
}

218
219 #define SET_CH_64(dw, dir, ch, name, value) \
220 writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
221
222 #define GET_CH_64(dw, dir, ch, name) \
223 readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
224
225 #define SET_LL_64(ll, value) \
226 writeq(value, ll)
227
228 #endif /* CONFIG_64BIT */
229
230 /* eDMA management callbacks */
dw_edma_v0_core_off(struct dw_edma * dw)231 void dw_edma_v0_core_off(struct dw_edma *dw)
232 {
233 SET_BOTH_32(dw, int_mask,
234 EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
235 SET_BOTH_32(dw, int_clear,
236 EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
237 SET_BOTH_32(dw, engine_en, 0);
238 }
239
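/*
 * The ctrl register advertises how many write and read channels the
 * controller was synthesized with; the result is clamped to the
 * EDMA_V0_MAX_NR_CH channels this driver supports.
 */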
u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	u32 num_ch;

	if (dir == EDMA_DIR_WRITE)
		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
				   GET_32(dw, ctrl));
	else
		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
				   GET_32(dw, ctrl));

	if (num_ch > EDMA_V0_MAX_NR_CH)
		num_ch = EDMA_V0_MAX_NR_CH;

	return (u16)num_ch;
}

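/*
 * CH_STATUS values as reported by the hardware: 1 maps to
 * DMA_IN_PROGRESS, 3 to DMA_COMPLETE; any other value is treated as
 * DMA_ERROR.
 */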
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
			GET_CH_32(dw, chan->dir, chan->id, ch_control1));

	if (tmp == 1)
		return DMA_IN_PROGRESS;
	else if (tmp == 3)
		return DMA_COMPLETE;
	else
		return DMA_ERROR;
}

void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}

void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}

u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}

u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}

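/*
 * Lay a chunk out as a hardware linked list: one element per burst,
 * followed by a terminating LLP element pointing at the base of the
 * chunk's own list region and carrying the inverted cycle (CB) bit.
 * The last data element raises the local interrupt (LIE) and, unless
 * the eDMA is locally set up, the remote one (RIE) as well.
 */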
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma_v0_lli __iomem *lli;
	struct dw_edma_v0_llp __iomem *llp;
	u32 control = 0, i = 0;
	int j;

	lli = chunk->ll_region.vaddr;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j) {
			control |= DW_EDMA_V0_LIE;
			if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
				control |= DW_EDMA_V0_RIE;
		}
		/* Channel control */
		SET_LL_32(&lli[i].control, control);
		/* Transfer size */
		SET_LL_32(&lli[i].transfer_size, child->sz);
		/* SAR */
		#ifdef CONFIG_64BIT
			SET_LL_64(&lli[i].sar.reg, child->sar);
		#else /* CONFIG_64BIT */
			SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
			SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
		#endif /* CONFIG_64BIT */
		/* DAR */
		#ifdef CONFIG_64BIT
			SET_LL_64(&lli[i].dar.reg, child->dar);
		#else /* CONFIG_64BIT */
			SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
			SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
		#endif /* CONFIG_64BIT */
		i++;
	}

	llp = (void __iomem *)&lli[i];
	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;

	/* Channel control */
	SET_LL_32(&llp->control, control);
	/* Linked list */
	#ifdef CONFIG_64BIT
		SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
	#else /* CONFIG_64BIT */
		SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
		SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
	#endif /* CONFIG_64BIT */
}

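/*
 * Kick off a chunk.  Only the first chunk of a transfer programs the
 * engine enable, interrupt masks and linked-list pointer; every chunk
 * is then started by ringing the channel doorbell.
 */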
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW_32(dw, chan->dir, engine_en, BIT(0));
		if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
			switch (chan->id) {
			case 0:
				SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
					      BIT(0));
				break;
			case 1:
				SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
					      BIT(0));
				break;
			case 2:
				SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
					      BIT(0));
				break;
			case 3:
				SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
					      BIT(0));
				break;
			case 4:
				SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
					      BIT(0));
				break;
			case 5:
				SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
					      BIT(0));
				break;
			case 6:
				SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
					      BIT(0));
				break;
			case 7:
				SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
					      BIT(0));
				break;
			}
		}
		/* Interrupt unmask - done, abort */
		tmp = GET_RW_32(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, int_mask, tmp);
		/* Linked list error */
		tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control */
		SET_CH_32(dw, chan->dir, chan->id, ch_control1,
			  (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked list */
		/* llp is not aligned on 64bit -> keep 32bit accesses */
		SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
			  lower_32_bits(chunk->ll_region.paddr));
		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
			  upper_32_bits(chunk->ll_region.paddr));
	}
	/* Doorbell */
	SET_RW_32(dw, chan->dir, doorbell,
		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}

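/*
 * Program the remote interrupt (iMWr) doorbell: done and abort events
 * share one MSI address, and each imwr_data register carries the MSI
 * payload for a pair of channels, odd IDs in the upper half and even
 * IDs in the lower half.
 */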
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);
	/* MSI data - low, high */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	switch (chan->id) {
	case 0:
	case 1:
		SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}

/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
	dw_edma_v0_debugfs_on(dw);
}

void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
{
	dw_edma_v0_debugfs_off(dw);
}