// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/bug.h>
#include <linux/reset.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
#define PL330_QUIRK_PERIPH_BURST	BIT(1)

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
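
/*
 * Illustrative example (worked from the macros above, not quoted from
 * the TRM): with a CCR whose burst-size field is 2 (1 << 2 = 4-byte
 * beats) and whose burst-length field is 15 (15 + 1 = 16 beats), one
 * burst moves 4 * 16 = 64 bytes, so BYTE_TO_BURST(256, ccr) == 4 and
 * BURST_TO_BYTE(4, ccr) == 256.
 */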

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(KERN_CONT x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */

#define NR_DEFAULT_DESC	16

/* Delay for runtime PM autosuspend, ms */
#define PL330_AUTOSUSPEND_DELAY 20

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:11;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request completed successfully. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
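
/*
 * Descriptor lifecycle implied by the states above (summary, derived
 * from the code in this file): FREE -> PREP (allocated in a prep_*
 * call) -> BUSY (handed to the PL330 core, at most two per channel) ->
 * DONE (transfer finished, awaiting callback) -> FREE (returned to the
 * DMAC pool by the channel tasklet); cyclic descriptors are instead
 * recycled from DONE back to PREP.
 */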

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	phys_addr_t fifo_addr;
	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
	dma_addr_t fifo_dma;
	enum dma_data_direction dir;
	struct dma_slave_config slave_config;

	/* for cyclic capability */
	bool cyclic;

	/* for runtime pm tracking */
	bool active;
};

struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
	int quirks;

	struct reset_control	*rstc;
	struct reset_control	*rstc_ocp;
};

static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	},
	{
		.quirk = "arm,pl330-periph-burst",
		.id = PL330_QUIRK_PERIPH_BURST,
	}
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	int bytes_requested;
	bool last;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};

static int pl330_config_write(struct dma_chan *chan,
			struct dma_slave_config *slave_config,
			enum dma_transfer_direction direction);

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}
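
/*
 * Illustrative encoding (hand-checked against the code above, not
 * quoted from the TRM): _emit_FLUSHP(0, buf, 5) writes the two bytes
 * { 0x35, 0x28 } -- the CMD_DMAFLUSHP opcode, then peripheral 5
 * shifted into bits [7:3] of the operand byte.
 */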

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}
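
/*
 * Illustrative encoding (derived from the code above): a 16-iteration
 * loop on counter 1, _emit_LP(0, buf, 1, 16), emits { 0x22, 0x0f } --
 * CMD_DMALP with bit 1 set to select loop counter 1, and 16 - 1 = 15
 * in the count byte, since the DMAC iterates cnt + 1 times.
 */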

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	buf[2] = val;
	buf[3] = val >> 8;
	buf[4] = val >> 16;
	buf[5] = val >> 24;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}
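
/*
 * Illustrative encoding (derived from the code above): loading SAR
 * with the example address 0x40008000, _emit_MOV(0, buf, SAR,
 * 0x40008000), emits the six bytes { 0xbc, 0x00, 0x00, 0x80, 0x00,
 * 0x40 } -- opcode, destination register index, then the 32-bit value
 * in little-endian byte order.
 */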

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);
	buf[1] = chan & 0x7;
	buf[2] = addr;
	buf[3] = addr >> 8;
	buf[4] = addr >> 16;
	buf[5] = addr >> 24;

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
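
/*
 * Explanatory note (not from the original source): loops_per_jiffy * HZ
 * is roughly the number of busy-wait iterations per second, so dividing
 * by 1000 and multiplying by t gives the iteration budget for a
 * t-millisecond poll, as consumed by _until_dmac_idle() below.
 */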

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = le32_to_cpu(*((__le32 *)&insn[2]));
	writel(val, regs + DBGINST1);

	/* Get going */
	writel(0, regs + DBGCMD);
}
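
/*
 * Illustrative DBGINST0 encoding (worked from the code above): issuing
 * DMAKILL (insn[0] = 0x01, insn[1] = 0x00) on channel thread 2 yields
 * val = (0x01 << 16) | (1 << 0) | (2 << 8) = 0x00010201 -- instruction
 * bytes in [31:16], channel number in [10:8], and bit 0 set to select
 * a channel thread rather than the manager.
 */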

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	u32 inten = readl(regs + INTEN);

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));

	/* clear the event */
	if (inten & (1 << thrd->ev))
		writel(1 << thrd->ev, regs + INTCLR);
	/* Stop generating interrupts for SEV */
	writel(inten & ~(1 << thrd->ev), regs + INTEN);
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	/* Return if req is running */
	if (idx == thrd->req_running)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_FAULTING:
		_stop(thrd);
		fallthrough;

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static u32 _emit_load(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_MEM_TO_DEV:
		off += _emit_LD(dry_run, &buf[off], cond);
		break;

	case DMA_DEV_TO_MEM:
		if (cond == ALWAYS) {
			off += _emit_LDP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_LDP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_LDP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_DEV_TO_MEM:
		off += _emit_ST(dry_run, &buf[off], cond);
		break;

	case DMA_MEM_TO_DEV:
		if (cond == ALWAYS) {
			off += _emit_STP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_STP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_STP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline int _ldst_peripheral(struct pl330_dmac *pl330,
				 unsigned dry_run, u8 buf[],
				 const struct _xfer_spec *pxs, int cyc,
				 enum pl330_cond cond)
{
	int off = 0;

	/*
	 * do FLUSHP at beginning to clear any stale dma requests before the
	 * first WFP.
	 */
	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
	}

	return off;
}

static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;

	if (pl330->quirks & PL330_QUIRK_PERIPH_BURST)
		cond = BURST;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
			cond);
		break;

	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/*
 * Only the unaligned burst transfers have the dregs,
 * so still transfer dregs with a reduced size burst
 * for mem-to-mem, mem-to-dev or dev-to-mem.
 */
static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int transfer_length)
{
	int off = 0;
	int dregs_ccr;

	if (transfer_length == 0)
		return off;

	/*
	 * dregs_len = (total bytes - BURST_TO_BYTE(bursts, ccr)) /
	 *             BRST_SIZE(ccr)
	 * the dregs len must be smaller than burst len,
	 * so, for higher efficiency, we can modify CCR
	 * to use a reduced size burst len for the dregs.
	 */
	dregs_ccr = pxs->ccr;
	dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
		(0xf << CC_DSTBRSTLEN_SHFT));
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_SRCBRSTLEN_SHFT);
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_DSTBRSTLEN_SHFT);

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1,
			BURST);
		break;

	case DMA_MEM_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	if (*bursts == 1)
		return _bursts(pl330, dry_run, buf, pxs, 1);

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(pl330, 1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
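
/*
 * Worked example (illustrative, derived from the factorisation above):
 * for *bursts == 70000, the first branch picks lcnt0 = lcnt1 = 256 and
 * cyc = 70000 / 256 / 256 = 1 (integer division), so this call emits a
 * 256 x 256 nested loop covering 65536 bursts and reports that count
 * back through *bursts; the caller (_setup_loops) then iterates again
 * for the remaining 4464 bursts.
 */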

static inline int _setup_loops(struct pl330_dmac *pl330,
			 unsigned dry_run, u8 buf[],
			 const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /
		BRST_SIZE(ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}
	off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);

	return off;
}
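
/*
 * Worked dregs example (illustrative): with 4-byte beats and 16-beat
 * bursts (64 bytes per burst), x->bytes == 100 gives bursts == 1 and
 * num_dregs == (100 - 64) / 4 == 9, so the tail is moved as a single
 * reduced 9-beat burst emitted by _dregs().
 */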

static inline int _setup_xfer(struct pl330_dmac *pl330,
			unsigned dry_run, u8 buf[],
			const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(pl330, dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
		      struct pl330_thread *thrd, unsigned index,
		      struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
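
/*
 * Worked CCR example (illustrative, computed from the code above): an
 * incrementing secure transfer with brst_size = 2 (4-byte beats) and
 * brst_len = 16, with all cache-control and swap fields zero, yields
 * CC_SRCINC | CC_DSTINC | (0xf << CC_SRCBRSTLEN_SHFT) |
 * (0xf << CC_DSTBRSTLEN_SHFT) | (2 << CC_SRCBRSTSIZE_SHFT) |
 * (2 << CC_DSTBRSTSIZE_SHFT) == 0x003d40f5.
 */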

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
	struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	switch (desc->rqtype) {
	case DMA_MEM_TO_DEV:
		break;

	case DMA_DEV_TO_MEM:
		break;

	case DMA_MEM_TO_MEM:
		break;

	default:
		return -ENOTSUPP;
	}

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(pl330, 1, thrd, idx, &xs);

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
				__func__, __LINE__, ret, pl330->mcbufsz / 2);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(pl330, 0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (!desc)
		return;

	pch = desc->pchan;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static void pl330_dotask(struct tasklet_struct *t)
{
	struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e., thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			thrd->req_running = -1;

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		descdone = list_first_entry(&pl330->req_done,
					    struct dma_pl330_desc, rqd);
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	_free_event(thrd, thrd->ev);
	thrd->free = true;
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}
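
/*
 * Microcode buffer layout implied above (illustrative sketch): channel
 * thread i owns the mcbufsz-byte slice at mcode_cpu + i * mcbufsz,
 * split into two halves so that req[0] and req[1] each get mcbufsz / 2
 * bytes -- which is why pl330_submit_req() rejects programs whose dry
 * run exceeds mcbufsz / 2.
 */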

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kcalloc(1 + chans, sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL,
				DMA_ATTR_PRIVILEGED);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
1930 __func__, __LINE__);
1931 dma_free_attrs(pl330->ddma.dev,
1932 chans * pl330->mcbufsz,
1933 pl330->mcode_cpu, pl330->mcode_bus,
1934 DMA_ATTR_PRIVILEGED);
1935 return ret;
1936 }
1937
1938 return 0;
1939 }
1940
pl330_add(struct pl330_dmac * pl330)1941 static int pl330_add(struct pl330_dmac *pl330)
1942 {
1943 int i, ret;
1944
1945 /* Check if we can handle this DMAC */
1946 if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
1947 dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
1948 pl330->pcfg.periph_id);
1949 return -EINVAL;
1950 }
1951
1952 /* Read the configuration of the DMAC */
1953 read_dmac_config(pl330);
1954
1955 if (pl330->pcfg.num_events == 0) {
1956 dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
1957 __func__, __LINE__);
1958 return -EINVAL;
1959 }
1960
1961 spin_lock_init(&pl330->lock);
1962
1963 INIT_LIST_HEAD(&pl330->req_done);
1964
1965 /* Use default MC buffer size if not provided */
1966 if (!pl330->mcbufsz)
1967 pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;
1968
1969 /* Mark all events as free */
1970 for (i = 0; i < pl330->pcfg.num_events; i++)
1971 pl330->events[i] = -1;
1972
1973 /* Allocate resources needed by the DMAC */
1974 ret = dmac_alloc_resources(pl330);
1975 if (ret) {
1976 dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
1977 return ret;
1978 }
1979
1980 tasklet_setup(&pl330->tasks, pl330_dotask);
1981
1982 pl330->state = INIT;
1983
1984 return 0;
1985 }
1986
dmac_free_threads(struct pl330_dmac * pl330)1987 static int dmac_free_threads(struct pl330_dmac *pl330)
1988 {
1989 struct pl330_thread *thrd;
1990 int i;
1991
1992 /* Release Channel threads */
1993 for (i = 0; i < pl330->pcfg.num_chan; i++) {
1994 thrd = &pl330->channels[i];
1995 pl330_release_channel(thrd);
1996 }
1997
1998 /* Free memory */
1999 kfree(pl330->channels);
2000
2001 return 0;
2002 }
2003
pl330_del(struct pl330_dmac * pl330)2004 static void pl330_del(struct pl330_dmac *pl330)
2005 {
2006 pl330->state = UNINIT;
2007
2008 tasklet_kill(&pl330->tasks);
2009
2010 /* Free DMAC resources */
2011 dmac_free_threads(pl330);
2012
2013 dma_free_attrs(pl330->ddma.dev,
2014 pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
2015 pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
2016 }
2017
2018 /* forward declaration */
2019 static struct amba_driver pl330_driver;
2020
2021 static inline struct dma_pl330_chan *
to_pchan(struct dma_chan * ch)2022 to_pchan(struct dma_chan *ch)
2023 {
2024 if (!ch)
2025 return NULL;
2026
2027 return container_of(ch, struct dma_pl330_chan, chan);
2028 }
2029
2030 static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor * tx)2031 to_desc(struct dma_async_tx_descriptor *tx)
2032 {
2033 return container_of(tx, struct dma_pl330_desc, txd);
2034 }
2035
fill_queue(struct dma_pl330_chan * pch)2036 static inline void fill_queue(struct dma_pl330_chan *pch)
2037 {
2038 struct dma_pl330_desc *desc;
2039 int ret;
2040
2041 list_for_each_entry(desc, &pch->work_list, node) {
2042
2043 /* If already submitted */
2044 if (desc->status == BUSY)
2045 continue;
2046
2047 ret = pl330_submit_req(pch->thread, desc);
2048 if (!ret) {
2049 desc->status = BUSY;
2050 } else if (ret == -EAGAIN) {
2051 /* QFull or DMAC Dying */
2052 break;
2053 } else {
2054 /* Unacceptable request */
2055 desc->status = DONE;
2056 dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
2057 __func__, __LINE__, desc->txd.cookie);
2058 tasklet_schedule(&pch->task);
2059 }
2060 }
2061 }

static void pl330_tasklet(struct tasklet_struct *t)
{
	struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	bool power_down = false;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req immediately next to the last completed cookie */
	fill_queue(pch);

	if (list_empty(&pch->work_list)) {
		spin_lock(&pch->thread->dmac->lock);
		_stop(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
		power_down = true;
		pch->active = false;
	} else {
		/* Make sure the PL330 Channel thread is active */
		spin_lock(&pch->thread->dmac->lock);
		_start(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
	}

	while (!list_empty(&pch->completed_list)) {
		struct dmaengine_desc_callback cb;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		dmaengine_desc_get_callback(&desc->txd, &cb);

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
			if (power_down) {
				pch->active = true;
				spin_lock(&pch->thread->dmac->lock);
				_start(pch->thread);
				spin_unlock(&pch->thread->dmac->lock);
				power_down = false;
			}
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&pch->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);

	/* If work list empty, power down */
	if (power_down) {
		pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
		pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	}
}

static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pl330)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}
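
/*
 * Example devicetree usage (illustrative only; node names and the request
 * id are hypothetical). The xlate above expects exactly one cell, holding
 * the peripheral request id:
 *
 *	pdma0: dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		#dma-cells = <1>;
 *	};
 *
 *	client {
 *		dmas = <&pdma0 5>;
 *		dma-names = "tx";
 *	};
 */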

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pl330->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pl330->lock, flags);
		return -ENOMEM;
	}

	tasklet_setup(&pch->task, pl330_tasklet);

	spin_unlock_irqrestore(&pl330->lock, flags);

	return 1;
}

/*
 * We need the data direction between the DMAC (the dma-mapping "device") and
 * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
 */
static enum dma_data_direction
pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:
		return DMA_FROM_DEVICE;
	case DMA_DEV_TO_MEM:
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_DEV:
		return DMA_BIDIRECTIONAL;
	default:
		return DMA_NONE;
	}
}
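
/*
 * Worked example of the mapping above: for DMA_MEM_TO_DEV the DMAC (the
 * dma-mapping "device") reads memory and writes the peripheral FIFO, so
 * from the FIFO's point of view the data arrives *from* that device and
 * the FIFO resource is mapped DMA_FROM_DEVICE. DMA_DEV_TO_MEM is the
 * mirror case, hence DMA_TO_DEVICE.
 */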

static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
{
	if (pch->dir != DMA_NONE)
		dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
				   1 << pch->burst_sz, pch->dir, 0);
	pch->dir = DMA_NONE;
}

static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
				  enum dma_transfer_direction dir)
{
	struct device *dev = pch->chan.device->dev;
	enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);

	/* Already mapped for this config? */
	if (pch->dir == dma_dir)
		return true;

	pl330_unprep_slave_fifo(pch);
	pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
					 1 << pch->burst_sz, dma_dir, 0);
	if (dma_mapping_error(dev, pch->fifo_dma))
		return false;

	pch->dir = dma_dir;
	return true;
}

static int fixup_burst_len(int max_burst_len, int quirks)
{
	if (max_burst_len > PL330_MAX_BURST)
		return PL330_MAX_BURST;
	else if (max_burst_len < 1)
		return 1;
	else
		return max_burst_len;
}
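
/*
 * Example of the clamping above: a dst_maxburst of 32 from dma_slave_config
 * is capped to PL330_MAX_BURST (16), and an unset (0) maxburst becomes a
 * single-beat burst of 1.
 */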

static int pl330_config_write(struct dma_chan *chan,
			      struct dma_slave_config *slave_config,
			      enum dma_transfer_direction direction)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	pl330_unprep_slave_fifo(pch);
	if (direction == DMA_MEM_TO_DEV) {
		if (slave_config->dst_addr)
			pch->fifo_addr = slave_config->dst_addr;
		if (slave_config->dst_addr_width)
			pch->burst_sz = __ffs(slave_config->dst_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
						 pch->dmac->quirks);
	} else if (direction == DMA_DEV_TO_MEM) {
		if (slave_config->src_addr)
			pch->fifo_addr = slave_config->src_addr;
		if (slave_config->src_addr_width)
			pch->burst_sz = __ffs(slave_config->src_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
						 pch->dmac->quirks);
	}

	return 0;
}
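
/*
 * Since the dma_slave_buswidth enum values are byte counts, burst_sz above
 * ends up as the log2 of the bus width: e.g. DMA_SLAVE_BUSWIDTH_4_BYTES (4)
 * gives __ffs(4) = 2, and 1 << pch->burst_sz recovers the 4-byte beat size
 * used when (un)mapping the FIFO resource.
 */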

static int pl330_config(struct dma_chan *chan,
			struct dma_slave_config *slave_config)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	memcpy(&pch->slave_config, slave_config, sizeof(*slave_config));

	return 0;
}

static int pl330_terminate_all(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;
	bool power_down = false;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	_stop(pch->thread);
	pch->thread->req[0].desc = NULL;
	pch->thread->req[1].desc = NULL;
	pch->thread->req_running = -1;
	spin_unlock(&pl330->lock);

	power_down = pch->active;
	pch->active = false;

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->submitted_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_for_each_entry(desc, &pch->work_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	if (power_down)
		pm_runtime_put_autosuspend(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}

/*
 * We don't support the DMA_RESUME command because of hardware
 * limitations, so after pausing the channel we cannot restore
 * it to the active state. We have to terminate the channel and
 * set up the DMA transfer again. This pause feature was implemented
 * to allow safely reading the residue before channel termination.
 */
static int pl330_pause(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	_stop(pch->thread);
	spin_unlock(&pl330->lock);

	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}
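
/*
 * A client that wants an exact residue would therefore use the sequence
 * below (illustrative sketch using the generic dmaengine wrappers; the
 * cookie comes from the client's earlier descriptor submission):
 *
 *	struct dma_tx_state state;
 *
 *	dmaengine_pause(chan);
 *	dmaengine_tx_status(chan, cookie, &state);
 *	// state.residue is now stable
 *	dmaengine_terminate_sync(chan);
 */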

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	tasklet_kill(&pch->task);

	pm_runtime_get_sync(pch->dmac->ddma.dev);
	spin_lock_irqsave(&pl330->lock, flags);

	pl330_release_channel(pch->thread);
	pch->thread = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pl330->lock, flags);
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	pl330_unprep_slave_fifo(pch);
}

static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
					   struct dma_pl330_desc *desc)
{
	struct pl330_thread *thrd = pch->thread;
	struct pl330_dmac *pl330 = pch->dmac;
	void __iomem *regs = thrd->dmac->base;
	u32 val, addr;

	pm_runtime_get_sync(pl330->ddma.dev);
	val = addr = 0;
	if (desc->rqcfg.src_inc) {
		val = readl(regs + SA(thrd->id));
		addr = desc->px.src_addr;
	} else {
		val = readl(regs + DA(thrd->id));
		addr = desc->px.dst_addr;
	}
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	/* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
	if (!val)
		return 0;

	return val - addr;
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	enum dma_status ret;
	unsigned long flags;
	struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned int transferred, residual = 0;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!txstate)
		return ret;

	if (ret == DMA_COMPLETE)
		goto out;

	spin_lock_irqsave(&pch->lock, flags);
	spin_lock(&pch->thread->dmac->lock);

	if (pch->thread->req_running != -1)
		running = pch->thread->req[pch->thread->req_running].desc;

	last_enq = pch->thread->req[pch->thread->lstenq].desc;

	/* Check in pending list */
	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE)
			transferred = desc->bytes_requested;
		else if (running && desc == running)
			transferred =
				pl330_get_current_xferred_count(pch, desc);
		else if (desc->status == BUSY)
			/*
			 * Busy but not running means either just enqueued,
			 * or finished and not yet marked done
			 */
			if (desc == last_enq)
				transferred = 0;
			else
				transferred = desc->bytes_requested;
		else
			transferred = 0;
		residual += desc->bytes_requested - transferred;
		if (desc->txd.cookie == cookie) {
			switch (desc->status) {
			case DONE:
				ret = DMA_COMPLETE;
				break;
			case PREP:
			case BUSY:
				ret = DMA_IN_PROGRESS;
				break;
			default:
				WARN_ON(1);
			}
			break;
		}
		if (desc->last)
			residual = 0;
	}
	spin_unlock(&pch->thread->dmac->lock);
	spin_unlock_irqrestore(&pch->lock, flags);

out:
	dma_set_residue(txstate, residual);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	if (list_empty(&pch->work_list)) {
		/*
		 * Warn on nothing pending. An empty submitted_list may
		 * break our pm_runtime usage counter, as it is updated
		 * based on whether work_list becomes empty.
		 */
		WARN_ON(list_empty(&pch->submitted_list));
		pch->active = true;
		pm_runtime_get_sync(pch->dmac->ddma.dev);
	}
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet(&pch->task);
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}
		desc->last = false;

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	last->last = true;
	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct list_head *pool, spinlock_t *lock,
		    gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, pool);
	}

	spin_unlock_irqrestore(lock, flags);

	return count;
}

static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
					 spinlock_t *lock)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (!list_empty(pool)) {
		desc = list_entry(pool->next,
				  struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct pl330_dmac *pl330 = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		static DEFINE_SPINLOCK(lock);
		LIST_HEAD(pool);

		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
			return NULL;

		desc = pluck_desc(&pool, &lock);
		WARN_ON(!desc || !list_empty(&pool));
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
			   dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than those that
	 * can be programmed with 256 bytes of MC buffer, but considering
	 * a req size is seldom going to be word-unaligned and more than
	 * 200MB, we take it easy.
	 * Also, should the limit be reached, we'd rather have the platform
	 * increase the MC buffer size than complicate this driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_dmac *pl330 = pch->dmac;
	int burst_len;

	burst_len = pl330->pcfg.data_bus_width / 8;
	burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > PL330_MAX_BURST)
		burst_len = PL330_MAX_BURST;

	return burst_len;
}
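
/*
 * Numerical example for get_burst_len() (values hypothetical): with a
 * 64-bit data bus (8 bytes/beat), data_buf_dep = 16 and num_chan = 8, the
 * per-channel MFIFO share is 8 * (16 / 8) = 16 bytes; with brst_size = 2
 * (4-byte beats) that yields 16 >> 2 = 4 beats, well under PL330_MAX_BURST.
 */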

static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	pl330_config_write(chan, &pch->slave_config, direction);

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			unsigned long iflags;

			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pl330->pool_lock, iflags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						  struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pl330->desc_pool);
			}

			list_move_tail(&first->node, &pl330->desc_pool);

			spin_unlock_irqrestore(&pl330->pool_lock, iflags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_dma;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_dma;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->bytes_requested = period_len;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		      dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pl330 = pch->dmac;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pl330->pcfg.data_bus_width / 8;

	/*
	 * Make sure we use a burst size that aligns with all the memcpy
	 * parameters because our DMA programming algorithm doesn't cope with
	 * transfers which straddle an entry in the DMA device's MFIFO.
	 */
	while ((src | dst | len) & (burst - 1))
		burst /= 2;

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);
	/*
	 * If the burst size is smaller than the bus width then make sure we
	 * only transfer one beat at a time, to avoid a burst straddling an
	 * MFIFO entry.
	 */
	if (burst * 8 < pl330->pcfg.data_bus_width)
		desc->rqcfg.brst_len = 1;

	desc->bytes_requested = len;

	return &desc->txd;
}
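
/*
 * Worked example of the burst selection above (hypothetical values): on a
 * 32-bit bus, burst starts at 4 bytes. For src = 0x1002, dst = 0x2000 and
 * len = 0x100, (src | dst | len) & 3 is non-zero, so burst halves to 2;
 * brst_size then solves 2 == 1 << brst_size, giving 1 (16-bit beats), and
 * since 2 * 8 < 32 the burst length is forced to a single beat.
 */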

static void __pl330_giveback_desc(struct pl330_dmac *pl330,
				  struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				  struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pl330->desc_pool);
	}

	list_move_tail(&first->node, &pl330->desc_pool);

	spin_unlock_irqrestore(&pl330->pool_lock, flags);
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction direction,
		    unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	pl330_config_write(chan, &pch->slave_config, direction);

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct pl330_dmac *pl330 = pch->dmac;

			dev_err(pch->dmac->ddma.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pl330, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
				sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
				sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->rqtype = direction;
		desc->bytes_requested = sg_dma_len(sg);
	}

	/* Return the last desc in the chain */
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)

#ifdef CONFIG_DEBUG_FS
static int pl330_debugfs_show(struct seq_file *s, void *data)
{
	struct pl330_dmac *pl330 = s->private;
	int chans, pchs, ch, pr;

	chans = pl330->pcfg.num_chan;
	pchs = pl330->num_peripherals;

	seq_puts(s, "PL330 physical channels:\n");
	seq_puts(s, "THREAD:\t\tCHANNEL:\n");
	seq_puts(s, "--------\t-----\n");
	for (ch = 0; ch < chans; ch++) {
		struct pl330_thread *thrd = &pl330->channels[ch];
		int found = -1;

		for (pr = 0; pr < pchs; pr++) {
			struct dma_pl330_chan *pch = &pl330->peripherals[pr];

			if (!pch->thread || thrd->id != pch->thread->id)
				continue;

			found = pr;
		}

		seq_printf(s, "%d\t\t", thrd->id);
		if (found == -1)
			seq_puts(s, "--\n");
		else
			seq_printf(s, "%d\n", found);
	}

	return 0;
}
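
/*
 * Sample of the output produced above (values illustrative), read from the
 * per-device file under debugfs (typically mounted at /sys/kernel/debug):
 *
 *	PL330 physical channels:
 *	THREAD:		CHANNEL:
 *	--------	-----
 *	0		2
 *	1		--
 */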

DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);

static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
{
	debugfs_create_file(dev_name(pl330->ddma.dev),
			    S_IFREG | 0444, NULL, pl330,
			    &pl330_debugfs_fops);
}
#else
static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
{
}
#endif

/*
 * Runtime PM callbacks are provided by the amba/bus.c driver.
 *
 * It is assumed here that IRQ safe runtime PM is chosen in probe and that
 * the amba bus driver will only disable/enable the clock in its runtime PM
 * callbacks.
 */
static int __maybe_unused pl330_suspend(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);

	pm_runtime_force_suspend(dev);
	clk_unprepare(pcdev->pclk);

	return 0;
}

static int __maybe_unused pl330_resume(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);
	int ret;

	ret = clk_prepare(pcdev->pclk);
	if (ret)
		return ret;

	pm_runtime_force_resume(dev);

	return ret;
}

static const struct dev_pm_ops pl330_pm = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
};

static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl330_config *pcfg;
	struct pl330_dmac *pl330;
	struct dma_pl330_chan *pch, *_p;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;
	struct device_node *np = adev->dev.of_node;

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
	if (!pl330)
		return -ENOMEM;

	pd = &pl330->ddma;
	pd->dev = &adev->dev;

	pl330->mcbufsz = 0;

	/* get quirks */
	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
		if (of_property_read_bool(np, of_quirks[i].quirk))
			pl330->quirks |= of_quirks[i].id;

	res = &adev->res;
	pl330->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pl330->base))
		return PTR_ERR(pl330->base);

	amba_set_drvdata(adev, pl330);

	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
	if (IS_ERR(pl330->rstc)) {
		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc), "Failed to get reset!\n");
	} else {
		ret = reset_control_deassert(pl330->rstc);
		if (ret) {
			dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
			return ret;
		}
	}

	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
	if (IS_ERR(pl330->rstc_ocp)) {
		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
				     "Failed to get OCP reset!\n");
	} else {
		ret = reset_control_deassert(pl330->rstc_ocp);
		if (ret) {
			dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
			return ret;
		}
	}

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pl330);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pcfg = &pl330->pcfg;

	pcfg->periph_id = adev->periphid;
	ret = pl330_add(pl330);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pl330->desc_pool);
	spin_lock_init(&pl330->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
		      GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

	pl330->num_peripherals = num_chan;

	pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
	if (!pl330->peripherals) {
		ret = -ENOMEM;
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pl330->peripherals[i];

		pch->chan.private = adev->dev.of_node;
		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;
		pch->dir = DMA_NONE;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	dma_cap_set(DMA_MEMCPY, pd->cap_mask);
	if (pcfg->num_peri) {
		dma_cap_set(DMA_SLAVE, pd->cap_mask);
		dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		dma_cap_set(DMA_PRIVATE, pd->cap_mask);
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_config = pl330_config;
	pd->device_pause = pl330_pause;
	pd->device_terminate_all = pl330_terminate_all;
	pd->device_issue_pending = pl330_issue_pending;
	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	pd->max_burst = PL330_MAX_BURST;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
						 of_dma_pl330_xlate, pl330);
		if (ret) {
			dev_err(&adev->dev,
				"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	/*
	 * This is the limit for transfers with a buswidth of 1; larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	init_pl330_debugfs(pl330);
	dev_info(&adev->dev,
		 "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
	dev_info(&adev->dev,
		 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
		 pcfg->num_peri, pcfg->num_events);

	pm_runtime_irq_safe(&adev->dev);
	pm_runtime_use_autosuspend(&adev->dev);
	pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&adev->dev);
	pm_runtime_put_autosuspend(&adev->dev);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
				 chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}
probe_err2:
	pl330_del(pl330);

	if (pl330->rstc_ocp)
		reset_control_assert(pl330->rstc_ocp);

	if (pl330->rstc)
		reset_control_assert(pl330->rstc);
	return ret;
}

static void pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	int i, irq;

	pm_runtime_get_noresume(pl330->ddma.dev);

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq)
			devm_free_irq(&adev->dev, irq, pl330);
	}

	dma_async_device_unregister(&pl330->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
				 chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}

	pl330_del(pl330);

	if (pl330->rstc_ocp)
		reset_control_assert(pl330->rstc_ocp);

	if (pl330->rstc)
		reset_control_assert(pl330->rstc);
}

static const struct amba_id pl330_ids[] = {
	{
		.id = 0x00041330,
		.mask = 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");