// SPDX-License-Identifier: GPL-2.0-only
/*
 * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
 *
 * Copyright (C) 2009 Secret Lab Technologies Ltd.
 *
 * Todo:
 * - Add support for multiple requests to be queued.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
#include <asm/time.h>

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include <linux/fsl/bestcomm/gen_bd.h>

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
MODULE_LICENSE("GPL");

#define LPBFIFO_REG_PACKET_SIZE		(0x00)
#define LPBFIFO_REG_START_ADDRESS	(0x04)
#define LPBFIFO_REG_CONTROL		(0x08)
#define LPBFIFO_REG_ENABLE		(0x0C)
#define LPBFIFO_REG_BYTES_DONE_STATUS	(0x14)
#define LPBFIFO_REG_FIFO_DATA		(0x40)
#define LPBFIFO_REG_FIFO_STATUS		(0x44)
#define LPBFIFO_REG_FIFO_CONTROL	(0x48)
#define LPBFIFO_REG_FIFO_ALARM		(0x4C)

struct mpc52xx_lpbfifo {
	struct device *dev;
	phys_addr_t regs_phys;
	void __iomem *regs;
	int irq;
	spinlock_t lock;

	struct bcom_task *bcom_tx_task;
	struct bcom_task *bcom_rx_task;
	struct bcom_task *bcom_cur_task;

	/* Current state data */
	struct mpc52xx_lpbfifo_request *req;
	int dma_irqs_enabled;
};

/* The MPC5200 has only one FIFO, so only one instance structure is needed */
static struct mpc52xx_lpbfifo lpbfifo;

/**
 * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
 * @req: Pointer to request structure
 */
static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
{
	size_t transfer_size = req->size - req->pos;
	struct bcom_bd *bd;
	void __iomem *reg;
	u32 *data;
	int i;
	int bit_fields;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	/* Set and clear the reset bits; this is good practice per the User Manual */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* set master enable bit */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
	if (!dma) {
		/* While the FIFO can be set up for transfer sizes as large as
		 * 16M-1, the FIFO itself is only 512 bytes deep and it does
		 * not generate interrupts for FIFO full events (only transfer
		 * complete will raise an IRQ). Therefore when not using
		 * Bestcomm to drive the FIFO it needs to either be polled, or
		 * transfers need to be constrained to the size of the FIFO.
		 *
		 * This driver restricts the size of the transfer.
		 */
		if (transfer_size > 512)
			transfer_size = 512;

		/* Load the FIFO with data */
		if (write) {
			reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
			data = req->data + req->pos;
			for (i = 0; i < transfer_size; i += 4)
				out_be32(reg, *data++);
		}

		/* Unmask both error and completion irqs */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
	} else {
		/* Choose the correct direction
		 *
		 * Configure the watermarks so DMA will always complete correctly.
		 * It may be worth experimenting with the ALARM value to see if
		 * there is a performance impact. However, if it is wrong there
		 * is a risk of DMA not transferring the last chunk of data
		 */
		if (write) {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
		} else {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;

			if (poll_dma) {
				if (lpbfifo.dma_irqs_enabled) {
					disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 0;
				}
			} else {
				if (!lpbfifo.dma_irqs_enabled) {
					enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 1;
				}
			}
		}

		bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
		bd->status = transfer_size;
		if (!write) {
			/*
			 * In the DMA read case, the DMA doesn't complete,
			 * possibly due to incorrect watermarks in the ALARM
			 * and CONTROL regs. For now instead of trying to
			 * determine the right watermarks that will make this
			 * work, just increase the number of bytes the FIFO is
			 * expecting.
			 *
			 * When submitting another operation, the FIFO will get
			 * reset, so the condition of the FIFO waiting for a
			 * non-existent 4 bytes will get cleared.
			 */
			transfer_size += 4; /* BLECH! */
		}
		bd->data[0] = req->data_phys + req->pos;
		bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);

		/* error irq & master enabled bit */
		bit_fields = 0x00000201;

		/* Unmask irqs */
		if (write && (!poll_dma))
			bit_fields |= 0x00000100; /* completion irq too */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
	}

	/* Set transfer size, width, chip select and READ mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
		 req->offset + req->pos);
	out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);

	bit_fields = req->cs << 24 | 0x000008;
	if (!write)
		bit_fields |= 0x010000; /* read mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);

	/* Kick it off */
	if (!lpbfifo.req->defer_xfer_start)
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	if (dma)
		bcom_enable(lpbfifo.bcom_cur_task);
}

/**
 * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
 *
 * On transmit, the dma completion irq triggers before the fifo completion
 * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
 * task completion irq because everything is not really done until the LPB FIFO
 * completion irq triggers.
 *
 * In other words:
 * For DMA, on receive, the "Fat Lady" is the Bestcomm completion irq. On
 * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
 * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
 *
 * Reasons for entering this routine:
 * 1) PIO mode rx and tx completion irq
 * 2) DMA interrupt mode tx completion irq
 * 3) DMA polled mode tx
 *
 * Exit conditions:
 * 1) Transfer aborted
 * 2) FIFO complete without DMA; more data to do
 * 3) FIFO complete without DMA; all data transferred
 * 4) FIFO complete using DMA
 *
 * Condition 1 can occur regardless of whether or not DMA is used.
 * It requires executing the callback to report the error and exiting
 * immediately.
 *
 * Condition 2 requires programming the FIFO with the next block of data.
 *
 * Condition 3 requires executing the callback to report completion.
 *
 * Condition 4 means the same as 3, except that we also retrieve the bcom
 * buffer so DMA doesn't get clogged up.
 *
 * To make things trickier, the spinlock must be dropped before
 * executing the callback, otherwise we could end up with a deadlock
 * or nested spinlock condition. The out path is non-trivial, so
 * extra fiddling is done to make sure all paths lead to the same
 * outbound code.
 */
static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	void __iomem *reg;
	u32 *data;
	int count, i;
	int do_callback = 0;
	u32 ts;
	unsigned long flags;
	int dma, write, poll_dma;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = mftb();

	req = lpbfifo.req;
	if (!req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ\n");
		return IRQ_HANDLED;
	}

	dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	if (dma && !write) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ (dma and not writing)\n");
		return IRQ_HANDLED;
	}

	if ((status & 0x01) == 0)
		goto out;

	/* check abort bit */
	if (status & 0x10) {
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		do_callback = 1;
		goto out;
	}

	/* Read result from hardware */
	count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	count &= 0x00ffffff;

	if (!dma && !write) {
		/* copy the data out of the FIFO */
		reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
		data = req->data + req->pos;
		for (i = 0; i < count; i += 4)
			*data++ = in_be32(reg);
	}

	/* Update transfer position and count */
	req->pos += count;

	/* Decide what to do next */
	if (req->size - req->pos)
		mpc52xx_lpbfifo_kick(req); /* more work to do */
	else
		do_callback = 1;

 out:
	/* Clear the IRQ */
	out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);

	if (dma && (status & 0x11)) {
		/*
		 * Count the DMA as complete only when the FIFO completion
		 * status or abort bits are set.
		 *
		 * (status & 0x01) should always be the case except sometimes
		 * when using polled DMA.
		 *
		 * (status & 0x10) {transfer aborted}: This case needs more
		 * testing.
		 */
		bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
	}
	req->last_byte = ((u8 *)req->data)[req->size - 1];

	/* When the do_callback flag is set, it means the transfer is finished,
	 * so mark the FIFO as idle */
	if (do_callback)
		lpbfifo.req = NULL;

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	req->irq_ticks += mftb() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	/* Spinlock is released; it is now safe to call the callback */
	if (do_callback && req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
 *
 * Only used when receiving data.
 */
static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	unsigned long flags;
	u32 status;
	u32 ts;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = mftb();

	req = lpbfifo.req;
	if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return IRQ_HANDLED;
	}

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		req->buffer_not_done_cnt++;
		if ((req->buffer_not_done_cnt % 1000) == 0)
			pr_err("transfer stalled\n");

		return IRQ_HANDLED;
	}

	bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);

	req->last_byte = ((u8 *)req->data)[req->size - 1];

	req->pos = status & 0x00ffffff;

	/* Mark the FIFO as idle */
	lpbfifo.req = NULL;

	/* Release the lock before calling out to the callback. */
	req->irq_ticks += mftb() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_poll - Poll for DMA completion
 */
void mpc52xx_lpbfifo_poll(void)
{
	struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;

	/*
	 * For more information, see comments on the "Fat Lady"
	 */
	if (dma && write)
		mpc52xx_lpbfifo_irq(0, NULL);
	else
		mpc52xx_lpbfifo_bcom_irq(0, NULL);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
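
/*
 * Usage note (illustrative sketch, not part of this driver): in polled mode a
 * client sets MPC52XX_LPBFIFO_FLAG_POLL_DMA on the request and then spins on
 * mpc52xx_lpbfifo_poll() until its completion callback has run. The lpb_done
 * flag and the callback below are hypothetical client-side names:
 *
 *	static int lpb_done;
 *
 *	static void lpb_poll_callback(struct mpc52xx_lpbfifo_request *req)
 *	{
 *		lpb_done = 1;
 *	}
 *
 *	// ...after mpc52xx_lpbfifo_submit(&req) has returned 0:
 *	while (!lpb_done)
 *		mpc52xx_lpbfifo_poll();
 */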

/**
 * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
 * @req: Pointer to request structure
 */
int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* If the req pointer is already set, then a transfer is in progress */
	if (lpbfifo.req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/* Setup the transfer */
	lpbfifo.req = req;
	req->irq_count = 0;
	req->irq_ticks = 0;
	req->buffer_not_done_cnt = 0;
	req->pos = 0;

	mpc52xx_lpbfifo_kick(req);
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
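
/*
 * Usage sketch (illustration only, not part of this driver): a client fills
 * in a struct mpc52xx_lpbfifo_request and submits it; the callback runs from
 * interrupt context once the transfer finishes. The field names match the
 * request structure as used above; the completion, callback and buffer
 * parameters are hypothetical client-side names:
 *
 *	static DECLARE_COMPLETION(lpb_xfer_done);
 *
 *	static void lpb_xfer_callback(struct mpc52xx_lpbfifo_request *req)
 *	{
 *		complete(&lpb_xfer_done);
 *	}
 *
 *	static int lpb_write_example(void *buf, phys_addr_t buf_phys, size_t len)
 *	{
 *		struct mpc52xx_lpbfifo_request req = {
 *			.cs = 0,		// LocalPlus chip select to use
 *			.offset = 0,		// offset within that chip select
 *			.data = buf,		// virtual address (used to fill the FIFO)
 *			.data_phys = buf_phys,	// bus address (used for Bestcomm DMA)
 *			.size = len,
 *			.flags = MPC52XX_LPBFIFO_FLAG_WRITE,
 *			.callback = lpb_xfer_callback,
 *		};
 *		int rc = mpc52xx_lpbfifo_submit(&req);
 *
 *		if (rc)
 *			return rc;
 *		wait_for_completion(&lpb_xfer_done);
 *		return 0;
 *	}
 */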

int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/*
	 * If the req pointer is already set and a transfer was
	 * started on submit, then this transfer is in progress
	 */
	if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/*
	 * If the req was previously submitted but not
	 * started, start it now
	 */
	if (lpbfifo.req && lpbfifo.req == req &&
	    lpbfifo.req->defer_xfer_start) {
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	}

	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
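
/*
 * Deferred-start sketch (hypothetical client code): setting defer_xfer_start
 * makes mpc52xx_lpbfifo_submit() program the FIFO without triggering it, so
 * the client can start the transfer later with mpc52xx_lpbfifo_start_xfer():
 *
 *	req.defer_xfer_start = 1;
 *	rc = mpc52xx_lpbfifo_submit(&req);		// programmed, not started
 *	if (!rc)
 *		rc = mpc52xx_lpbfifo_start_xfer(&req);	// kicks off the transfer
 */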

void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	if (lpbfifo.req == req) {
		/* Put it into reset and clear the state */
		bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
		bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		lpbfifo.req = NULL;
	}
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);

static int mpc52xx_lpbfifo_probe(struct platform_device *op)
{
	struct resource res;
	int rc = -ENOMEM;

	if (lpbfifo.dev != NULL)
		return -ENOSPC;

	lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0);
	if (!lpbfifo.irq)
		return -ENODEV;

	if (of_address_to_resource(op->dev.of_node, 0, &res))
		return -ENODEV;
	lpbfifo.regs_phys = res.start;
	lpbfifo.regs = of_iomap(op->dev.of_node, 0);
	if (!lpbfifo.regs)
		return -ENOMEM;

	spin_lock_init(&lpbfifo.lock);

	/* Put FIFO into reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Register the interrupt handler */
	rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
			 "mpc52xx-lpbfifo", &lpbfifo);
	if (rc)
		goto err_irq;

	/* Request the Bestcomm receive (fifo --> memory) task and IRQ */
	lpbfifo.bcom_rx_task =
		bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
				    16*1024*1024);
	if (!lpbfifo.bcom_rx_task)
		goto err_bcom_rx;

	rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
			 mpc52xx_lpbfifo_bcom_irq, 0,
			 "mpc52xx-lpbfifo-rx", &lpbfifo);
	if (rc)
		goto err_bcom_rx_irq;

	lpbfifo.dma_irqs_enabled = 1;

	/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
	lpbfifo.bcom_tx_task =
		bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
	if (!lpbfifo.bcom_tx_task)
		goto err_bcom_tx;

	lpbfifo.dev = &op->dev;
	return 0;

 err_bcom_tx:
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
 err_bcom_rx_irq:
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
 err_bcom_rx:
	free_irq(lpbfifo.irq, &lpbfifo);
 err_irq:
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;

	dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
	return -ENODEV;
}


static int mpc52xx_lpbfifo_remove(struct platform_device *op)
{
	if (lpbfifo.dev != &op->dev)
		return 0;

	/* Put FIFO in reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Release the bestcomm transmit task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
	bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);

	/* Release the bestcomm receive task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);

	free_irq(lpbfifo.irq, &lpbfifo);
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;
	lpbfifo.dev = NULL;

	return 0;
}

static const struct of_device_id mpc52xx_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc5200-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc52xx_lpbfifo_match);

static struct platform_driver mpc52xx_lpbfifo_driver = {
	.driver = {
		.name = "mpc52xx-lpbfifo",
		.of_match_table = mpc52xx_lpbfifo_match,
	},
	.probe = mpc52xx_lpbfifo_probe,
	.remove = mpc52xx_lpbfifo_remove,
};
module_platform_driver(mpc52xx_lpbfifo_driver);
