/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
 *
 * Adapted from jazz_esp.c and the old mac_esp.c.
 *
 * The pseudo DMA algorithm is based on the one used in NetBSD.
 * See sys/arch/mac68k/obio/esp.c for some background information.
 *
 * Copyright (C) 2007-2008 Finn Thain
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/nubus.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/macints.h>
#include <asm/macintosh.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME         "mac_esp"
#define PFX                     DRV_MODULE_NAME ": "
#define DRV_VERSION             "1.000"
#define DRV_MODULE_RELDATE      "Sept 15, 2007"

#define MAC_ESP_IO_BASE         0x50F00000
#define MAC_ESP_REGS_QUADRA     (MAC_ESP_IO_BASE + 0x10000)
#define MAC_ESP_REGS_QUADRA2    (MAC_ESP_IO_BASE + 0xF000)
#define MAC_ESP_REGS_QUADRA3    (MAC_ESP_IO_BASE + 0x18000)
#define MAC_ESP_REGS_SPACING    0x402
#define MAC_ESP_PDMA_REG        0xF9800024
#define MAC_ESP_PDMA_REG_SPACING        0x4
#define MAC_ESP_PDMA_IO_OFFSET  0x100
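
/* The ESP registers appear every 16 bytes in the Quadra I/O space (hence
 * the "reg * 16" in the accessors below).  MAC_ESP_REGS_SPACING separates
 * the register blocks of the two controllers on machines that have two,
 * and MAC_ESP_PDMA_IO_OFFSET locates the pseudo-DMA data port relative to
 * the register base.
 */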
#define esp_read8(REG)          mac_esp_read8(esp, REG)
#define esp_write8(VAL, REG)    mac_esp_write8(esp, VAL, REG)

struct mac_esp_priv {
        struct esp *esp;
        void __iomem *pdma_regs;
        void __iomem *pdma_io;
        int error;
};
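
/* One slot per possible controller; the shared interrupt handler and the
 * probe/remove paths use this array to find every registered chip.
 */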
static struct esp *esp_chips[2];

#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
                               platform_get_drvdata((struct platform_device *) \
                                                    (esp->dev)))

static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
        nubus_writeb(val, esp->regs + reg * 16);
}

static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
{
        return nubus_readb(esp->regs + reg * 16);
}

/* For pseudo DMA and PIO we need the virtual address
 * so this address mapping is the identity mapping.
 */

static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
                                     size_t sz, int dir)
{
        return (dma_addr_t)buf;
}

static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
                          int num_sg, int dir)
{
        int i;

        for (i = 0; i < num_sg; i++)
                sg[i].dma_address = (u32)sg_virt(&sg[i]);
        return num_sg;
}

static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
                                 size_t sz, int dir)
{
        /* Nothing to do. */
}

static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
                             int num_sg, int dir)
{
        /* Nothing to do. */
}

static void mac_esp_reset_dma(struct esp *esp)
{
        /* Nothing to do. */
}

static void mac_esp_dma_drain(struct esp *esp)
{
        /* Nothing to do. */
}

static void mac_esp_dma_invalidate(struct esp *esp)
{
        /* Nothing to do. */
}

static int mac_esp_dma_error(struct esp *esp)
{
        return MAC_ESP_GET_PRIV(esp)->error;
}
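
/* The polling helpers below spin rather than sleep because transfers run
 * with interrupts masked (local_irq_save() for pseudo DMA, disable_irq()
 * for PIO).  Each gives up after 500000 iterations of udelay(2), roughly
 * one second, and records the failure in mep->error so that
 * mac_esp_dma_error() reports it to the ESP core.
 */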
static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
{
        struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
        int i = 500000;

        do {
                if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
                        return 0;

                if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
                        return 1;

                udelay(2);
        } while (--i);

        printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
               esp_read8(ESP_STATUS));
        mep->error = 1;
        return 1;
}

static inline int mac_esp_wait_for_dreq(struct esp *esp)
{
        struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
        int i = 500000;

        do {
                if (mep->pdma_regs == NULL) {
                        if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
                                return 0;
                } else {
                        if (nubus_readl(mep->pdma_regs) & 0x200)
                                return 0;
                }

                if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
                        return 1;

                udelay(2);
        } while (--i);

        printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
               esp_read8(ESP_STATUS));
        mep->error = 1;
        return 1;
}
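
/* Move data through the pseudo-DMA port in %4: count32 32-byte bursts of
 * sixteen word moves, then count2 single word moves, then at most one
 * trailing byte.  Every access is listed in the __ex_table section, so a
 * bus fault during the transfer (e.g. when the chip stops asserting DREQ)
 * simply lands at label 40 and the loop exits; the caller then works out
 * how much data actually moved.
 */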
#define MAC_ESP_PDMA_LOOP(operands) \
        asm volatile ( \
             "       tstw %1                   \n" \
             "       jbeq 20f                  \n" \
             "1:     movew " operands "        \n" \
             "2:     movew " operands "        \n" \
             "3:     movew " operands "        \n" \
             "4:     movew " operands "        \n" \
             "5:     movew " operands "        \n" \
             "6:     movew " operands "        \n" \
             "7:     movew " operands "        \n" \
             "8:     movew " operands "        \n" \
             "9:     movew " operands "        \n" \
             "10:    movew " operands "        \n" \
             "11:    movew " operands "        \n" \
             "12:    movew " operands "        \n" \
             "13:    movew " operands "        \n" \
             "14:    movew " operands "        \n" \
             "15:    movew " operands "        \n" \
             "16:    movew " operands "        \n" \
             "       subqw #1,%1               \n" \
             "       jbne 1b                   \n" \
             "20:    tstw %2                   \n" \
             "       jbeq 30f                  \n" \
             "21:    movew " operands "        \n" \
             "       subqw #1,%2               \n" \
             "       jbne 21b                  \n" \
             "30:    tstw %3                   \n" \
             "       jbeq 40f                  \n" \
             "31:    moveb " operands "        \n" \
             "32:    nop                       \n" \
             "40:                              \n" \
             "                                 \n" \
             "       .section __ex_table,\"a\" \n" \
             "       .align  4                 \n" \
             "       .long   1b,40b            \n" \
             "       .long   2b,40b            \n" \
             "       .long   3b,40b            \n" \
             "       .long   4b,40b            \n" \
             "       .long   5b,40b            \n" \
             "       .long   6b,40b            \n" \
             "       .long   7b,40b            \n" \
             "       .long   8b,40b            \n" \
             "       .long   9b,40b            \n" \
             "       .long   10b,40b           \n" \
             "       .long   11b,40b           \n" \
             "       .long   12b,40b           \n" \
             "       .long   13b,40b           \n" \
             "       .long   14b,40b           \n" \
             "       .long   15b,40b           \n" \
             "       .long   16b,40b           \n" \
             "       .long   21b,40b           \n" \
             "       .long   31b,40b           \n" \
             "       .long   32b,40b           \n" \
             "       .previous                 \n" \
             : "+a" (addr), "+r" (count32), "+r" (count2) \
             : "g" (count1), "a" (mep->pdma_io))
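
/* Pseudo-DMA transfer: program the 16-bit transfer counter, issue the
 * command, then shovel data through the PDMA port whenever DREQ is
 * asserted.  After each burst the residual is recomputed, either from how
 * far addr advanced (device-to-memory) or from the chip's transfer counter
 * (memory-to-device), and the loop repeats until the count is exhausted or
 * an error/timeout occurs.
 */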
static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
                                  u32 dma_count, int write, u8 cmd)
{
        struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
        unsigned long flags;

        local_irq_save(flags);

        mep->error = 0;

        if (!write)
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);

        esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
        esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);

        scsi_esp_cmd(esp, cmd);

        do {
                unsigned int count32 = esp_count >> 5;
                unsigned int count2 = (esp_count & 0x1F) >> 1;
                unsigned int count1 = esp_count & 1;
                unsigned int start_addr = addr;

                if (mac_esp_wait_for_dreq(esp))
                        break;

                if (write) {
                        MAC_ESP_PDMA_LOOP("%4@,%0@+");

                        esp_count -= addr - start_addr;
                } else {
                        unsigned int n;

                        MAC_ESP_PDMA_LOOP("%0@+,%4@");

                        if (mac_esp_wait_for_empty_fifo(esp))
                                break;

                        n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
                        addr = start_addr + esp_count - n;
                        esp_count = n;
                }
        } while (esp_count);

        local_irq_restore(flags);
}

/*
 * Programmed IO routines follow.
 */

static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
{
        int i = 500000;

        do {
                unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

                if (fbytes)
                        return fbytes;

                udelay(2);
        } while (--i);

        printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
               esp_read8(ESP_STATUS));
        return 0;
}

static inline int mac_esp_wait_for_intr(struct esp *esp)
{
        struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
        int i = 500000;

        do {
                esp->sreg = esp_read8(ESP_STATUS);
                if (esp->sreg & ESP_STAT_INTR)
                        return 0;

                udelay(2);
        } while (--i);

        printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
        mep->error = 1;
        return 1;
}
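
/* PIO helpers: MAC_ESP_PIO_LOOP moves reg1 bytes one at a time between the
 * buffer and the FIFO data register, while MAC_ESP_PIO_FILL moves exactly
 * sixteen bytes (the FIFO depth) and knocks 16 off the remaining count with
 * two subqw #8 instructions (subqw cannot subtract more than 8 at a time).
 */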
#define MAC_ESP_PIO_LOOP(operands, reg1) \
        asm volatile ( \
             "1:     moveb " operands "     \n" \
             "       subqw #1,%1            \n" \
             "       jbne 1b                \n" \
             : "+a" (addr), "+r" (reg1) \
             : "a" (fifo))

#define MAC_ESP_PIO_FILL(operands, reg1) \
        asm volatile ( \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       moveb " operands "     \n" \
             "       subqw #8,%1            \n" \
             "       subqw #8,%1            \n" \
             : "+a" (addr), "+r" (reg1) \
             : "a" (fifo))

#define MAC_ESP_FIFO_SIZE 16
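
/* Programmed IO transfer, used when no pseudo-DMA hardware is available.
 * The DMA bit is stripped from the command.  For device-to-memory
 * transfers the FIFO is drained as bytes arrive; for memory-to-device
 * transfers the FIFO is pre-filled and then topped up on each bus-service
 * interrupt until the count is exhausted.
 */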
static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
                                 u32 dma_count, int write, u8 cmd)
{
        struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
        u8 *fifo = esp->regs + ESP_FDATA * 16;

        disable_irq(esp->host->irq);

        cmd &= ~ESP_CMD_DMA;
        mep->error = 0;

        if (write) {
                scsi_esp_cmd(esp, cmd);

                while (1) {
                        unsigned int n;

                        n = mac_esp_wait_for_fifo(esp);
                        if (!n)
                                break;

                        if (n > esp_count)
                                n = esp_count;
                        esp_count -= n;

                        MAC_ESP_PIO_LOOP("%2@,%0@+", n);

                        if (!esp_count)
                                break;

                        if (mac_esp_wait_for_intr(esp))
                                break;

                        if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
                            ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
                                break;

                        esp->ireg = esp_read8(ESP_INTRPT);
                        if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
                            ESP_INTR_BSERV)
                                break;

                        scsi_esp_cmd(esp, ESP_CMD_TI);
                }
        } else {
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);

                if (esp_count >= MAC_ESP_FIFO_SIZE)
                        MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
                else
                        MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);

                scsi_esp_cmd(esp, cmd);

                while (esp_count) {
                        unsigned int n;

                        if (mac_esp_wait_for_intr(esp))
                                break;

                        if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
                            ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
                                break;

                        esp->ireg = esp_read8(ESP_INTRPT);
                        if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
                            ESP_INTR_BSERV)
                                break;

                        n = MAC_ESP_FIFO_SIZE -
                            (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
                        if (n > esp_count)
                                n = esp_count;

                        if (n == MAC_ESP_FIFO_SIZE) {
                                MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
                        } else {
                                esp_count -= n;
                                MAC_ESP_PIO_LOOP("%0@+,%2@", n);
                        }

                        scsi_esp_cmd(esp, ESP_CMD_TI);
                }
        }

        enable_irq(esp->host->irq);
}

static int mac_esp_irq_pending(struct esp *esp)
{
        if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
                return 1;
        return 0;
}
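
/* Only the low and mid bytes of the chip's transfer counter are programmed
 * here (TCLOW/TCMED), so cap each transfer at 0xFFFF bytes.
 */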
static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
        return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}

static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
{
        int got_intr;

        /*
         * This is an edge triggered IRQ, so we have to be careful to
         * avoid missing a transition when it is shared by two ESP devices.
         */

        do {
                got_intr = 0;
                if (esp_chips[0] &&
                    (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
                        (void)scsi_esp_intr(irq, esp_chips[0]);
                        got_intr = 1;
                }
                if (esp_chips[1] &&
                    (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
                        (void)scsi_esp_intr(irq, esp_chips[1]);
                        got_intr = 1;
                }
        } while (got_intr);

        return IRQ_HANDLED;
}

static struct esp_driver_ops mac_esp_ops = {
        .esp_write8       = mac_esp_write8,
        .esp_read8        = mac_esp_read8,
        .map_single       = mac_esp_map_single,
        .map_sg           = mac_esp_map_sg,
        .unmap_single     = mac_esp_unmap_single,
        .unmap_sg         = mac_esp_unmap_sg,
        .irq_pending      = mac_esp_irq_pending,
        .dma_length_limit = mac_esp_dma_length_limit,
        .reset_dma        = mac_esp_reset_dma,
        .dma_drain        = mac_esp_dma_drain,
        .dma_invalidate   = mac_esp_dma_invalidate,
        .send_dma_cmd     = mac_esp_send_pdma_cmd,
        .dma_error        = mac_esp_dma_error,
};

static int __devinit esp_mac_probe(struct platform_device *dev)
{
        struct scsi_host_template *tpnt = &scsi_esp_template;
        struct Scsi_Host *host;
        struct esp *esp;
        int err;
        struct mac_esp_priv *mep;

        if (!MACH_IS_MAC)
                return -ENODEV;

        if (dev->id > 1)
                return -ENODEV;

        host = scsi_host_alloc(tpnt, sizeof(struct esp));

        err = -ENOMEM;
        if (!host)
                goto fail;

        host->max_id = 8;
        host->use_clustering = DISABLE_CLUSTERING;
        esp = shost_priv(host);

        esp->host = host;
        esp->dev = dev;

        esp->command_block = kzalloc(16, GFP_KERNEL);
        if (!esp->command_block)
                goto fail_unlink;
        esp->command_block_dma = (dma_addr_t)esp->command_block;

        esp->scsi_id = 7;
        host->this_id = esp->scsi_id;
        esp->scsi_id_mask = 1 << esp->scsi_id;

        mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
        if (!mep)
                goto fail_free_command_block;
        mep->esp = esp;
        platform_set_drvdata(dev, mep);
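
        /* Select register base, bus clock and pseudo-DMA resources by
         * machine type.  dev->id picks the register block and DREQ register
         * for the second bus on machines that have two controllers.
         */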
        switch (macintosh_config->scsi_type) {
        case MAC_SCSI_QUADRA:
                esp->cfreq = 16500000;
                esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
                mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
                mep->pdma_regs = NULL;
                break;
        case MAC_SCSI_QUADRA2:
                esp->cfreq = 25000000;
                esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
                            dev->id * MAC_ESP_REGS_SPACING);
                mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
                mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
                                 dev->id * MAC_ESP_PDMA_REG_SPACING);
                nubus_writel(0x1d1, mep->pdma_regs);
                break;
        case MAC_SCSI_QUADRA3:
                /* These quadras have a real DMA controller (the PSC) but we
                 * don't know how to drive it so we must use PIO instead.
                 */
                esp->cfreq = 25000000;
                esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
                mep->pdma_io = NULL;
                mep->pdma_regs = NULL;
                break;
        }

        esp->ops = &mac_esp_ops;
        if (mep->pdma_io == NULL) {
                printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
                esp_write8(0, ESP_TCLOW);
                esp_write8(0, ESP_TCMED);
                esp->flags = ESP_FLAG_DISABLE_SYNC;
                mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
        } else {
                printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
        }
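
        /* Both controllers share one edge-triggered IRQ line.  Whichever
         * probe runs first installs the handler; dev_id is NULL because
         * mac_scsi_esp_intr() walks esp_chips[] itself.
         */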
        host->irq = IRQ_MAC_SCSI;
        esp_chips[dev->id] = esp;
        mb();
        if (esp_chips[!dev->id] == NULL) {
                err = request_irq(host->irq, mac_scsi_esp_intr, 0,
                                  "Mac ESP", NULL);
                if (err < 0) {
                        esp_chips[dev->id] = NULL;
                        goto fail_free_priv;
                }
        }

        err = scsi_esp_register(esp, &dev->dev);
        if (err)
                goto fail_free_irq;

        return 0;

fail_free_irq:
        if (esp_chips[!dev->id] == NULL)
                free_irq(host->irq, esp);
fail_free_priv:
        kfree(mep);
fail_free_command_block:
        kfree(esp->command_block);
fail_unlink:
        scsi_host_put(host);
fail:
        return err;
}

static int __devexit esp_mac_remove(struct platform_device *dev)
{
        struct mac_esp_priv *mep = platform_get_drvdata(dev);
        struct esp *esp = mep->esp;
        unsigned int irq = esp->host->irq;

        scsi_esp_unregister(esp);

        esp_chips[dev->id] = NULL;
        if (!(esp_chips[0] || esp_chips[1]))
                free_irq(irq, NULL);

        kfree(mep);

        kfree(esp->command_block);

        scsi_host_put(esp->host);

        return 0;
}

static struct platform_driver esp_mac_driver = {
        .probe    = esp_mac_probe,
        .remove   = __devexit_p(esp_mac_remove),
        .driver   = {
                .name  = DRV_MODULE_NAME,
                .owner = THIS_MODULE,
        },
};

static int __init mac_esp_init(void)
{
        return platform_driver_register(&esp_mac_driver);
}

static void __exit mac_esp_exit(void)
{
        platform_driver_unregister(&esp_mac_driver);
}

MODULE_DESCRIPTION("Mac ESP SCSI driver");
MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);

module_init(mac_esp_init);
module_exit(mac_esp_exit);