1 /* fastlane.c: Driver for Phase5's Fastlane SCSI Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on the CyberStorm driver, hence the occasional
6 * reference to CyberStorm.
7 *
8 * Betatesting & crucial adjustments by
9 * Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
10 *
11 */
12
13 /* TODO:
14 *
15 * o According to the doc from laire, it is required to reset the DMA when
16 * the transfer is done. ATM we reset DMA just before every new
17 * dma_init_(read|write).
18 *
19 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
20 * to the caches and the Sparc MMU mapping.
21 * 2) Make as few routines required outside the generic driver. A lot of the
22 * routines in this file used to be inline!
23 */
24
25 #include <linux/module.h>
26
27 #include <linux/init.h>
28 #include <linux/kernel.h>
29 #include <linux/delay.h>
30 #include <linux/types.h>
31 #include <linux/string.h>
32 #include <linux/slab.h>
33 #include <linux/blk.h>
34 #include <linux/proc_fs.h>
35 #include <linux/stat.h>
36
37 #include "scsi.h"
38 #include "hosts.h"
39 #include "NCR53C9x.h"
40 #include "fastlane.h"
41
42 #include <linux/zorro.h>
43 #include <asm/irq.h>
44
45 #include <asm/amigaints.h>
46 #include <asm/amigahw.h>
47
48 #include <asm/pgtable.h>
49
50 /* Such day has just come... */
51 #if 0
52 /* Let this defined unless you really need to enable DMA IRQ one day */
53 #define NODMAIRQ
54 #endif
55
56 static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
57 static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
58 static inline void dma_clear(struct NCR_ESP *esp);
59 static void dma_dump_state(struct NCR_ESP *esp);
60 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
61 static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
62 static void dma_ints_off(struct NCR_ESP *esp);
63 static void dma_ints_on(struct NCR_ESP *esp);
64 static int dma_irq_p(struct NCR_ESP *esp);
65 static void dma_irq_exit(struct NCR_ESP *esp);
66 static void dma_led_off(struct NCR_ESP *esp);
67 static void dma_led_on(struct NCR_ESP *esp);
68 static int dma_ports_p(struct NCR_ESP *esp);
69 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
70
71 static unsigned char ctrl_data = 0; /* Keep backup of the stuff written
72 * to ctrl_reg. Always write a copy
73 * to this register when writing to
74 * the hardware register!
75 */
76
77 static volatile unsigned char cmd_buffer[16];
78 /* This is where all commands are put
79 * before they are transferred to the ESP chip
80 * via PIO.
81 */
82
83 /***************************************************************** Detection */
/* Probe the Zorro bus for a Phase5 Fastlane Z3 SCSI controller.
 *
 * On success an NCR_ESP host is allocated, the board's address window is
 * ioremapped, the shared Amiga PORTS interrupt is claimed, and the generic
 * NCR53C9x core is initialized.  Returns the number of ESP hosts in use,
 * or 0 if no board was found or setup failed.
 */
int __init fastlane_esp_detect(Scsi_Host_Template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;

	if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060, z))) {
		unsigned long board = z->resource.start;
		if (request_mem_region(board+FASTLANE_ESP_ADDR,
				       sizeof(struct ESP_regs), "NCR53C9x")) {
			/* Check if this is really a fastlane controller. The problem
			 * is that also the cyberstorm and blizzard controllers use
			 * this ID value. Fortunately only Fastlane maps in Z3 space
			 */
			if (board < 0x1000000) {
				goto err_release;
			}
			/* NOTE(review): esp_allocate's return is not checked
			 * here; presumably it cannot fail in this kernel
			 * version — confirm against NCR53C9x.c. */
			esp = esp_allocate(tpnt, (void *)board+FASTLANE_ESP_ADDR);

			/* Do command transfer with programmed I/O */
			esp->do_pio_cmds = 1;

			/* Required functions */
			esp->dma_bytes_sent = &dma_bytes_sent;
			esp->dma_can_transfer = &dma_can_transfer;
			esp->dma_dump_state = &dma_dump_state;
			esp->dma_init_read = &dma_init_read;
			esp->dma_init_write = &dma_init_write;
			esp->dma_ints_off = &dma_ints_off;
			esp->dma_ints_on = &dma_ints_on;
			esp->dma_irq_p = &dma_irq_p;
			esp->dma_ports_p = &dma_ports_p;
			esp->dma_setup = &dma_setup;

			/* Optional functions (unused hooks left as 0) */
			esp->dma_barrier = 0;
			esp->dma_drain = 0;
			esp->dma_invalidate = 0;
			esp->dma_irq_entry = 0;
			esp->dma_irq_exit = &dma_irq_exit;
			esp->dma_led_on = &dma_led_on;
			esp->dma_led_off = &dma_led_off;
			esp->dma_poll = 0;
			esp->dma_reset = 0;

			/* Initialize the portBits (enable IRQs) */
			ctrl_data = (FASTLANE_DMA_FCODE |
#ifndef NODMAIRQ
				     FASTLANE_DMA_EDI |
#endif
				     FASTLANE_DMA_ESI);


			/* SCSI chip clock */
			esp->cfreq = 40000000;


			/* Map the physical address space into virtual kernel space */
			address = (unsigned long)
				z_ioremap(board, z->resource.end-board+1);

			if(!address){
				printk("Could not remap Fastlane controller memory!");
				goto err_unregister;
			}


			/* The DMA registers on the Fastlane are mapped
			 * relative to the device (i.e. in the same Zorro
			 * I/O block).
			 */
			esp->dregs = (void *)(address + FASTLANE_DMA_ADDR);

			/* ESP register base */
			esp->eregs = (struct ESP_regs *)(address + FASTLANE_ESP_ADDR);

			/* Board base (virtual address of the whole window) */
			esp->edev = (void *) address;

			/* Set the command buffer */
			esp->esp_command = cmd_buffer;
			esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);

			esp->irq = IRQ_AMIGA_PORTS;
			esp->slot = board+FASTLANE_ESP_ADDR;
			/* NOTE(review): the dev_id cookie passed here is
			 * esp_intr (the handler itself); fastlane_esp_release()
			 * frees with the same token so the pair matches, but
			 * passing esp would be the conventional choice for a
			 * shared IRQ line. */
			if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
					"Fastlane SCSI", esp_intr)) {
				printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
				goto err_unmap;
			}

			/* Controller ID */
			esp->scsi_id = 7;

			/* We don't have a differential SCSI-bus. */
			esp->diff = 0;

			dma_clear(esp);
			esp_initialize(esp);

			printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
			esps_running = esps_in_use;
			return esps_in_use;
		}
	}
	return 0;

	/* Error unwinding: each label undoes one more stage of setup. */
 err_unmap:
	z_iounmap((void *)address);
 err_unregister:
	scsi_unregister (esp->ehost);
 err_release:
	release_mem_region(z->resource.start+FASTLANE_ESP_ADDR,
			   sizeof(struct ESP_regs));
	return 0;
}
200
201
202 /************************************************************* DMA Functions */
/* Report how many bytes the DMA engine has pushed into the ESP chip.
 * The Fastlane DMA feeds the ESP directly with no intermediate
 * buffering, so the chip's FIFO count is the complete answer.
 * (Original XXXX question stands: this is host-to-ESP, right?)
 */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	return fifo_count;
}
212
/* Return how much of the command's residual can go in one DMA shot.
 * Transfers are capped at 0xfffc bytes (just under 64K, longword
 * aligned), matching the original driver's limit.
 */
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	unsigned long residual = sp->SCp.this_residual;

	return (residual > 0xfffc) ? 0xfffc : residual;
}
220
dma_dump_state(struct NCR_ESP * esp)221 static void dma_dump_state(struct NCR_ESP *esp)
222 {
223 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
224 esp->esp_id, ((struct fastlane_dma_registers *)
225 (esp->dregs))->cond_reg));
226 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
227 custom.intreqr, custom.intenar));
228 }
229
/* Arm the Fastlane DMA engine for a device-to-memory (read) transfer
 * of `length` bytes landing at physical address `addr`.
 * Statement order is significant: caches are invalidated and the DMA
 * engine reset before the address is latched and DMA is enabled.
 */
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	/* Invalidate cached lines covering the target buffer so the CPU
	 * sees the DMA'd data afterwards. */
	cache_clear(addr, length);

	/* Per the TODO at the top of the file: reset DMA before every
	 * new dma_init_(read|write). */
	dma_clear(esp);

	/* The low 24 bits of addr index into the board's address window;
	 * writing addr through that location presumably latches the DMA
	 * address in hardware — TODO confirm against Fastlane docs. */
	t = (unsigned long *)((addr & 0x00ffffff) + esp->edev);

	dregs->clear_strobe = 0;
	*t = addr;

	/* Enable DMA while keeping the shadow copy (ctrl_data) in sync
	 * with the write-only hardware register. */
	ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE;
	dregs->ctrl_reg = ctrl_data;
}
248
/* Arm the Fastlane DMA engine for a memory-to-device (write) transfer
 * of `length` bytes starting at physical address `addr`.
 * Mirror image of dma_init_read(): caches are pushed (flushed) instead
 * of cleared, and FASTLANE_DMA_WRITE is set in the control register.
 */
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	/* Flush dirty cache lines so the device reads current data. */
	cache_push(addr, length);

	/* Reset DMA before each new setup (see TODO at top of file). */
	dma_clear(esp);

	/* Latch the DMA address via the board's address window; see the
	 * matching hedged note in dma_init_read(). */
	t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev));

	dregs->clear_strobe = 0;
	*t = addr;

	/* Enable DMA in write direction, keeping ctrl_data (the shadow
	 * of the write-only register) in sync. */
	ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) |
		     FASTLANE_DMA_ENABLE |
		     FASTLANE_DMA_WRITE);
	dregs->ctrl_reg = ctrl_data;
}
269
/* Reset the Fastlane DMA engine: drop the enable/direction bits from
 * the control register and zero the latched DMA address.
 * Called before every dma_init_(read|write) per the file-top TODO.
 */
static inline void dma_clear(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	/* Keep only the bits preserved by FASTLANE_DMA_MASK; update the
	 * shadow copy and the (write-only) hardware register together. */
	ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
	dregs->ctrl_reg = ctrl_data;

	/* Writing 0 through the board base presumably clears the latched
	 * DMA address — TODO confirm against Fastlane docs. */
	t = (unsigned long *)(esp->edev);

	dregs->clear_strobe = 0;
	*t = 0 ;
}
284
285
/* Mask the (shared) Amiga PORTS interrupt line this host uses. */
static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(esp->irq);
}
290
/* Re-enable the (shared) Amiga PORTS interrupt line this host uses. */
static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(esp->irq);
}
295
/* Interrupt-exit hook: briefly drop the EDI/ESI interrupt-enable bits
 * and then restore them, presumably to re-arm the board's edge/level
 * interrupt logic — TODO confirm against Fastlane docs.  The nop()
 * keeps the two register writes from being collapsed on m68k.
 */
static void dma_irq_exit(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);

	dregs->ctrl_reg = ctrl_data & ~(FASTLANE_DMA_EDI|FASTLANE_DMA_ESI);
#ifdef __mc68000__
	nop();
#endif
	dregs->ctrl_reg = ctrl_data;
}
307
/* Decide whether the pending (shared) interrupt belongs to this host.
 * Returns 0 if IACT indicates the IRQ is not ours; otherwise non-zero
 * when the ESP chip itself is asserting an interrupt (and, unless
 * NODMAIRQ, when the board's CREQ bit agrees) and MINT is clear.
 */
static int dma_irq_p(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned char dma_status;

	dma_status = dregs->cond_reg;

	if(dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
#ifndef NODMAIRQ
	       (dma_status & FASTLANE_DMA_CREQ) &&
#endif
	       (!(dma_status & FASTLANE_DMA_MINT)) &&
	       (esp_read(((struct ESP_regs *) (esp->eregs))->esp_status) & ESP_STAT_INTR));
}
327
dma_led_off(struct NCR_ESP * esp)328 static void dma_led_off(struct NCR_ESP *esp)
329 {
330 ctrl_data &= ~FASTLANE_DMA_LED;
331 ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
332 }
333
dma_led_on(struct NCR_ESP * esp)334 static void dma_led_on(struct NCR_ESP *esp)
335 {
336 ctrl_data |= FASTLANE_DMA_LED;
337 ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
338 }
339
dma_ports_p(struct NCR_ESP * esp)340 static int dma_ports_p(struct NCR_ESP *esp)
341 {
342 return ((custom.intenar) & IF_PORTS);
343 }
344
/* Dispatch a DMA setup in the requested direction.
 * Sparc heritage: DMA_ST_WRITE means "move data from device to
 * memory", so a true (write) flag actually selects a DMA *read*.
 */
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	if (!write) {
		dma_init_write(esp, addr, count);
	} else {
		dma_init_read(esp, addr, count);
	}
}
356
/* Old-style SCSI module glue: HOSTS_C selects the host-template half of
 * fastlane.h on re-inclusion, and scsi_module.c supplies the module
 * init/exit entry points driven by driver_template below. */
#define HOSTS_C

#include "fastlane.h"

static Scsi_Host_Template driver_template = SCSI_FASTLANE;
#include "scsi_module.c"
363
/* Tear down a Fastlane host on module unload.
 *
 * Bug fix: the original passed the ioremapped *virtual* base
 * (esp->edev) to release_mem_region(), but the region was requested
 * during detection at the physical address board+FASTLANE_ESP_ADDR
 * (saved in esp->slot).  It also never undid the z_ioremap() mapping.
 * Both are corrected here; values are captured before esp_deallocate()
 * so we never touch a possibly-freed esp.
 *
 * Returns 1 per the host template's release convention.
 */
int fastlane_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	struct NCR_ESP *esp = (struct NCR_ESP *)instance->hostdata;
	unsigned long address = (unsigned long)esp->edev;	/* virtual (ioremapped) base */
	unsigned long region = esp->slot;			/* physical region start */

	esp_deallocate(esp);
	esp_release();
	release_mem_region(region, sizeof(struct ESP_regs));
	/* dev_id must match the token used in request_irq() (esp_intr). */
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
	z_iounmap((void *)address);
#endif
	return 1;
}
375
376 MODULE_LICENSE("GPL");
377