1 /*
2 * linux/drivers/ide/ppc/pmac.c
3 *
4 * Support for IDE interfaces on PowerMacs.
5 * These IDE interfaces are memory-mapped and have a DBDMA channel
6 * for doing DMA.
7 *
8 * Copyright (C) 1998-2002 Paul Mackerras & Ben. Herrenschmidt
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 * Some code taken from drivers/ide/ide-dma.c:
16 *
17 * Copyright (c) 1995-1998 Mark Lord
18 *
19 */
20 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <linux/delay.h>
26 #include <linux/ide.h>
27 #include <linux/notifier.h>
28 #include <linux/reboot.h>
29 #include <linux/pci.h>
30
31 #include <asm/prom.h>
32 #include <asm/io.h>
33 #include <asm/dbdma.h>
34 #include <asm/ide.h>
35 #include <asm/mediabay.h>
36 #include <asm/pci-bridge.h>
37 #include <asm/machdep.h>
38 #include <asm/pmac_feature.h>
39 #include <asm/sections.h>
40 #include <asm/irq.h>
41 #ifdef CONFIG_PMAC_PBOOK
42 #include <linux/adb.h>
43 #include <linux/pmu.h>
44 #endif
45 #include "ide_modes.h"
46 #include "ide-timing.h"
47
48 extern void ide_do_request(ide_hwgroup_t *hwgroup, int masked_irq);
49
50 #undef IDE_PMAC_DEBUG
51 #define DMA_WAIT_TIMEOUT 100
52
/* Per-interface state for one PowerMac IDE cell. */
typedef struct pmac_ide_hwif {
	ide_ioreg_t regbase;		/* base of the taskfile registers */
	unsigned long mapbase;		/* base of the whole mapped register block */
	int irq;			/* interrupt line for this interface */
	int kind;			/* one of the controller_* values below */
	int aapl_bus_id;		/* "AAPL,bus-id" device-tree property (0 if absent) */
	int cable_80;			/* 1 if the device-tree reports an 80-conductor cable */
	struct device_node* node;	/* Open Firmware node for this interface */
	/* timings[0]/[1]: PIO+MDMA timing register value for unit 0/1;
	 * timings[2]/[3]: Kauai UDMA timing register value for unit 0/1
	 * (only used on the UniNorth 2 ATA-6 cell). */
	u32 timings[4];
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
	/* Those fields are duplicating what is in hwif. We currently
	 * can't use the hwif ones because of some assumptions that are
	 * being made by the generic code about the kind of dma controller
	 * and format of the dma table. This will have to be fixed though.
	 */
	volatile struct dbdma_regs* dma_regs;	/* DBDMA channel registers */
	struct dbdma_cmd* dma_table_cpu;	/* CPU view of the DBDMA command list */
	dma_addr_t dma_table_dma;		/* bus address of the command list */
	struct scatterlist* sg_table;		/* s/g list for the current transfer */
	int sg_nents;				/* number of mapped s/g entries */
	int sg_dma_direction;			/* DMA direction of the current mapping */
#endif

} pmac_ide_hwif_t;
77
/* Per-slot interface state, indexed like the generic ide_hwifs[] array;
 * unused slots have regbase == 0. pmac_ide_count is the number of
 * interfaces registered by the probe code (presumably set later in this
 * file — not visible in this chunk). */
static pmac_ide_hwif_t pmac_ide[MAX_HWIFS] __pmacdata;
static int pmac_ide_count;

/* Known PowerMac IDE cell types, oldest to newest. */
enum {
	controller_ohare,	/* OHare based */
	controller_heathrow,	/* Heathrow/Paddington */
	controller_kl_ata3,	/* KeyLargo ATA-3 */
	controller_kl_ata4,	/* KeyLargo ATA-4 */
	controller_un_ata6	/* UniNorth2 ATA-6 */
};

/* Printable model names, indexed by the controller_* enum above. */
static const char* model_name[] = {
	"OHare ATA",		/* OHare based */
	"Heathrow ATA", 	/* Heathrow/Paddington */
	"KeyLargo ATA-3",	/* KeyLargo ATA-3 */
	"KeyLargo ATA-4",	/* KeyLargo ATA-4 */
	"UniNorth ATA-6"	/* UniNorth2 ATA-6 */
};
96
97 /*
98 * Extra registers, both 32-bit little-endian
99 */
100 #define IDE_TIMING_CONFIG 0x200
101 #define IDE_INTERRUPT 0x300
102
103 /* Kauai (U2) ATA has different register setup */
104 #define IDE_KAUAI_PIO_CONFIG 0x200
105 #define IDE_KAUAI_ULTRA_CONFIG 0x210
106 #define IDE_KAUAI_POLL_CONFIG 0x220
107
108 /*
109 * Timing configuration register definitions
110 */
111
112 /* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
113 #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
114 #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
115 #define IDE_SYSCLK_NS 30 /* 33Mhz cell */
116 #define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
117
/* 100Mhz cell, found in Uninorth 2. I don't have much info about
 * this one yet, it appears as a pci device (106b/0033) on uninorth
 * internal PCI bus and its clock is controlled like gem or fw. It
 * appears to be an evolution of keylargo ATA4 with a timing register
 * extended to two 32-bit registers and a similar DBDMA channel. Other
 * registers seem to exist but I can't tell much about them.
 *
 * So far, I'm using pre-calculated tables for this extracted from
 * the values used by the MacOS X driver.
 *
 * The "PIO" register controls PIO and MDMA timings, the "ULTRA"
 * register controls the UDMA timings. At least, it seems bit 0
 * of this one enables UDMA vs. MDMA, and bits 4..7 are the
 * cycle time in units of 10ns. Bits 8..15 are used but I don't
 * know their meaning yet.
 */
134 #define TR_100_PIOREG_PIO_MASK 0xff000fff
135 #define TR_100_PIOREG_MDMA_MASK 0x00fff000
136 #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
137 #define TR_100_UDMAREG_UDMA_EN 0x00000001
138
139
/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
 * a 40-conductor cable and up to mode 4 on an 80-conductor one.
 * Clock unit is 15ns (66Mhz)
 *
 * 3 values can be programmed:
 * - Write data setup, which appears to match the cycle time. They
 *   also call it DIOW setup.
 * - Ready to pause time (from spec)
 * - Address setup. That one is weird. I don't see where exactly
 *   it fits in UDMA cycles, I got its name from an obscure piece
 *   of commented out code in Darwin. They leave it at 0, we do as
 *   well, despite a comment that would lead one to think it has a
 *   min value of 45ns.
 * Apple also adds 60ns to the write data setup (or cycle time ?) on
 * reads.
 */
156 #define TR_66_UDMA_MASK 0xfff00000
157 #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
158 #define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */
159 #define TR_66_UDMA_ADDRSETUP_SHIFT 29
160 #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
161 #define TR_66_UDMA_RDY2PAUS_SHIFT 25
162 #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
163 #define TR_66_UDMA_WRDATASETUP_SHIFT 21
164 #define TR_66_MDMA_MASK 0x000ffc00
165 #define TR_66_MDMA_RECOVERY_MASK 0x000f8000
166 #define TR_66_MDMA_RECOVERY_SHIFT 15
167 #define TR_66_MDMA_ACCESS_MASK 0x00007c00
168 #define TR_66_MDMA_ACCESS_SHIFT 10
169 #define TR_66_PIO_MASK 0x000003ff
170 #define TR_66_PIO_RECOVERY_MASK 0x000003e0
171 #define TR_66_PIO_RECOVERY_SHIFT 5
172 #define TR_66_PIO_ACCESS_MASK 0x0000001f
173 #define TR_66_PIO_ACCESS_SHIFT 0
174
/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
 *
 * The access time and recovery time can be programmed. Some older
 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
 * the same here for safety against broken old hardware ;)
 * The HalfTick bit, when set, adds half a clock (15ns) to the access
 * time and removes one from recovery. It's not supported on the KeyLargo
 * implementation afaik. The E bit appears to be set for PIO mode 0 and
 * is used to reach the long timings used in this mode.
 */
186 #define TR_33_MDMA_MASK 0x003ff800
187 #define TR_33_MDMA_RECOVERY_MASK 0x001f0000
188 #define TR_33_MDMA_RECOVERY_SHIFT 16
189 #define TR_33_MDMA_ACCESS_MASK 0x0000f800
190 #define TR_33_MDMA_ACCESS_SHIFT 11
191 #define TR_33_MDMA_HALFTICK 0x00200000
192 #define TR_33_PIO_MASK 0x000007ff
193 #define TR_33_PIO_E 0x00000400
194 #define TR_33_PIO_RECOVERY_MASK 0x000003e0
195 #define TR_33_PIO_RECOVERY_SHIFT 5
196 #define TR_33_PIO_ACCESS_MASK 0x0000001f
197 #define TR_33_PIO_ACCESS_SHIFT 0
198
199 /*
200 * Interrupt register definitions
201 */
202 #define IDE_INTR_DMA 0x80000000
203 #define IDE_INTR_DEVICE 0x40000000
204
205 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
206
/* Rounded Multiword DMA timings
 *
 * I gave up finding a generic formula for all controller
 * types and instead, built tables based on timing values
 * used by Apple in Darwin's implementation.
 */

/* One access/recovery/cycle triplet, all times in nanoseconds.
 * The tables below are sorted by decreasing cycle time and are
 * terminated by an all-zero entry (set_timings_mdma() scans them
 * for the best entry not faster than the requested cycle time). */
struct mdma_timings_t {
	int accessTime;
	int recoveryTime;
	int cycleTime;
};

/* 33Mhz cell (OHare/Heathrow) MDMA timings */
struct mdma_timings_t mdma_timings_33[] __pmacdata =
{
    { 240, 240, 480 },
    { 180, 180, 360 },
    { 135, 135, 270 },
    { 120, 120, 240 },
    { 105, 105, 210 },
    {  90,  90, 180 },
    {  75,  75, 150 },
    {  75,  45, 120 },
    {   0,   0,   0 }
};

/* 33Mhz cell on KeyLargo — slightly different access/recovery split */
struct mdma_timings_t mdma_timings_33k[] __pmacdata =
{
    { 240, 240, 480 },
    { 180, 180, 360 },
    { 150, 150, 300 },
    { 120, 120, 240 },
    {  90, 120, 210 },
    {  90,  90, 180 },
    {  90,  60, 150 },
    {  90,  30, 120 },
    {   0,   0,   0 }
};

/* 66Mhz cell (KeyLargo ATA-4) MDMA timings */
struct mdma_timings_t mdma_timings_66[] __pmacdata =
{
    { 240, 240, 480 },
    { 180, 180, 360 },
    { 135, 135, 270 },
    { 120, 120, 240 },
    { 105, 105, 210 },
    {  90,  90, 180 },
    {  90,  75, 165 },
    {  75,  45, 120 },
    {   0,   0,   0 }
};

/* KeyLargo ATA-4 Ultra DMA timings (rounded), in ns, indexed by
 * UDMA mode number (0..4). */
struct {
	int addrSetup; /* ??? */
	int rdy2pause;
	int wrDataSetup;
} kl66_udma_timings[] __pmacdata =
{
    { 0, 180,  120 },	/* Mode 0 */
    { 0, 150,  90 },	/*      1 */
    { 0, 120,  60 },	/*      2 */
    { 0, 90,   45 },	/*      3 */
    { 0, 90,   30 }	/*      4 */
};
271
272 /* UniNorth 2 ATA/100 timings */
273 struct kauai_timing {
274 int cycle_time;
275 u32 timing_reg;
276 };
277
278 static struct kauai_timing kauai_pio_timings[] __pmacdata =
279 {
280 { 930 , 0x08000fff },
281 { 600 , 0x08000a92 },
282 { 383 , 0x0800060f },
283 { 360 , 0x08000492 },
284 { 330 , 0x0800048f },
285 { 300 , 0x080003cf },
286 { 270 , 0x080003cc },
287 { 240 , 0x0800038b },
288 { 239 , 0x0800030c },
289 { 180 , 0x05000249 },
290 { 120 , 0x04000148 }
291 };
292
293 static struct kauai_timing kauai_mdma_timings[] __pmacdata =
294 {
295 { 1260 , 0x00fff000 },
296 { 480 , 0x00618000 },
297 { 360 , 0x00492000 },
298 { 270 , 0x0038e000 },
299 { 240 , 0x0030c000 },
300 { 210 , 0x002cb000 },
301 { 180 , 0x00249000 },
302 { 150 , 0x00209000 },
303 { 120 , 0x00148000 },
304 { 0 , 0 },
305 };
306
307 static struct kauai_timing kauai_udma_timings[] __pmacdata =
308 {
309 { 120 , 0x000070c0 },
310 { 90 , 0x00005d80 },
311 { 60 , 0x00004a60 },
312 { 45 , 0x00003a50 },
313 { 30 , 0x00002a30 },
314 { 20 , 0x00002921 },
315 { 0 , 0 },
316 };
317
318 static inline u32
kauai_lookup_timing(struct kauai_timing * table,int cycle_time)319 kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
320 {
321 int i;
322
323 for (i=0; table[i].cycle_time; i++)
324 if (cycle_time > table[i+1].cycle_time)
325 return table[i].timing_reg;
326 return 0;
327 }
328
329 /* allow up to 256 DBDMA commands per xfer */
330 #define MAX_DCMDS 256
331
/* Wait 2s for disk to answer on IDE bus after
 * enable operation.
 * NOTE: There is at least one case I know of a disk that needs about 10sec
 * before answering on the bus. I believe we could add a kernel command
 * line arg to override this delay for such cases.
 *
 * NOTE2: This has to be fixed with a BSY wait loop. I'm working on adding
 * that to the generic probe code.
 */
341 #define IDE_WAKEUP_DELAY_MS 2000
342
343 static void pmac_ide_setup_dma(struct device_node *np, int ix);
344 static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq, int ddir);
345 static int pmac_ide_tune_chipset(ide_drive_t *drive, u8 speed);
346 static void pmac_ide_tuneproc(ide_drive_t *drive, u8 pio);
347 static void pmac_ide_selectproc(ide_drive_t *drive);
348 static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
349 static int pmac_ide_dma_begin (ide_drive_t *drive);
350
351 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
352
#ifdef CONFIG_PMAC_PBOOK
static int idepmac_notify_sleep(struct pmu_sleep_notifier *self, int when);
/* Sleep notifier registered with the PMU at SLEEP_LEVEL_BLOCK; the
 * handler is defined later in this file (not visible in this chunk). */
struct pmu_sleep_notifier idepmac_sleep_notifier = {
	idepmac_notify_sleep, SLEEP_LEVEL_BLOCK,
};
#endif /* CONFIG_PMAC_PBOOK */
359
360 /*
361 * N.B. this can't be an initfunc, because the media-bay task can
362 * call ide_[un]register at any time.
363 */
364 void __pmac
pmac_ide_init_hwif_ports(hw_regs_t * hw,ide_ioreg_t data_port,ide_ioreg_t ctrl_port,int * irq)365 pmac_ide_init_hwif_ports(hw_regs_t *hw,
366 ide_ioreg_t data_port, ide_ioreg_t ctrl_port,
367 int *irq)
368 {
369 int i, ix;
370
371 if (data_port == 0)
372 return;
373
374 for (ix = 0; ix < MAX_HWIFS; ++ix)
375 if (data_port == pmac_ide[ix].regbase)
376 break;
377
378 if (ix >= MAX_HWIFS) {
379 /* Probably a PCI interface... */
380 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i)
381 hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET;
382 hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
383 return;
384 }
385
386 for (i = 0; i < 8; ++i)
387 hw->io_ports[i] = data_port + i * 0x10;
388 hw->io_ports[8] = data_port + 0x160;
389
390 if (irq != NULL)
391 *irq = pmac_ide[ix].irq;
392 }
393
/* Setup timings for the selected drive (master/slave). I still need to verify if this
 * is enough, I believe selectproc will be called whenever an IDE command is started,
 * but... */
static void __pmac
pmac_ide_selectproc(ide_drive_t *drive)
{
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;

	if (pmif == NULL)
		return;

	/* timings[1] holds the slave's register value, timings[0] the master's */
	if (drive->select.b.unit & 0x01)
		writel(pmif->timings[1],
			(unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
	else
		writel(pmif->timings[0],
			(unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
	/* Read back, presumably to push the posted write out to the cell
	 * (result intentionally discarded). */
	(void)readl((unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
}
413
/* Kauai (UniNorth 2 ATA-6) variant of selectproc: this cell has two
 * timing registers per drive, one for PIO/MDMA and one for UDMA. */
static void __pmac
pmac_ide_kauai_selectproc(ide_drive_t *drive)
{
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;

	if (pmif == NULL)
		return;

	if (drive->select.b.unit & 0x01) {
		/* slave: timings[1] (PIO/MDMA) and timings[3] (UDMA) */
		writel(pmif->timings[1],
		       (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
		writel(pmif->timings[3],
		       (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG));
	} else {
		/* master: timings[0] (PIO/MDMA) and timings[2] (UDMA) */
		writel(pmif->timings[0],
		       (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
		writel(pmif->timings[2],
		       (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG));
	}
	/* Read back, presumably to push the posted writes out to the cell */
	(void)readl((unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
}
435
436 static void __pmac
pmac_ide_do_update_timings(ide_drive_t * drive)437 pmac_ide_do_update_timings(ide_drive_t *drive)
438 {
439 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
440
441 if (pmif == NULL)
442 return;
443
444 if (pmif->kind == controller_un_ata6)
445 pmac_ide_kauai_selectproc(drive);
446 else
447 pmac_ide_selectproc(drive);
448 }
449
/* Synchronous byte write to a taskfile register: write the value, then
 * read the (little-endian) timing config register back, presumably to
 * push the posted write out to the device before returning. */
static void
pmac_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
{
	u32 tmp;

	writeb(value, port);
	tmp = readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
}
458
/* Issue a SET FEATURES / SETFEATURES_XFER command to switch the drive
 * to the given transfer mode, with interrupts disabled on the hwif's
 * line for the duration. On success the drive's identify-data copy is
 * updated to reflect the newly selected mode.
 * Returns 0 on success, non-zero if the drive never became ready.
 */
static int __pmac
pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
{
	ide_hwif_t *hwif = HWIF(drive);
	int result = 1;

	disable_irq(hwif->irq);	/* disable_irq_nosync ?? */
	udelay(1);
	SELECT_DRIVE(drive);
	SELECT_MASK(drive, 0);
	udelay(1);
	/* Get rid of pending error state */
	(void)hwif->INB(IDE_STATUS_REG);
	/* Timeout bumped for some powerbooks */
	if (wait_for_ready(drive, 2000)) {
		printk(KERN_ERR "pmac_ide_do_setfeature disk not ready "
			"before SET_FEATURE!\n");
		goto out;
	}
	udelay(10);
	/* ctl | 2 sets nIEN, masking the drive's interrupt while we poll */
	hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
	hwif->OUTB(command, IDE_NSECTOR_REG);		/* requested transfer mode */
	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
	hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG);
	udelay(1);
	/* Timeout bumped for some powerbooks */
	result = wait_for_ready(drive, 2000);
	hwif->OUTB(drive->ctl, IDE_CONTROL_REG);	/* restore original control bits */
	if (result)
		printk(KERN_ERR "pmac_ide_do_setfeature disk not ready "
			"after SET_FEATURE !\n");
out:
	SELECT_MASK(drive, 0);
	if (result == 0) {
		/* Update our copy of the identify data to match the mode the
		 * drive should now report: clear the "selected" high bytes,
		 * then set both low (supported) and high (selected) bits for
		 * the programmed mode. */
		drive->id->dma_ultra &= ~0xFF00;
		drive->id->dma_mword &= ~0x0F00;
		drive->id->dma_1word &= ~0x0F00;
		switch(command) {
			case XFER_UDMA_7:
				drive->id->dma_ultra |= 0x8080; break;
			case XFER_UDMA_6:
				drive->id->dma_ultra |= 0x4040; break;
			case XFER_UDMA_5:
				drive->id->dma_ultra |= 0x2020; break;
			case XFER_UDMA_4:
				drive->id->dma_ultra |= 0x1010; break;
			case XFER_UDMA_3:
				drive->id->dma_ultra |= 0x0808; break;
			case XFER_UDMA_2:
				drive->id->dma_ultra |= 0x0404; break;
			case XFER_UDMA_1:
				drive->id->dma_ultra |= 0x0202; break;
			case XFER_UDMA_0:
				drive->id->dma_ultra |= 0x0101; break;
			case XFER_MW_DMA_2:
				drive->id->dma_mword |= 0x0404; break;
			case XFER_MW_DMA_1:
				drive->id->dma_mword |= 0x0202; break;
			case XFER_MW_DMA_0:
				drive->id->dma_mword |= 0x0101; break;
			case XFER_SW_DMA_2:
				drive->id->dma_1word |= 0x0404; break;
			case XFER_SW_DMA_1:
				drive->id->dma_1word |= 0x0202; break;
			case XFER_SW_DMA_0:
				drive->id->dma_1word |= 0x0101; break;
			default: break;
		}
	}
	enable_irq(hwif->irq);
	return result;
}
532
/* Calculate PIO timings for the requested mode and store them in the
 * drive's cached timing word; the hardware register is only rewritten
 * if this drive is currently the selected one. */
static void __pmac
pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
{
	ide_pio_data_t d;
	u32 *timings;
	unsigned accessTicks, recTicks;
	unsigned accessTime, recTime;
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;

	if (pmif == NULL)
		return;

	/* which drive is it ? */
	timings = &pmif->timings[drive->select.b.unit & 0x01];

	pio = ide_get_best_pio_mode(drive, pio, 4, &d);

	switch (pmif->kind) {
	case controller_un_ata6: {
		/* 100Mhz cell: use the pre-calculated Kauai table */
		u32 tr = kauai_lookup_timing(kauai_pio_timings, d.cycle_time);
		if (tr == 0)
			return;
		*timings = ((*timings) & ~TR_100_PIOREG_PIO_MASK) | tr;
		break;
		}
	case controller_kl_ata4:
		/* 66Mhz cell: recovery is whatever is left of the cycle
		 * after the active & setup phases; enforce 150ns minimums
		 * and clamp both to the 5-bit register fields. */
		recTime = d.cycle_time - ide_pio_timings[pio].active_time
				- ide_pio_timings[pio].setup_time;
		recTime = max(recTime, 150U);
		accessTime = ide_pio_timings[pio].active_time;
		accessTime = max(accessTime, 150U);
		accessTicks = SYSCLK_TICKS_66(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		recTicks = SYSCLK_TICKS_66(recTime);
		recTicks = min(recTicks, 0x1fU);
		*timings = ((*timings) & ~TR_66_PIO_MASK) |
			(accessTicks << TR_66_PIO_ACCESS_SHIFT) |
			(recTicks << TR_66_PIO_RECOVERY_SHIFT);
		break;
	default: {
		/* 33Mhz cell */
		int ebit = 0;
		recTime = d.cycle_time - ide_pio_timings[pio].active_time
				- ide_pio_timings[pio].setup_time;
		recTime = max(recTime, 150U);
		accessTime = ide_pio_timings[pio].active_time;
		accessTime = max(accessTime, 150U);
		accessTicks = SYSCLK_TICKS(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		accessTicks = max(accessTicks, 4U);
		recTicks = SYSCLK_TICKS(recTime);
		recTicks = min(recTicks, 0x1fU);
		recTicks = max(recTicks, 5U) - 4;
		/* Recovery field can't hold very long times; use the E bit
		 * to extend the timing instead. */
		if (recTicks > 9) {
			recTicks--; /* guess, but it's only for PIO0, so... */
			ebit = 1;
		}
		*timings = ((*timings) & ~TR_33_PIO_MASK) |
			(accessTicks << TR_33_PIO_ACCESS_SHIFT) |
			(recTicks << TR_33_PIO_RECOVERY_SHIFT);
		if (ebit)
			*timings |= TR_33_PIO_E;
		break;
		}
	}

#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "ide_pmac: Set PIO timing for mode %d, reg: 0x%08x\n",
		pio, *timings);
#endif

	/* Only touch the hardware if this drive is currently selected;
	 * otherwise selectproc will load it on the next selection. */
	if (drive->select.all == HWIF(drive)->INB(IDE_SELECT_REG))
		pmac_ide_do_update_timings(drive);
}
610
611 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/* Program the KeyLargo ATA-4 (66Mhz cell) UDMA bits of *timings for the
 * given transfer mode (XFER_UDMA_0..4; speed & 0xf is the mode number).
 * Returns 0 on success, 1 if the mode is out of range for this cell.
 */
static int __pmac
set_timings_udma_ata4(u32 *timings, u8 speed)
{
	unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;

	if (speed > XFER_UDMA_4)
		return 1;

	/* Convert the rounded ns values from kl66_udma_timings[] into
	 * 15ns (66Mhz) clock ticks. */
	rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
	wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
	addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);

	/* Clear both UDMA and MDMA fields, then set the UDMA timings and
	 * the UDMA enable bit. */
	*timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
			(wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
			(rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
			(addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
			TR_66_UDMA_EN;
#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
		speed & 0xf, *timings);
#endif

	return 0;
}
636
/* Program the Kauai (UniNorth 2, 100Mhz cell) UDMA timing register for
 * the given mode (XFER_UDMA_0..5). pio_timings is accepted for symmetry
 * with the other set_timings_* helpers but is not modified here.
 * Returns 0 on success, 1 if the mode is unsupported or has no table entry.
 */
static int __pmac
set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
{
	struct ide_timing *t = ide_timing_find_mode(speed);
	u32 tr;

	if (speed > XFER_UDMA_5 || t == NULL)
		return 1;
	/* Look up the pre-calculated register value for this mode's
	 * UDMA cycle time. */
	tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
	if (tr == 0)
		return 1;
	*ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
	*ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;

	return 0;
}
653
/* Compute MDMA timings for a cell type and mode (speed is
 * XFER_MW_DMA_0..2). timings points at the PIO/MDMA register value,
 * timings2 at the Kauai UDMA register value (only touched on the
 * 100Mhz cell, to clear its UDMA enable bit). drive_cycle_time, when
 * non-zero, is a minimum cycle time requested for the drive.
 * Returns 0 on success, 1 on unsupported mode/timing.
 */
static int __pmac
set_timings_mdma(int intf_type, u32 *timings, u32 *timings2, u8 speed, int drive_cycle_time)
{
	int cycleTime, accessTime, recTime;
	unsigned accessTicks, recTicks;
	struct mdma_timings_t* tm = NULL;
	int i;

	/* Get default cycle time for mode */
	switch(speed & 0xf) {
	case 0: cycleTime = 480; break;
	case 1: cycleTime = 150; break;
	case 2: cycleTime = 120; break;
	default:
		return 1;
	}
	/* Adjust for drive */
	if (drive_cycle_time && drive_cycle_time > cycleTime)
		cycleTime = drive_cycle_time;
	/* OHare limits according to some old Apple sources */
	if ((intf_type == controller_ohare) && (cycleTime < 150))
		cycleTime = 150;
	/* Get the proper timing array for this controller */
	switch(intf_type) {
	case controller_un_ata6:
		/* 100Mhz cell uses the pre-calculated Kauai table below
		 * instead of an access/recovery pair */
		break;
	case controller_kl_ata4:
		tm = mdma_timings_66;
		break;
	case controller_kl_ata3:
		tm = mdma_timings_33k;
		break;
	default:
		tm = mdma_timings_33;
		break;
	}
	if (tm != NULL) {
		/* Lookup matching access & recovery times: walk the
		 * descending table to the last entry whose cycle time is
		 * still >= the requested one. */
		i = -1;
		for (;;) {
			if (tm[i+1].cycleTime < cycleTime)
				break;
			i++;
		}
		/* Requested cycle is longer than the slowest table entry */
		if (i < 0)
			return 1;
		cycleTime = tm[i].cycleTime;
		accessTime = tm[i].accessTime;
		recTime = tm[i].recoveryTime;

#ifdef IDE_PMAC_DEBUG
		printk(KERN_ERR "ide_pmac: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
			cycleTime, accessTime, recTime);
#endif
	}
	switch(intf_type) {
	case controller_un_ata6: {
		/* 100Mhz cell */
		u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
		if (tr == 0)
			return 1;
		*timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
		*timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
		}
		break;
	case controller_kl_ata4:
		/* 66Mhz cell: convert ns to 15ns ticks and clamp to the
		 * register field widths */
		accessTicks = SYSCLK_TICKS_66(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		accessTicks = max(accessTicks, 0x1U);
		recTicks = SYSCLK_TICKS_66(recTime);
		recTicks = min(recTicks, 0x1fU);
		recTicks = max(recTicks, 0x3U);
		/* Clear out mdma bits and disable udma */
		*timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
			(accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
			(recTicks << TR_66_MDMA_RECOVERY_SHIFT);
		break;
	case controller_kl_ata3:
		/* 33Mhz cell on KeyLargo (no HalfTick support) */
		accessTicks = SYSCLK_TICKS(accessTime);
		accessTicks = max(accessTicks, 1U);
		accessTicks = min(accessTicks, 0x1fU);
		accessTime = accessTicks * IDE_SYSCLK_NS;
		recTicks = SYSCLK_TICKS(recTime);
		recTicks = max(recTicks, 1U);
		recTicks = min(recTicks, 0x1fU);
		*timings = ((*timings) & ~TR_33_MDMA_MASK) |
				(accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
				(recTicks << TR_33_MDMA_RECOVERY_SHIFT);
		break;
	default: {
		/* 33Mhz cell on others */
		int halfTick = 0;
		int origAccessTime = accessTime;
		int origRecTime = recTime;

		accessTicks = SYSCLK_TICKS(accessTime);
		accessTicks = max(accessTicks, 1U);
		accessTicks = min(accessTicks, 0x1fU);
		accessTime = accessTicks * IDE_SYSCLK_NS;
		recTicks = SYSCLK_TICKS(recTime);
		recTicks = max(recTicks, 2U) - 1;
		recTicks = min(recTicks, 0x1fU);
		recTime = (recTicks + 1) * IDE_SYSCLK_NS;
		/* Use the HalfTick bit (adds 15ns to access, removes 15ns
		 * from recovery) when the rounded-up values leave room */
		if ((accessTicks > 1) &&
		    ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
		    ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
			halfTick = 1;
			accessTicks--;
		}
		*timings = ((*timings) & ~TR_33_MDMA_MASK) |
				(accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
				(recTicks << TR_33_MDMA_RECOVERY_SHIFT);
		if (halfTick)
			*timings |= TR_33_MDMA_HALFTICK;
		}
	}
#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "ide_pmac: Set MDMA timing for mode %d, reg: 0x%08x\n",
		speed & 0xf, *timings);
#endif
	return 0;
}
778 #endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
779
/* You may notice we don't use this function on normal operation,
 * our normal mdma function is supposed to be more precise
 */
/* Switch the drive (and the controller's cached timings) to the given
 * transfer mode. Returns 0 on success, non-zero if the mode isn't
 * supported by this cell/cable or the drive refused it.
 */
static int __pmac
pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
{
	int unit = (drive->select.b.unit & 0x01);
	int ret = 0;
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
	u32 *timings, *timings2;

	if (pmif == NULL)
		return 1;

	/* timings: PIO/MDMA register value; timings2: Kauai UDMA value */
	timings = &pmif->timings[unit];
	timings2 = &pmif->timings[unit+2];

	switch(speed) {
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
	case XFER_UDMA_5:
		/* Only the UniNorth 2 (Kauai) cell does UDMA 5 */
		if (pmif->kind != controller_un_ata6)
			return 1;
		/* fall through */
	case XFER_UDMA_4:
	case XFER_UDMA_3:
		/* UDMA 3 and up require an 80-conductor cable */
		if (HWIF(drive)->udma_four == 0)
			return 1;
		/* fall through */
	case XFER_UDMA_2:
	case XFER_UDMA_1:
	case XFER_UDMA_0:
		if (pmif->kind == controller_kl_ata4)
			ret = set_timings_udma_ata4(timings, speed);
		else if (pmif->kind == controller_un_ata6)
			ret = set_timings_udma_ata6(timings, timings2, speed);
		else
			ret = 1;
		break;
	case XFER_MW_DMA_2:
	case XFER_MW_DMA_1:
	case XFER_MW_DMA_0:
		ret = set_timings_mdma(pmif->kind, timings, timings2, speed, 0);
		break;
	case XFER_SW_DMA_2:
	case XFER_SW_DMA_1:
	case XFER_SW_DMA_0:
		/* Single-word DMA is not supported on these cells */
		return 1;
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
	case XFER_PIO_4:
	case XFER_PIO_3:
	case XFER_PIO_2:
	case XFER_PIO_1:
	case XFER_PIO_0:
		/* speed & 0x07 extracts the PIO mode number */
		pmac_ide_tuneproc(drive, speed & 0x07);
		break;
	default:
		ret = 1;
	}
	if (ret)
		return ret;

	/* Tell the drive, then load the new timings into the hardware */
	ret = pmac_ide_do_setfeature(drive, speed);
	if (ret)
		return ret;

	pmac_ide_do_update_timings(drive);
	drive->current_speed = speed;

	return 0;
}
848
849 static void __pmac
sanitize_timings(pmac_ide_hwif_t * pmif)850 sanitize_timings(pmac_ide_hwif_t *pmif)
851 {
852 unsigned int value, value2 = 0;
853
854 switch(pmif->kind) {
855 case controller_un_ata6:
856 value = 0x08618a92;
857 value2 = 0x00002921;
858 break;
859 case controller_kl_ata4:
860 value = 0x0008438c;
861 break;
862 case controller_kl_ata3:
863 value = 0x00084526;
864 break;
865 case controller_heathrow:
866 case controller_ohare:
867 default:
868 value = 0x00074526;
869 break;
870 }
871 pmif->timings[0] = pmif->timings[1] = value;
872 pmif->timings[2] = pmif->timings[3] = value2;
873 }
874
/* Return the register base of hwif slot @index (0 for unused slots). */
ide_ioreg_t __pmac
pmac_ide_get_base(int index)
{
	return pmac_ide[index].regbase;
}
880
881 int __pmac
pmac_ide_check_base(ide_ioreg_t base)882 pmac_ide_check_base(ide_ioreg_t base)
883 {
884 int ix;
885
886 for (ix = 0; ix < MAX_HWIFS; ++ix)
887 if (base == pmac_ide[ix].regbase)
888 return ix;
889 return -1;
890 }
891
892 struct device_node*
pmac_ide_get_of_node(int index)893 pmac_ide_get_of_node(int index)
894 {
895 /* Don't access the pmac_ide array on non-pmac */
896 if (pmac_ide_count == 0)
897 return NULL;
898 if (pmac_ide[index].regbase == 0)
899 return NULL;
900 return pmac_ide[index].node;
901 }
902
903 int __pmac
pmac_ide_get_irq(ide_ioreg_t base)904 pmac_ide_get_irq(ide_ioreg_t base)
905 {
906 int ix;
907
908 for (ix = 0; ix < MAX_HWIFS; ++ix)
909 if (base == pmac_ide[ix].regbase)
910 return pmac_ide[ix].irq;
911 return 0;
912 }
913
/* Block-device major numbers for IDE interfaces 0..5, used to build
 * the boot device number below. */
static int ide_majors[] __pmacdata = { 3, 22, 33, 34, 56, 57 };

/* Map the first n characters of an OF boot path to the device number
 * of the matching interface's first drive, or 0 if none matches. */
kdev_t __init
pmac_find_ide_boot(char *bootdevice, int n)
{
	int i;

	/*
	 * Look through the list of IDE interfaces for this one.
	 */
	for (i = 0; i < pmac_ide_count; ++i) {
		char *name;
		if (!pmac_ide[i].node || !pmac_ide[i].node->full_name)
			continue;
		name = pmac_ide[i].node->full_name;
		/* Exact match: same n leading chars and nothing after */
		if (memcmp(name, bootdevice, n) == 0 && name[n] == 0) {
			/* XXX should cope with the 2nd drive as well... */
			return MKDEV(ide_majors[i], 0);
		}
	}

	return 0;
}
937
938 void __init
pmac_ide_probe(void)939 pmac_ide_probe(void)
940 {
941 struct device_node *np;
942 int i;
943 struct device_node *atas = NULL;
944 struct device_node *p, *nextp, **pp, *removables, **rp;
945 unsigned long base, regbase;
946 int irq;
947 ide_hwif_t *hwif;
948
949 if (_machine != _MACH_Pmac)
950 return;
951 pp = &atas;
952 rp = &removables;
953 p = find_devices("ATA");
954 if (p == NULL)
955 p = find_devices("IDE");
956 if (p == NULL)
957 p = find_type_devices("ide");
958 if (p == NULL)
959 p = find_type_devices("ata");
960 /* Move removable devices such as the media-bay CDROM
961 on the PB3400 to the end of the list. */
962 for (; p != NULL; p = nextp) {
963 nextp = p->next;
964 if (p->parent && p->parent->type
965 && strcasecmp(p->parent->type, "media-bay") == 0) {
966 *rp = p;
967 rp = &p->next;
968 }
969 #ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
	/* Move the Kauai ATA/100, if it exists, to the first position in the list */
971 else if (device_is_compatible(p, "kauai-ata")) {
972 p->next = atas;
973 if (pp == &atas)
974 pp = &p->next;
975 atas = p;
976 }
977 #endif /* CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST */
978 else {
979 *pp = p;
980 pp = &p->next;
981 }
982 }
983 *rp = NULL;
984 *pp = removables;
985
986 for (i = 0, np = atas; i < MAX_HWIFS && np != NULL; np = np->next) {
987 struct device_node *tp;
988 struct pmac_ide_hwif* pmif;
989 int *bidp;
990 int in_bay = 0;
991 u8 pbus, pid;
992 struct pci_dev *pdev = NULL;
993
994 /*
995 * If this node is not under a mac-io or dbdma node,
996 * leave it to the generic PCI driver. Except for U2's
997 * Kauai ATA
998 */
999 if (!device_is_compatible(np, "kauai-ata")) {
1000 for (tp = np->parent; tp != 0; tp = tp->parent)
1001 if (tp->type && (strcmp(tp->type, "mac-io") == 0
1002 || strcmp(tp->type, "dbdma") == 0))
1003 break;
1004 if (tp == 0)
1005 continue;
1006
1007 if (np->n_addrs == 0) {
1008 printk(KERN_WARNING "ide-pmac: no address for device %s\n",
1009 np->full_name);
1010 continue;
1011 }
1012 /* We need to find the pci_dev of the mac-io holding the
1013 * IDE interface
1014 */
1015 if (pci_device_from_OF_node(tp, &pbus, &pid) == 0)
1016 pdev = pci_find_slot(pbus, pid);
1017
1018 if (pdev == NULL)
1019 printk(KERN_WARNING "ide-pmac: no PCI host for device %s, DMA disabled\n",
1020 np->full_name);
1021 /*
1022 * Some older OFs have bogus sizes, causing request_OF_resource
1023 * to fail. We fix them up here
1024 */
1025 if (np->addrs[0].size > 0x1000)
1026 np->addrs[0].size = 0x1000;
1027 if (np->n_addrs > 1 && np->addrs[1].size > 0x100)
1028 np->addrs[1].size = 0x100;
1029
1030 if (request_OF_resource(np, 0, " (mac-io IDE IO)") == NULL) {
1031 printk(KERN_ERR "ide-pmac(%s): can't request IO resource !\n", np->name);
1032 continue;
1033 }
1034
1035 base = (unsigned long) ioremap(np->addrs[0].address, 0x400);
1036 regbase = base;
1037
1038 /* XXX This is bogus. Should be fixed in the registry by checking
1039 the kind of host interrupt controller, a bit like gatwick
1040 fixes in irq.c
1041 */
1042 if (np->n_intrs == 0) {
1043 printk(KERN_WARNING "ide-pmac: no intrs for device %s, using 13\n",
1044 np->full_name);
1045 irq = 13;
1046 } else {
1047 irq = np->intrs[0].line;
1048 }
1049 } else {
1050 unsigned long rbase, rlen;
1051
1052 if (pci_device_from_OF_node(np, &pbus, &pid) == 0)
1053 pdev = pci_find_slot(pbus, pid);
1054 if (pdev == NULL) {
1055 printk(KERN_WARNING "ide-pmac: no PCI host for device %s, skipping\n",
1056 np->full_name);
1057 continue;
1058 }
1059 if (pci_enable_device(pdev)) {
1060 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for %s, skipping\n",
1061 np->full_name);
1062 continue;
1063 }
1064 pci_set_master(pdev);
1065
1066 if (pci_request_regions(pdev, "U2 IDE")) {
1067 printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources\n");
1068 continue;
1069 }
1070
1071 rbase = pci_resource_start(pdev, 0);
1072 rlen = pci_resource_len(pdev, 0);
1073
1074 base = (unsigned long) ioremap(rbase, rlen);
1075 regbase = base + 0x2000;
1076
1077 irq = pdev->irq;
1078 }
1079
1080 /*
1081 * If this slot is taken (e.g. by ide-pci.c) try the next one.
1082 */
1083 while (i < MAX_HWIFS
1084 && ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0)
1085 ++i;
1086 if (i >= MAX_HWIFS)
1087 break;
1088 pmif = &pmac_ide[i];
1089
1090 pmif->mapbase = base;
1091 pmif->regbase = regbase;
1092 pmif->irq = irq;
1093 pmif->node = np;
1094 pmif->cable_80 = 0;
1095 if (device_is_compatible(np, "kauai-ata")) {
1096 pmif->kind = controller_un_ata6;
1097 pci_set_drvdata(pdev, pmif);
1098 } else if (device_is_compatible(np, "keylargo-ata")) {
1099 if (strcmp(np->name, "ata-4") == 0)
1100 pmif->kind = controller_kl_ata4;
1101 else
1102 pmif->kind = controller_kl_ata3;
1103 } else if (device_is_compatible(np, "heathrow-ata"))
1104 pmif->kind = controller_heathrow;
1105 else
1106 pmif->kind = controller_ohare;
1107
1108 bidp = (int *)get_property(np, "AAPL,bus-id", NULL);
1109 pmif->aapl_bus_id = bidp ? *bidp : 0;
1110
1111 /* Get cable type from device-tree */
1112 if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6) {
1113 char* cable = get_property(np, "cable-type", NULL);
1114 if (cable && !strncmp(cable, "80-", 3))
1115 pmif->cable_80 = 1;
1116 }
1117
1118 /* Make sure we have sane timings */
1119 sanitize_timings(pmif);
1120
1121 if (np->parent && np->parent->name
1122 && strcasecmp(np->parent->name, "media-bay") == 0) {
1123 #ifdef CONFIG_PMAC_PBOOK
1124 media_bay_set_ide_infos(np->parent,regbase,irq,i);
1125 #endif /* CONFIG_PMAC_PBOOK */
1126 in_bay = 1;
1127 if (!bidp)
1128 pmif->aapl_bus_id = 1;
1129 } else if (pmif->kind == controller_ohare) {
1130 /* The code below is having trouble on some ohare machines
1131 * (timing related ?). Until I can put my hand on one of these
1132 * units, I keep the old way
1133 */
1134 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
1135 } else {
1136 /* This is necessary to enable IDE when net-booting */
1137 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
1138 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
1139 mdelay(10);
1140 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
1141 }
1142
1143 hwif = &ide_hwifs[i];
1144 /* Setup MMIO ops */
1145 default_hwif_mmiops(hwif);
1146 hwif->OUTBSYNC = pmac_outbsync;
1147 /* Tell common code _not_ to mess with resources */
1148 hwif->mmio = 2;
1149 hwif->hwif_data = pmif;
1150 pmac_ide_init_hwif_ports(&hwif->hw, regbase, 0, &hwif->irq);
1151 memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports));
1152 hwif->chipset = ide_pmac;
1153 hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || in_bay;
1154 hwif->hold = in_bay;
1155 hwif->udma_four = pmif->cable_80;
1156 hwif->pci_dev = pdev;
1157 hwif->drives[0].unmask = 1;
1158 hwif->drives[1].unmask = 1;
1159 hwif->tuneproc = pmac_ide_tuneproc;
1160 if (pmif->kind == controller_un_ata6)
1161 hwif->selectproc = pmac_ide_kauai_selectproc;
1162 else
1163 hwif->selectproc = pmac_ide_selectproc;
1164 hwif->speedproc = pmac_ide_tune_chipset;
1165
1166 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s\n",
1167 i, model_name[pmif->kind], pmif->aapl_bus_id,
1168 in_bay ? " (mediabay)" : "");
1169
1170 #ifdef CONFIG_PMAC_PBOOK
1171 if (in_bay && check_media_bay_by_base(regbase, MB_CD) == 0)
1172 hwif->noprobe = 0;
1173 #endif /* CONFIG_PMAC_PBOOK */
1174
1175 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1176 if (np->n_addrs >= 2 || pmif->kind == controller_un_ata6) {
1177 /* has a DBDMA controller channel */
1178 pmac_ide_setup_dma(np, i);
1179 }
1180 hwif->atapi_dma = 1;
1181 switch(pmif->kind) {
1182 case controller_un_ata6:
1183 hwif->ultra_mask = pmif->cable_80 ? 0x3f : 0x07;
1184 hwif->mwdma_mask = 0x07;
1185 hwif->swdma_mask = 0x00;
1186 break;
1187 case controller_kl_ata4:
1188 hwif->ultra_mask = pmif->cable_80 ? 0x1f : 0x07;
1189 hwif->mwdma_mask = 0x07;
1190 hwif->swdma_mask = 0x00;
1191 break;
1192 default:
1193 hwif->ultra_mask = 0x00;
1194 hwif->mwdma_mask = 0x07;
1195 hwif->swdma_mask = 0x00;
1196 break;
1197 }
1198 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1199
1200 ++i;
1201 }
1202 pmac_ide_count = i;
1203
1204 if (pmac_ide_count == 0)
1205 return;
1206
1207 #ifdef CONFIG_PMAC_PBOOK
1208 pmu_register_sleep_notifier(&idepmac_sleep_notifier);
1209 #endif /* CONFIG_PMAC_PBOOK */
1210 }
1211
1212 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1213
/*
 * Build the scatter/gather list for a regular (buffer_head based)
 * request.  Adjacent buffer_heads that are physically contiguous are
 * coalesced into a single sg entry.  Returns the number of DMA-mapped
 * entries, or 0 if the request needs more than MAX_DCMDS entries
 * (caller then falls back to PIO).
 */
static int __pmac
pmac_ide_build_sglist(ide_hwif_t *hwif, struct request *rq, int data_dir)
{
	pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
	struct buffer_head *bh;
	struct scatterlist *sg = pmif->sg_table;
	/* ~0UL can never match a real end address, so the first
	 * buffer_head is never treated as contiguous */
	unsigned long lastdataend = ~0UL;
	int nents = 0;

	/* A previous sg mapping still active would be a driver bug */
	if (hwif->sg_dma_active)
		BUG();

	/* Remembered for the matching pci_unmap_sg() at teardown */
	pmif->sg_dma_direction = data_dir;

	bh = rq->bh;
	do {
		int contig = 0;

		/* Highmem pages are tracked by physical address,
		 * kernel buffers by virtual address */
		if (bh->b_page) {
			if (bh_phys(bh) == lastdataend)
				contig = 1;
		} else {
			if ((unsigned long) bh->b_data == lastdataend)
				contig = 1;
		}

		/* Contiguous with the previous entry: just grow it */
		if (contig) {
			sg[nents - 1].length += bh->b_size;
			lastdataend += bh->b_size;
			continue;
		}

		/* Table full: signal the caller to revert to PIO */
		if (nents >= MAX_DCMDS)
			return 0;

		memset(&sg[nents], 0, sizeof(*sg));

		if (bh->b_page) {
			sg[nents].page = bh->b_page;
			sg[nents].offset = bh_offset(bh);
			lastdataend = bh_phys(bh) + bh->b_size;
		} else {
			/* Sanity check: a buffer below PAGE_SIZE cannot
			 * be a valid kernel virtual address */
			if ((unsigned long) bh->b_data < PAGE_SIZE)
				BUG();

			sg[nents].address = bh->b_data;
			lastdataend = (unsigned long) bh->b_data + bh->b_size;
		}

		sg[nents].length = bh->b_size;
		nents++;
	} while ((bh = bh->b_reqnext) != NULL);

	/* A request with no buffers should never reach us */
	if(nents == 0)
		BUG();

	return pci_map_sg(hwif->pci_dev, sg, nents, data_dir);
}
1272
1273 static int __pmac
pmac_ide_raw_build_sglist(ide_hwif_t * hwif,struct request * rq)1274 pmac_ide_raw_build_sglist(ide_hwif_t *hwif, struct request *rq)
1275 {
1276 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1277 struct scatterlist *sg = hwif->sg_table;
1278 int nents = 0;
1279 ide_task_t *args = rq->special;
1280 unsigned char *virt_addr = rq->buffer;
1281 int sector_count = rq->nr_sectors;
1282
1283 if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
1284 pmif->sg_dma_direction = PCI_DMA_TODEVICE;
1285 else
1286 pmif->sg_dma_direction = PCI_DMA_FROMDEVICE;
1287
1288 if (sector_count > 128) {
1289 memset(&sg[nents], 0, sizeof(*sg));
1290 sg[nents].address = virt_addr;
1291 sg[nents].length = 128 * SECTOR_SIZE;
1292 nents++;
1293 virt_addr = virt_addr + (128 * SECTOR_SIZE);
1294 sector_count -= 128;
1295 }
1296 memset(&sg[nents], 0, sizeof(*sg));
1297 sg[nents].address = virt_addr;
1298 sg[nents].length = sector_count * SECTOR_SIZE;
1299 nents++;
1300
1301 return pci_map_sg(hwif->pci_dev, sg, nents, pmif->sg_dma_direction);
1302 }
1303
1304 /*
1305 * pmac_ide_build_dmatable builds the DBDMA command list
1306 * for a transfer and sets the DBDMA channel to point to it.
1307 */
1308 static int __pmac
pmac_ide_build_dmatable(ide_drive_t * drive,struct request * rq,int ddir)1309 pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq, int ddir)
1310 {
1311 struct dbdma_cmd *table;
1312 int i, count = 0;
1313 ide_hwif_t *hwif = HWIF(drive);
1314 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1315 volatile struct dbdma_regs *dma = pmif->dma_regs;
1316 struct scatterlist *sg;
1317
1318 /* DMA table is already aligned */
1319 table = (struct dbdma_cmd *) pmif->dma_table_cpu;
1320
1321 /* Make sure DMA controller is stopped (necessary ?) */
1322 writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
1323 while (readl(&dma->status) & RUN)
1324 udelay(1);
1325
1326 /* Build sglist */
1327 if (rq->cmd == IDE_DRIVE_TASKFILE)
1328 pmif->sg_nents = i = pmac_ide_raw_build_sglist(hwif, rq);
1329 else
1330 pmif->sg_nents = i = pmac_ide_build_sglist(hwif, rq, ddir);
1331 if (!i)
1332 return 0;
1333
1334 /* Build DBDMA commands list */
1335 sg = pmif->sg_table;
1336 while (i && sg_dma_len(sg)) {
1337 u32 cur_addr;
1338 u32 cur_len;
1339
1340 cur_addr = sg_dma_address(sg);
1341 cur_len = sg_dma_len(sg);
1342
1343 while (cur_len) {
1344 unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
1345
1346 if (++count >= MAX_DCMDS) {
1347 printk(KERN_WARNING "%s: DMA table too small\n",
1348 drive->name);
1349 goto use_pio_instead;
1350 }
1351 st_le16(&table->command,
1352 (ddir == PCI_DMA_TODEVICE) ? OUTPUT_MORE: INPUT_MORE);
1353 st_le16(&table->req_count, tc);
1354 st_le32(&table->phy_addr, cur_addr);
1355 table->cmd_dep = 0;
1356 table->xfer_status = 0;
1357 table->res_count = 0;
1358 cur_addr += tc;
1359 cur_len -= tc;
1360 ++table;
1361 }
1362 sg++;
1363 i--;
1364 }
1365
1366 /* convert the last command to an input/output last command */
1367 if (count) {
1368 st_le16(&table[-1].command,
1369 (ddir == PCI_DMA_TODEVICE) ? OUTPUT_LAST: INPUT_LAST);
1370 /* add the stop command to the end of the list */
1371 memset(table, 0, sizeof(struct dbdma_cmd));
1372 st_le16(&table->command, DBDMA_STOP);
1373 mb();
1374 writel(pmif->dma_table_dma, &dma->cmdptr);
1375 return 1;
1376 }
1377
1378 printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
1379 use_pio_instead:
1380 pci_unmap_sg(hwif->pci_dev,
1381 pmif->sg_table,
1382 pmif->sg_nents,
1383 pmif->sg_dma_direction);
1384 hwif->sg_dma_active = 0;
1385 return 0; /* revert to PIO for this request */
1386 }
1387
1388 /* Teardown mappings after DMA has completed. */
1389 static void __pmac
pmac_ide_destroy_dmatable(ide_drive_t * drive)1390 pmac_ide_destroy_dmatable (ide_drive_t *drive)
1391 {
1392 struct pci_dev *dev = HWIF(drive)->pci_dev;
1393 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
1394 struct scatterlist *sg = pmif->sg_table;
1395 int nents = pmif->sg_nents;
1396
1397 if (nents) {
1398 pci_unmap_sg(dev, sg, nents, pmif->sg_dma_direction);
1399 pmif->sg_nents = 0;
1400 HWIF(drive)->sg_dma_active = 0;
1401 }
1402 }
1403
/*
 * Calculate MultiWord DMA timings and switch the drive to the
 * requested MDMA mode.  Timings are computed into a local copy first
 * so the cached controller timings are only updated once the drive has
 * accepted the mode via SET FEATURES.  The caller applies them to the
 * hardware afterwards (see pmac_ide_do_update_timings() in
 * pmac_ide_dma_check()).  Returns 1 on success, 0 on failure.
 */
static int __pmac
pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
{
	ide_hwif_t *hwif = HWIF(drive);
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
	int drive_cycle_time;
	struct hd_driveid *id = drive->id;
	u32 *timings, *timings2;
	u32 timing_local[2];
	int ret;

	/* which drive is it ? Slots 0/1 hold the first timing word per
	 * unit, slots 2/3 the second (used by some controller kinds) */
	timings = &pmif->timings[drive->select.b.unit & 0x01];
	timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];

	/* Check if drive provide explicit cycle time */
	if ((id->field_valid & 2) && (id->eide_dma_time))
		drive_cycle_time = id->eide_dma_time;
	else
		drive_cycle_time = 0;

	/* Copy timings to local image */
	timing_local[0] = *timings;
	timing_local[1] = *timings2;

	/* Calculate controller timings */
	ret = set_timings_mdma(	pmif->kind,
				&timing_local[0],
				&timing_local[1],
				mode,
				drive_cycle_time);
	if (ret)
		return 0;

	/* Set feature on drive */
	printk(KERN_INFO "%s: Enabling MultiWord DMA %d\n", drive->name, mode & 0xf);
	ret = pmac_ide_do_setfeature(drive, mode);
	if (ret) {
		printk(KERN_WARNING "%s: Failed !\n", drive->name);
		return 0;
	}

	/* Apply timings to controller (cached image only; hardware is
	 * updated by the caller) */
	*timings = timing_local[0];
	*timings2 = timing_local[1];

	/* Set speed info in drive */
	drive->current_speed = mode;
	if (!drive->init_speed)
		drive->init_speed = mode;

	return 1;
}
1458
/*
 * Calculate Ultra DMA timings and switch the drive to the requested
 * UDMA mode.  Same two-phase scheme as pmac_ide_mdma_enable(): compute
 * into a local copy, program the drive via SET FEATURES, and only then
 * commit to the cached controller timings.  Returns 1 on success,
 * 0 on failure.
 */
static int __pmac
pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
{
	ide_hwif_t *hwif = HWIF(drive);
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
	u32 *timings, *timings2;
	u32 timing_local[2];
	int ret;

	/* which drive is it ? Slots 0/1 hold the first timing word per
	 * unit, slots 2/3 the second */
	timings = &pmif->timings[drive->select.b.unit & 0x01];
	timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];

	/* Copy timings to local image */
	timing_local[0] = *timings;
	timing_local[1] = *timings2;

	/* Calculate timings for interface: the ATA-6 (Kauai) cell uses
	 * both timing words, the ATA-4 (Keylargo) cell only the first */
	if (pmif->kind == controller_un_ata6)
		ret = set_timings_udma_ata6(	&timing_local[0],
						&timing_local[1],
						mode);
	else
		ret = set_timings_udma_ata4(&timing_local[0], mode);
	if (ret)
		return 0;

	/* Set feature on drive */
	printk(KERN_INFO "%s: Enabling Ultra DMA %d\n", drive->name, mode & 0x0f);
	ret = pmac_ide_do_setfeature(drive, mode);
	if (ret) {
		printk(KERN_WARNING "%s: Failed !\n", drive->name);
		return 0;
	}

	/* Apply timings to controller (cached image only) */
	*timings = timing_local[0];
	*timings2 = timing_local[1];

	/* Set speed info in drive */
	drive->current_speed = mode;
	if (!drive->init_speed)
		drive->init_speed = mode;

	return 1;
}
1506
/*
 * ide_dma_check hook: decide whether DMA can be used for this drive,
 * pick the best UDMA/MDMA mode supported by both drive and controller,
 * and program drive + controller accordingly.  Sets drive->using_dma
 * as a side effect; always returns 0.
 */
static __pmac
int pmac_ide_dma_check(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = HWIF(drive);
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
	int enable = 1;
	int map;
	drive->using_dma = 0;

	if (pmif == NULL)
		return 0;

	/* No DMA for floppies, for drives that don't advertise DMA
	 * capability (unless whitelisted), or for blacklisted drives */
	if (drive->media == ide_floppy)
		enable = 0;
	if (((id->capability & 1) == 0) &&
	    !HWIF(drive)->ide_dma_good_drive(drive))
		enable = 0;
	if (HWIF(drive)->ide_dma_bad_drive(drive))
		enable = 0;

	if (enable) {
		short mode;

		/* Build the transfer-mode map: MDMA always, UDMA on
		 * ATA-4/ATA-6 cells, higher UDMA only with an 80-wire
		 * cable */
		map = XFER_MWDMA;
		if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6) {
			map |= XFER_UDMA;
			if (pmif->cable_80) {
				map |= XFER_UDMA_66;
				if (pmif->kind == controller_un_ata6)
					map |= XFER_UDMA_100;
			}
		}
		mode = ide_find_best_mode(drive, map);
		if (mode & XFER_UDMA)
			drive->using_dma = pmac_ide_udma_enable(drive, mode);
		else if (mode & XFER_MWDMA)
			drive->using_dma = pmac_ide_mdma_enable(drive, mode);
		/* Write 0 to the device control register (clears nIEN) */
		hwif->OUTB(0, IDE_CONTROL_REG);
		/* Apply settings to controller */
		pmac_ide_do_update_timings(drive);
	}
	return 0;
}
1551
/*
 * ide_dma_read hook: set up the DBDMA table for a device-to-memory
 * transfer, issue the READ DMA command and start the channel.
 * Returns 0 on success, 1 to make the core fall back to PIO.
 */
static int __pmac
pmac_ide_dma_read (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
	struct request *rq = HWGROUP(drive)->rq;
//	ide_task_t *args = rq->special;
	u8 unit = (drive->select.b.unit & 0x01);
	u8 ata4;
	u8 lba48 = (drive->addressing == 1) ? 1 : 0;
	task_ioreg_t command = WIN_NOP;

	if (pmif == NULL)
		return 1;

	ata4 = (pmif->kind == controller_kl_ata4);

	if (!pmac_ide_build_dmatable(drive, rq, PCI_DMA_FROMDEVICE))
		/* try PIO instead of DMA */
		return 1;

	/* Apple adds 60ns to wrDataSetup on reads: program the boosted
	 * timing (+0x00800000) for the duration of the read when UDMA/66
	 * is enabled on an ATA-4 cell */
	if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
		writel(pmif->timings[unit]+0x00800000UL,
			(unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
		(void)readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
	}

	drive->waiting_for_dma = 1;
	/* For non-disk media (ATAPI), the caller issues the command */
	if (drive->media != ide_disk)
		return 0;
	/*
	 * FIX ME to use only ACB ide_task_t args Struct
	 */
#if 0
	{
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#else
	command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA;
	if (rq->cmd == IDE_DRIVE_TASKFILE) {
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#endif
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);

	return pmac_ide_dma_begin(drive);
}
1603
/*
 * ide_dma_write hook: set up the DBDMA table for a memory-to-device
 * transfer, issue the WRITE DMA command and start the channel.
 * Returns 0 on success, 1 to make the core fall back to PIO.
 */
static int __pmac
pmac_ide_dma_write (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
	struct request *rq = HWGROUP(drive)->rq;
//	ide_task_t *args = rq->special;
	u8 unit = (drive->select.b.unit & 0x01);
	u8 ata4;
	u8 lba48 = (drive->addressing == 1) ? 1 : 0;
	task_ioreg_t command = WIN_NOP;

	if (pmif == NULL)
		return 1;

	ata4 = (pmif->kind == controller_kl_ata4);

	if (!pmac_ide_build_dmatable(drive, rq, PCI_DMA_TODEVICE))
		/* try PIO instead of DMA */
		return 1;

	/* Reads boost the timing by 60ns (see pmac_ide_dma_read); on
	 * writes, restore the unmodified timing word */
	if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
		writel(pmif->timings[unit],
			(unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
		(void)readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
	}

	drive->waiting_for_dma = 1;
	/* For non-disk media (ATAPI), the caller issues the command */
	if (drive->media != ide_disk)
		return 0;

	/*
	 * FIX ME to use only ACB ide_task_t args Struct
	 */
#if 0
	{
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#else
	command = (lba48) ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
	if (rq->cmd == IDE_DRIVE_TASKFILE) {
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#endif
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);

	return pmac_ide_dma_begin(drive);
}
1656
1657 static int __pmac
pmac_ide_dma_count(ide_drive_t * drive)1658 pmac_ide_dma_count (ide_drive_t *drive)
1659 {
1660 return HWIF(drive)->ide_dma_begin(drive);
1661 }
1662
1663 static int __pmac
pmac_ide_dma_begin(ide_drive_t * drive)1664 pmac_ide_dma_begin (ide_drive_t *drive)
1665 {
1666 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
1667 volatile struct dbdma_regs *dma;
1668
1669 if (pmif == NULL)
1670 return 1;
1671 dma = pmif->dma_regs;
1672
1673 writel((RUN << 16) | RUN, &dma->control);
1674 /* Make sure it gets to the controller right now */
1675 (void)readl(&dma->control);
1676 return 0;
1677 }
1678
/*
 * ide_dma_end hook: stop the DBDMA channel, tear down the sg mapping
 * and check the final channel status.  Returns 0 when the transfer
 * completed cleanly, 1 on a bad DMA status.
 */
static int __pmac
pmac_ide_dma_end (ide_drive_t *drive)
{
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
	volatile struct dbdma_regs *dma;
	u32 dstat;

	if (pmif == NULL)
		return 0;
	dma = pmif->dma_regs;

	drive->waiting_for_dma = 0;
	/* Snapshot status before clearing the channel */
	dstat = readl(&dma->status);
	/* Clear RUN/WAKE/DEAD (mask in upper halfword, values all 0) */
	writel(((RUN|WAKE|DEAD) << 16), &dma->control);
	pmac_ide_destroy_dmatable(drive);
	/* verify good dma status: RUN still set, not DEAD, no longer
	 * ACTIVE */
	if ((dstat & (RUN|DEAD|ACTIVE)) == RUN)
		return 0;
	printk(KERN_WARNING "%s: bad status at DMA end, dstat=%x\n",
	       drive->name, dstat);
	return 1;
}
1701
/*
 * ide_dma_test_irq hook: tell the IDE core whether the interrupt that
 * just fired really marks the end of our DMA transfer.  Returns 1 when
 * the DBDMA channel has drained (or on timeout), 0 to have the core
 * wait some more.
 */
static int __pmac
pmac_ide_dma_test_irq (ide_drive_t *drive)
{
	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
	volatile struct dbdma_regs *dma;
	unsigned long status;

	if (pmif == NULL)
		return 0;
	dma = pmif->dma_regs;

	/* We have two things to deal with here:
	 *
	 * - The dbdma won't stop if the command was started
	 * but completed with an error without transferring all
	 * data. This happens when bad blocks are met during
	 * a multi-block transfer.
	 *
	 * - The dbdma fifo hasn't yet finished flushing to
	 * to system memory when the disk interrupt occurs.
	 *
	 * The trick here is to increment drive->waiting_for_dma,
	 * and return as if no interrupt occurred. If the counter
	 * reaches a certain timeout value, we then return 1. If
	 * we really got the interrupt, it will happen right away
	 * again.
	 * Apple's solution here may be more elegant. They issue
	 * a DMA channel interrupt (a separate irq line) via a DBDMA
	 * NOP command just before the STOP, and wait for both the
	 * disk and DBDMA interrupts to have completed.
	 */

	/* If ACTIVE is cleared, the STOP command have passed and
	 * transfer is complete.
	 */
	status = readl(&dma->status);
	if (!(status & ACTIVE))
		return 1;
	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: ide_dma_test_irq \
called while not waiting\n", drive->name);

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set */
	drive->waiting_for_dma++;
	if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
		printk(KERN_WARNING "%s: timeout waiting \
for dbdma command stop\n", drive->name);
		return 1;
	}
	udelay(10);
	return 0;
}
1755
/*
 * ide_dma_host_off hook: nothing to do on this hardware, the DBDMA
 * channel is controlled per-transfer.  Always succeeds.
 */
static int __pmac
pmac_ide_dma_host_off (ide_drive_t *drive)
{
	return 0;
}
1761
/*
 * ide_dma_host_on hook: nothing to do on this hardware, the DBDMA
 * channel is controlled per-transfer.  Always succeeds.
 */
static int __pmac
pmac_ide_dma_host_on (ide_drive_t *drive)
{
	return 0;
}
1767
1768 static int __pmac
pmac_ide_dma_lostirq(ide_drive_t * drive)1769 pmac_ide_dma_lostirq (ide_drive_t *drive)
1770 {
1771 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
1772 volatile struct dbdma_regs *dma;
1773 unsigned long status;
1774
1775 if (pmif == NULL)
1776 return 0;
1777 dma = pmif->dma_regs;
1778
1779 status = readl(&dma->status);
1780 printk(KERN_ERR "%s: lost interrupt, dma status: %lx\n", drive->name, status);
1781 return 0;
1782 }
1783
/*
 * Probe-time DMA setup for interface ix: map the DBDMA registers,
 * allocate the DBDMA command table and scatterlist, then install the
 * driver's DMA method hooks on the hwif.  On any allocation failure
 * the interface is simply left without DMA support.
 */
static void __init
pmac_ide_setup_dma(struct device_node *np, int ix)
{
	struct pmac_ide_hwif *pmif = &pmac_ide[ix];

	if (device_is_compatible(np, "kauai-ata")) {
		/* Kauai: DBDMA regs live at a fixed offset inside the
		 * already-mapped PCI BAR */
		pmif->dma_regs = (volatile struct dbdma_regs*)(pmif->mapbase + 0x1000);
	} else {
		/* mac-io: DBDMA regs are a separate OF resource */
		if (request_OF_resource(np, 1, " (mac-io IDE DMA)") == NULL) {
			printk(KERN_ERR "ide-pmac(%s): can't request DMA resource !\n", np->name);
			return;
		}
		pmif->dma_regs =
			(volatile struct dbdma_regs*)ioremap(np->addrs[1].address, 0x200);
	}

	/*
	 * Allocate space for the DBDMA commands.
	 * The +2 is +1 for the stop command and +1 to allow for
	 * aligning the start address to a multiple of 16 bytes.
	 */
	pmif->dma_table_cpu = (struct dbdma_cmd*)pci_alloc_consistent(
		ide_hwifs[ix].pci_dev,
		(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
		&pmif->dma_table_dma);
	if (pmif->dma_table_cpu == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA command list\n",
		       ide_hwifs[ix].name);
		return;
	}

	pmif->sg_table = kmalloc(sizeof(struct scatterlist) * MAX_DCMDS,
				 GFP_KERNEL);
	if (pmif->sg_table == NULL) {
		/* Free the command list allocated just above */
		pci_free_consistent(	ide_hwifs[ix].pci_dev,
					(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
					pmif->dma_table_cpu, pmif->dma_table_dma);
		return;
	}
	/* Install DMA method hooks: pmac-specific ones where the
	 * hardware differs, generic __ide_dma_* helpers otherwise */
	ide_hwifs[ix].ide_dma_off = &__ide_dma_off;
	ide_hwifs[ix].ide_dma_off_quietly = &__ide_dma_off_quietly;
	ide_hwifs[ix].ide_dma_on = &__ide_dma_on;
	ide_hwifs[ix].ide_dma_check = &pmac_ide_dma_check;
	ide_hwifs[ix].ide_dma_read = &pmac_ide_dma_read;
	ide_hwifs[ix].ide_dma_write = &pmac_ide_dma_write;
	ide_hwifs[ix].ide_dma_count = &pmac_ide_dma_count;
	ide_hwifs[ix].ide_dma_begin = &pmac_ide_dma_begin;
	ide_hwifs[ix].ide_dma_end = &pmac_ide_dma_end;
	ide_hwifs[ix].ide_dma_test_irq = &pmac_ide_dma_test_irq;
	ide_hwifs[ix].ide_dma_host_off = &pmac_ide_dma_host_off;
	ide_hwifs[ix].ide_dma_host_on = &pmac_ide_dma_host_on;
	ide_hwifs[ix].ide_dma_good_drive = &__ide_dma_good_drive;
	ide_hwifs[ix].ide_dma_bad_drive = &__ide_dma_bad_drive;
	ide_hwifs[ix].ide_dma_verbose = &__ide_dma_verbose;
	ide_hwifs[ix].ide_dma_timeout = &__ide_dma_timeout;
	ide_hwifs[ix].ide_dma_retune = &__ide_dma_retune;
	ide_hwifs[ix].ide_dma_lostirq = &pmac_ide_dma_lostirq;

#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC_AUTO
	if (!noautodma)
		ide_hwifs[ix].autodma = 1;
#endif
	ide_hwifs[ix].drives[0].autodma = ide_hwifs[ix].autodma;
	ide_hwifs[ix].drives[1].autodma = ide_hwifs[ix].autodma;
}
1849
1850 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1851
1852 static void __pmac
idepmac_sleep_device(ide_drive_t * drive)1853 idepmac_sleep_device(ide_drive_t *drive)
1854 {
1855 ide_hwif_t *hwif = HWIF(drive);
1856 int j;
1857
1858 /* FIXME: We only handle the master IDE disk, we shoud
1859 * try to fix CD-ROMs here
1860 */
1861 switch (drive->media) {
1862 case ide_disk:
1863 /* Spin down the drive */
1864 SELECT_DRIVE(drive);
1865 SELECT_MASK(drive, 0);
1866 hwif->OUTB(drive->select.all, IDE_SELECT_REG);
1867 (void) hwif->INB(IDE_SELECT_REG);
1868 udelay(100);
1869 hwif->OUTB(0x00, IDE_SECTOR_REG);
1870 hwif->OUTB(0x00, IDE_NSECTOR_REG);
1871 hwif->OUTB(0x00, IDE_LCYL_REG);
1872 hwif->OUTB(0x00, IDE_HCYL_REG);
1873 hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG);
1874 hwif->OUTB(WIN_STANDBYNOW1, IDE_COMMAND_REG);
1875 for (j = 0; j < 10; j++) {
1876 u8 status;
1877 mdelay(100);
1878 status = hwif->INB(IDE_STATUS_REG);
1879 if (!(status & BUSY_STAT) && (status & DRQ_STAT))
1880 break;
1881 }
1882 break;
1883 case ide_cdrom:
1884 // todo
1885 break;
1886 case ide_floppy:
1887 // todo
1888 break;
1889 }
1890 }
1891
1892 #ifdef CONFIG_PMAC_PBOOK
/*
 * Bring a drive back after machine resume: force a media-change check
 * so stale media state is dropped, then re-enable DMA if the drive was
 * using it before sleep, and restart any queued requests.
 */
static void __pmac
idepmac_wake_device(ide_drive_t *drive, int used_dma)
{
	/* We force the IDE subdriver to check for a media change
	 * This must be done first or we may lose the condition
	 *
	 * Problem: This can schedule. I moved the block device
	 * wakeup almost late by priority because of that.
	 */
	if (DRIVER(drive)->media_change)
		DRIVER(drive)->media_change(drive);

	/* We kick the VFS too (see fix in ide.c revalidate) */
	check_disk_change(MKDEV(HWIF(drive)->major, (drive->select.b.unit) << PARTN_BITS));

#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
	/* We re-enable DMA on the drive if it was active. */
	/* This doesn't work with the CD-ROM in the media-bay, probably
	 * because of a pending unit attention. The problem if that if I
	 * clear the error, the filesystem dies.
	 */
	if (used_dma && !ide_spin_wait_hwgroup(drive)) {
		/* Lock HW group (ide_spin_wait_hwgroup returned with the
		 * request lock held) */
		HWGROUP(drive)->busy = 1;
		pmac_ide_dma_check(drive);
		HWGROUP(drive)->busy = 0;
		if (!list_empty(&drive->queue.queue_head))
			ide_do_request(HWGROUP(drive), 0);
		spin_unlock_irq(&io_request_lock);
	}
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
}
1925
1926 static void __pmac
idepmac_sleep_interface(pmac_ide_hwif_t * pmif,unsigned base,int mediabay)1927 idepmac_sleep_interface(pmac_ide_hwif_t *pmif, unsigned base, int mediabay)
1928 {
1929 struct device_node* np = pmif->node;
1930
1931 /* We clear the timings */
1932 pmif->timings[0] = 0;
1933 pmif->timings[1] = 0;
1934
1935 /* The media bay will handle itself just fine */
1936 if (mediabay)
1937 return;
1938
1939 /* Disable the bus */
1940 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 0);
1941 }
1942
1943 static void __pmac
idepmac_wake_interface(pmac_ide_hwif_t * pmif,unsigned long base,int mediabay)1944 idepmac_wake_interface(pmac_ide_hwif_t *pmif, unsigned long base, int mediabay)
1945 {
1946 struct device_node* np = pmif->node;
1947
1948 if (!mediabay) {
1949 /* Revive IDE disk and controller */
1950 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
1951 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
1952 mdelay(10);
1953 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
1954 }
1955 }
1956
1957 static void __pmac
idepmac_sleep_drive(ide_drive_t * drive)1958 idepmac_sleep_drive(ide_drive_t *drive)
1959 {
1960 int unlock = 0;
1961
1962 /* Wait for HW group to complete operations */
1963 if (ide_spin_wait_hwgroup(drive)) {
1964 // What can we do here ? Wake drive we had already
1965 // put to sleep and return an error ?
1966 } else {
1967 unlock = 1;
1968 /* Lock HW group */
1969 HWGROUP(drive)->busy = 1;
1970 /* Stop the device */
1971 idepmac_sleep_device(drive);
1972 }
1973 if (unlock)
1974 spin_unlock_irq(&io_request_lock);
1975 }
1976
/*
 * Wake one drive after machine resume: reprogram the controller
 * timings, poll the drive (via raw register offsets off `base`) until
 * it deasserts BSY, then unlock the hardware group and restart any
 * queued requests.
 */
static void __pmac
idepmac_wake_drive(ide_drive_t *drive, unsigned long base)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	int j;

	/* Reset timings */
	pmac_ide_do_update_timings(drive);
	mdelay(10);

	/* Wait up to 20 seconds for the drive to be ready */
	for (j = 0; j < 200; j++) {
		u8 status = 0;
		mdelay(100);
		/* base+0x60 is the select register, base+0x70 the status
		 * register in this memory-mapped layout */
		hwif->OUTB(drive->select.all, base + 0x60);
		/* Selection didn't stick yet: drive still waking up */
		if ((hwif->INB(base + 0x60)) != drive->select.all)
			continue;
		status = hwif->INB(base + 0x70);
		if (!(status & BUSY_STAT))
			break;
	}

	/* We resume processing on the HW group */
	spin_lock_irqsave(&io_request_lock, flags);
	HWGROUP(drive)->busy = 0;
	if (!list_empty(&drive->queue.queue_head))
		ide_do_request(HWGROUP(drive), 0);
	spin_unlock_irqrestore(&io_request_lock, flags);
}
2007
/* Note: We support only master drives for now. This will have to be
 * improved if we want to handle sleep on the iMacDV where the CD-ROM
 * is a slave
 */
/*
 * PMU sleep notifier: quiesce all PowerMac IDE interfaces on
 * PBOOK_SLEEP_NOW and bring them back on PBOOK_WAKE.  Always returns
 * PBOOK_SLEEP_OK (sleep is never vetoed from here).
 */
static int __pmac
idepmac_notify_sleep(struct pmu_sleep_notifier *self, int when)
{
	int i, ret;
	unsigned long base;
	int big_delay;

	switch (when) {
	case PBOOK_SLEEP_REQUEST:
		break;
	case PBOOK_SLEEP_REJECT:
		break;
	case PBOOK_SLEEP_NOW:
		/* Spin down every present drive, mask the interface irq,
		 * then power down the interface itself */
		for (i = 0; i < pmac_ide_count; ++i) {
			ide_hwif_t *hwif;
			int dn;

			/* regbase == 0 means the slot was never probed */
			if ((base = pmac_ide[i].regbase) == 0)
				continue;

			hwif = &ide_hwifs[i];
			for (dn=0; dn<MAX_DRIVES; dn++) {
				if (!hwif->drives[dn].present)
					continue;
				idepmac_sleep_drive(&hwif->drives[dn]);
			}
			/* Disable irq during sleep */
			disable_irq(pmac_ide[i].irq);

			/* Check if this is a media bay with an IDE device or not
			 * a media bay.
			 */
			ret = check_media_bay_by_base(base, MB_CD);
			if ((ret == 0) || (ret == -ENODEV))
				idepmac_sleep_interface(&pmac_ide[i], base, (ret == 0));
		}
		break;
	case PBOOK_WAKE:
		/* Phase 1: power every interface back up */
		big_delay = 0;
		for (i = 0; i < pmac_ide_count; ++i) {

			if ((base = pmac_ide[i].regbase) == 0)
				continue;

			/* Make sure we have sane timings */
			sanitize_timings(&pmac_ide[i]);

			/* Check if this is a media bay with an IDE device or not
			 * a media bay
			 */
			ret = check_media_bay_by_base(base, MB_CD);
			if ((ret == 0) || (ret == -ENODEV)) {
				idepmac_wake_interface(&pmac_ide[i], base, (ret == 0));
				big_delay = 1;
			}

		}
		/* Let hardware get up to speed */
		if (big_delay)
			mdelay(IDE_WAKEUP_DELAY_MS);

		/* Phase 2: wake each drive, re-enable irqs and restart
		 * queued requests */
		for (i = 0; i < pmac_ide_count; ++i) {
			ide_hwif_t *hwif;
			int used_dma, dn;
			int irq_on = 0;

			if ((base = pmac_ide[i].regbase) == 0)
				continue;

			hwif = &ide_hwifs[i];
			for (dn=0; dn<MAX_DRIVES; dn++) {
				ide_drive_t *drive = &hwif->drives[dn];
				if (!drive->present)
					continue;
				/* We don't have re-configured DMA yet */
				used_dma = drive->using_dma;
				drive->using_dma = 0;
				idepmac_wake_drive(drive, base);
				/* Re-enable the irq once the first drive is
				 * awake, before running its queue */
				if (!irq_on) {
					enable_irq(pmac_ide[i].irq);
					irq_on = 1;
				}
				idepmac_wake_device(drive, used_dma);
			}
			/* Interface with no present drives: still undo the
			 * disable_irq() done at sleep time */
			if (!irq_on)
				enable_irq(pmac_ide[i].irq);
		}
		break;
	}
	return PBOOK_SLEEP_OK;
}
2103 #endif /* CONFIG_PMAC_PBOOK */
2104