/*
 * linux/drivers/ide/pci/hpt366.c		Version 0.36	April 25, 2003
 *
 * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
 * Portions Copyright (C) 2001		Sun Microsystems, Inc.
 * Portions Copyright (C) 2003		Red Hat Inc
 *
 * Thanks to HighPoint Technologies for their assistance, and hardware.
 * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
 * donation of an ABit BP6 mainboard, processor, and memory accelerated
 * development and support.
 *
 * Highpoint have their own driver (source except for the raid part)
 * available from http://www.highpoint-tech.com/hpt3xx-opensource-v131.tgz
 * This may be useful to anyone wanting to work on the mainstream hpt IDE.
 *
 * Note that final HPT370 support was done by force extraction of GPL.
 *
 * - add function for getting/setting power status of drive
 * - the HPT370's state machine can get confused. reset it before each dma
 *   xfer to prevent that from happening.
 * - reset state engine whenever we get an error.
 * - check for busmaster state at end of dma.
 * - use new highpoint timings.
 * - detect bus speed using highpoint register.
 * - use pll if we don't have a clock table. added a 66MHz table that's
 *   just 2x the 33MHz table.
 * - removed turnaround. NOTE: we never want to switch between pll and
 *   pci clocks as the chip can glitch in those cases. the highpoint
 *   approved workaround slows everything down too much to be useful. in
 *   addition, we would have to serialize access to each chip.
 * 	Adrian Sun <a.sun@sun.com>
 *
 * add drive timings for 66MHz PCI bus,
 * fix ATA Cable signal detection, fix incorrect /proc info
 * add /proc display for per-drive PIO/DMA/UDMA mode and
 * per-channel ATA-33/66 Cable detect.
 * 	Duncan Laurie <void@sun.com>
 *
 * fixup /proc output for multiple controllers
 *	Tim Hockin <thockin@sun.com>
 *
 * On hpt366:
 * Reset the hpt366 on error, reset on dma
 * Fix disabling Fast Interrupt hpt366.
 * 	Mike Waychison <crlf@sun.com>
 *
 * Added support for 372N clocking and clock switching. The 372N needs
 * different clocks on read/write. This requires overloading rw_disk and
 * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for
 * keeping me sane.
 *		Alan Cox <alan@redhat.com>
 *
 */
55
56
57 #include <linux/config.h>
58 #include <linux/types.h>
59 #include <linux/module.h>
60 #include <linux/kernel.h>
61 #include <linux/delay.h>
62 #include <linux/timer.h>
63 #include <linux/mm.h>
64 #include <linux/ioport.h>
65 #include <linux/blkdev.h>
66 #include <linux/hdreg.h>
67
68 #include <linux/interrupt.h>
69 #include <linux/pci.h>
70 #include <linux/init.h>
71 #include <linux/ide.h>
72
73 #include <asm/uaccess.h>
74 #include <asm/io.h>
75 #include <asm/irq.h>
76
77 #include "ide_modes.h"
78 #include "hpt366.h"
79
80 #if defined(DISPLAY_HPT366_TIMINGS) && defined(CONFIG_PROC_FS)
81 #include <linux/stat.h>
82 #include <linux/proc_fs.h>
83 #endif /* defined(DISPLAY_HPT366_TIMINGS) && defined(CONFIG_PROC_FS) */
84
85 static unsigned int hpt_revision(struct pci_dev *dev);
86 static unsigned int hpt_minimum_revision(struct pci_dev *dev, int revision);
87
88 #if defined(DISPLAY_HPT366_TIMINGS) && defined(CONFIG_PROC_FS)
89
90 static u8 hpt366_proc = 0;
91 static struct pci_dev *hpt_devs[HPT366_MAX_DEVS];
92 static int n_hpt_devs;
93
hpt366_get_info(char * buffer,char ** addr,off_t offset,int count)94 static int hpt366_get_info (char *buffer, char **addr, off_t offset, int count)
95 {
96 char *p = buffer;
97 char *chipset_nums[] = {"366", "366", "368",
98 "370", "370A", "372",
99 "302", "371", "374" };
100 int i;
101
102 p += sprintf(p, "\n "
103 "HighPoint HPT366/368/370/372/374\n");
104 for (i = 0; i < n_hpt_devs; i++) {
105 struct pci_dev *dev = hpt_devs[i];
106 unsigned long iobase = dev->resource[4].start;
107 u32 class_rev = hpt_revision(dev);
108 u8 c0, c1;
109
110 p += sprintf(p, "\nController: %d\n", i);
111 if(class_rev < 9)
112 p += sprintf(p, "Chipset: HPT%s\n", chipset_nums[class_rev]);
113 else
114 p += sprintf(p, "Chipset: HPT revision %d\n", class_rev);
115 p += sprintf(p, "--------------- Primary Channel "
116 "--------------- Secondary Channel "
117 "--------------\n");
118
119 /* get the bus master status registers */
120 c0 = inb(iobase + 0x2);
121 c1 = inb(iobase + 0xa);
122 p += sprintf(p, "Enabled: %s"
123 " %s\n",
124 (c0 & 0x80) ? "no" : "yes",
125 (c1 & 0x80) ? "no" : "yes");
126 #if 0
127 if (hpt_minimum_revision(dev, 3)) {
128 u8 cbl;
129 cbl = inb(iobase + 0x7b);
130 outb(cbl | 1, iobase + 0x7b);
131 outb(cbl & ~1, iobase + 0x7b);
132 cbl = inb(iobase + 0x7a);
133 p += sprintf(p, "Cable: ATA-%d"
134 " ATA-%d\n",
135 (cbl & 0x02) ? 33 : 66,
136 (cbl & 0x01) ? 33 : 66);
137 p += sprintf(p, "\n");
138 }
139 #endif
140 p += sprintf(p, "--------------- drive0 --------- drive1 "
141 "------- drive0 ---------- drive1 -------\n");
142 p += sprintf(p, "DMA capable: %s %s"
143 " %s %s\n",
144 (c0 & 0x20) ? "yes" : "no ",
145 (c0 & 0x40) ? "yes" : "no ",
146 (c1 & 0x20) ? "yes" : "no ",
147 (c1 & 0x40) ? "yes" : "no ");
148
149 {
150 u8 c2, c3;
151 /* older revs don't have these registers mapped
152 * into io space */
153 pci_read_config_byte(dev, 0x43, &c0);
154 pci_read_config_byte(dev, 0x47, &c1);
155 pci_read_config_byte(dev, 0x4b, &c2);
156 pci_read_config_byte(dev, 0x4f, &c3);
157
158 p += sprintf(p, "Mode: %s %s"
159 " %s %s\n",
160 (c0 & 0x10) ? "UDMA" : (c0 & 0x20) ? "DMA " :
161 (c0 & 0x80) ? "PIO " : "off ",
162 (c1 & 0x10) ? "UDMA" : (c1 & 0x20) ? "DMA " :
163 (c1 & 0x80) ? "PIO " : "off ",
164 (c2 & 0x10) ? "UDMA" : (c2 & 0x20) ? "DMA " :
165 (c2 & 0x80) ? "PIO " : "off ",
166 (c3 & 0x10) ? "UDMA" : (c3 & 0x20) ? "DMA " :
167 (c3 & 0x80) ? "PIO " : "off ");
168 }
169 }
170 p += sprintf(p, "\n");
171
172 return p-buffer;/* => must be less than 4k! */
173 }
174 #endif /* defined(DISPLAY_HPT366_TIMINGS) && defined(CONFIG_PROC_FS) */
175
/*
 * This wants fixing so that we do everything not by classrev
 * (which breaks on the newest chips) but by creating an
 * enumeration of chip variants and using that
 */
182
hpt_revision(struct pci_dev * dev)183 static u32 hpt_revision (struct pci_dev *dev)
184 {
185 u32 class_rev;
186 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
187 class_rev &= 0xff;
188
189 switch(dev->device) {
190 /* Remap new 372N onto 372 */
191 case PCI_DEVICE_ID_TTI_HPT372N:
192 class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
193 case PCI_DEVICE_ID_TTI_HPT374:
194 class_rev = PCI_DEVICE_ID_TTI_HPT374; break;
195 case PCI_DEVICE_ID_TTI_HPT371:
196 class_rev = PCI_DEVICE_ID_TTI_HPT371; break;
197 case PCI_DEVICE_ID_TTI_HPT302:
198 class_rev = PCI_DEVICE_ID_TTI_HPT302; break;
199 case PCI_DEVICE_ID_TTI_HPT372:
200 class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
201 default:
202 break;
203 }
204 return class_rev;
205 }
206
hpt_minimum_revision(struct pci_dev * dev,int revision)207 static u32 hpt_minimum_revision (struct pci_dev *dev, int revision)
208 {
209 unsigned int class_rev = hpt_revision(dev);
210 revision--;
211 return ((int) (class_rev > revision) ? 1 : 0);
212 }
213
214 static int check_in_drive_lists(ide_drive_t *drive, const char **list);
215
hpt3xx_ratemask(ide_drive_t * drive)216 static u8 hpt3xx_ratemask (ide_drive_t *drive)
217 {
218 struct pci_dev *dev = HWIF(drive)->pci_dev;
219 u8 mode = 0;
220
221 if (hpt_minimum_revision(dev, 8)) { /* HPT374 */
222 mode = (HPT374_ALLOW_ATA133_6) ? 4 : 3;
223 } else if (hpt_minimum_revision(dev, 7)) { /* HPT371 */
224 mode = (HPT371_ALLOW_ATA133_6) ? 4 : 3;
225 } else if (hpt_minimum_revision(dev, 6)) { /* HPT302 */
226 mode = (HPT302_ALLOW_ATA133_6) ? 4 : 3;
227 } else if (hpt_minimum_revision(dev, 5)) { /* HPT372 */
228 mode = (HPT372_ALLOW_ATA133_6) ? 4 : 3;
229 } else if (hpt_minimum_revision(dev, 4)) { /* HPT370A */
230 mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
231 } else if (hpt_minimum_revision(dev, 3)) { /* HPT370 */
232 mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
233 mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : mode;
234 } else { /* HPT366 and HPT368 */
235 mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : 2;
236 }
237 if (!eighty_ninty_three(drive) && (mode))
238 mode = min(mode, (u8)1);
239 return mode;
240 }
241
/*
 * Note for the future: on the SATA hpt37x we must set
 * either PIO or UDMA modes 0, 4, 5
 */
246
hpt3xx_ratefilter(ide_drive_t * drive,u8 speed)247 static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
248 {
249 struct pci_dev *dev = HWIF(drive)->pci_dev;
250 u8 mode = hpt3xx_ratemask(drive);
251
252 if (drive->media != ide_disk)
253 return min(speed, (u8)XFER_PIO_4);
254
255 switch(mode) {
256 case 0x04:
257 speed = min(speed, (u8)XFER_UDMA_6);
258 break;
259 case 0x03:
260 speed = min(speed, (u8)XFER_UDMA_5);
261 if (hpt_minimum_revision(dev, 5))
262 break;
263 if (check_in_drive_lists(drive, bad_ata100_5))
264 speed = min(speed, (u8)XFER_UDMA_4);
265 break;
266 case 0x02:
267 speed = min(speed, (u8)XFER_UDMA_4);
268 /*
269 * CHECK ME, Does this need to be set to 5 ??
270 */
271 if (hpt_minimum_revision(dev, 3))
272 break;
273 if ((check_in_drive_lists(drive, bad_ata66_4)) ||
274 (!(HPT366_ALLOW_ATA66_4)))
275 speed = min(speed, (u8)XFER_UDMA_3);
276 if ((check_in_drive_lists(drive, bad_ata66_3)) ||
277 (!(HPT366_ALLOW_ATA66_3)))
278 speed = min(speed, (u8)XFER_UDMA_2);
279 break;
280 case 0x01:
281 speed = min(speed, (u8)XFER_UDMA_2);
282 /*
283 * CHECK ME, Does this need to be set to 5 ??
284 */
285 if (hpt_minimum_revision(dev, 3))
286 break;
287 if (check_in_drive_lists(drive, bad_ata33))
288 speed = min(speed, (u8)XFER_MW_DMA_2);
289 break;
290 case 0x00:
291 default:
292 speed = min(speed, (u8)XFER_MW_DMA_2);
293 break;
294 }
295 return speed;
296 }
297
check_in_drive_lists(ide_drive_t * drive,const char ** list)298 static int check_in_drive_lists (ide_drive_t *drive, const char **list)
299 {
300 struct hd_driveid *id = drive->id;
301
302 if (quirk_drives == list) {
303 while (*list)
304 if (strstr(id->model, *list++))
305 return 1;
306 } else {
307 while (*list)
308 if (!strcmp(*list++,id->model))
309 return 1;
310 }
311 return 0;
312 }
313
pci_bus_clock_list(u8 speed,struct chipset_bus_clock_list_entry * chipset_table)314 static unsigned int pci_bus_clock_list (u8 speed, struct chipset_bus_clock_list_entry * chipset_table)
315 {
316 for ( ; chipset_table->xfer_speed ; chipset_table++)
317 if (chipset_table->xfer_speed == speed)
318 return chipset_table->chipset_settings;
319 return chipset_table->chipset_settings;
320 }
321
hpt366_tune_chipset(ide_drive_t * drive,u8 xferspeed)322 static void hpt366_tune_chipset (ide_drive_t *drive, u8 xferspeed)
323 {
324 struct pci_dev *dev = HWIF(drive)->pci_dev;
325 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
326 // u8 speed = ide_rate_filter(hpt3xx_ratemask(drive), xferspeed);
327 u8 regtime = (drive->select.b.unit & 0x01) ? 0x44 : 0x40;
328 u8 regfast = (HWIF(drive)->channel) ? 0x55 : 0x51;
329 u8 drive_fast = 0;
330 u32 reg1 = 0, reg2 = 0;
331
332 /*
333 * Disable the "fast interrupt" prediction.
334 */
335 pci_read_config_byte(dev, regfast, &drive_fast);
336 #if 0
337 if (drive_fast & 0x02)
338 pci_write_config_byte(dev, regfast, drive_fast & ~0x20);
339 #else
340 if (drive_fast & 0x80)
341 pci_write_config_byte(dev, regfast, drive_fast & ~0x80);
342 #endif
343
344 reg2 = pci_bus_clock_list(speed,
345 (struct chipset_bus_clock_list_entry *) pci_get_drvdata(dev));
346 /*
347 * Disable on-chip PIO FIFO/buffer
348 * (to avoid problems handling I/O errors later)
349 */
350 pci_read_config_dword(dev, regtime, ®1);
351 if (speed >= XFER_MW_DMA_0) {
352 reg2 = (reg2 & ~0xc0000000) | (reg1 & 0xc0000000);
353 } else {
354 reg2 = (reg2 & ~0x30070000) | (reg1 & 0x30070000);
355 }
356 reg2 &= ~0x80000000;
357
358 pci_write_config_dword(dev, regtime, reg2);
359 }
360
hpt368_tune_chipset(ide_drive_t * drive,u8 speed)361 static void hpt368_tune_chipset (ide_drive_t *drive, u8 speed)
362 {
363 hpt366_tune_chipset(drive, speed);
364 }
365
hpt370_tune_chipset(ide_drive_t * drive,u8 xferspeed)366 static void hpt370_tune_chipset (ide_drive_t *drive, u8 xferspeed)
367 {
368 struct pci_dev *dev = HWIF(drive)->pci_dev;
369 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
370 // u8 speed = ide_rate_filter(hpt3xx_ratemask(drive), xferspeed);
371 u8 regfast = (HWIF(drive)->channel) ? 0x55 : 0x51;
372 u8 drive_pci = 0x40 + (drive->dn * 4);
373 u8 new_fast = 0, drive_fast = 0;
374 u32 list_conf = 0, drive_conf = 0;
375 u32 conf_mask = (speed >= XFER_MW_DMA_0) ? 0xc0000000 : 0x30070000;
376
377 /*
378 * Disable the "fast interrupt" prediction.
379 * don't holdoff on interrupts. (== 0x01 despite what the docs say)
380 */
381 pci_read_config_byte(dev, regfast, &drive_fast);
382 new_fast = drive_fast;
383 if (new_fast & 0x02)
384 new_fast &= ~0x02;
385
386 #ifdef HPT_DELAY_INTERRUPT
387 if (new_fast & 0x01)
388 new_fast &= ~0x01;
389 #else
390 if ((new_fast & 0x01) == 0)
391 new_fast |= 0x01;
392 #endif
393 if (new_fast != drive_fast)
394 pci_write_config_byte(dev, regfast, new_fast);
395
396 list_conf = pci_bus_clock_list(speed,
397 (struct chipset_bus_clock_list_entry *)
398 pci_get_drvdata(dev));
399
400 pci_read_config_dword(dev, drive_pci, &drive_conf);
401 list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
402
403 if (speed < XFER_MW_DMA_0) {
404 list_conf &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
405 }
406
407 pci_write_config_dword(dev, drive_pci, list_conf);
408 }
409
hpt372_tune_chipset(ide_drive_t * drive,u8 xferspeed)410 static void hpt372_tune_chipset (ide_drive_t *drive, u8 xferspeed)
411 {
412 struct pci_dev *dev = HWIF(drive)->pci_dev;
413 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
414 // u8 speed = ide_rate_filter(hpt3xx_ratemask(drive), xferspeed);
415 u8 regfast = (HWIF(drive)->channel) ? 0x55 : 0x51;
416 u8 drive_fast = 0, drive_pci = 0x40 + (drive->dn * 4);
417 u32 list_conf = 0, drive_conf = 0;
418 u32 conf_mask = (speed >= XFER_MW_DMA_0) ? 0xc0000000 : 0x30070000;
419 /*
420 * Disable the "fast interrupt" prediction.
421 * don't holdoff on interrupts. (== 0x01 despite what the docs say)
422 */
423 pci_read_config_byte(dev, regfast, &drive_fast);
424 drive_fast &= ~0x07;
425 pci_write_config_byte(dev, regfast, drive_fast);
426
427 list_conf = pci_bus_clock_list(speed,
428 (struct chipset_bus_clock_list_entry *)
429 pci_get_drvdata(dev));
430 pci_read_config_dword(dev, drive_pci, &drive_conf);
431 list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
432 if (speed < XFER_MW_DMA_0)
433 list_conf &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
434 pci_write_config_dword(dev, drive_pci, list_conf);
435 }
436
hpt374_tune_chipset(ide_drive_t * drive,u8 speed)437 static void hpt374_tune_chipset (ide_drive_t *drive, u8 speed)
438 {
439 hpt372_tune_chipset(drive, speed);
440 }
441
hpt3xx_tune_chipset(ide_drive_t * drive,u8 speed)442 static int hpt3xx_tune_chipset (ide_drive_t *drive, u8 speed)
443 {
444 struct pci_dev *dev = HWIF(drive)->pci_dev;
445
446 if (hpt_minimum_revision(dev, 8))
447 hpt374_tune_chipset(drive, speed);
448 #if 0
449 else if (hpt_minimum_revision(dev, 7))
450 hpt371_tune_chipset(drive, speed);
451 else if (hpt_minimum_revision(dev, 6))
452 hpt302_tune_chipset(drive, speed);
453 #endif
454 else if (hpt_minimum_revision(dev, 5))
455 hpt372_tune_chipset(drive, speed);
456 else if (hpt_minimum_revision(dev, 3))
457 hpt370_tune_chipset(drive, speed);
458 else if (hpt_minimum_revision(dev, 2))
459 hpt368_tune_chipset(drive, speed);
460 else
461 hpt366_tune_chipset(drive, speed);
462
463 return ((int) ide_config_drive_speed(drive, speed));
464 }
465
hpt3xx_tune_drive(ide_drive_t * drive,u8 pio)466 static void hpt3xx_tune_drive (ide_drive_t *drive, u8 pio)
467 {
468 pio = ide_get_best_pio_mode(drive, pio, 5, NULL);
469 (void) hpt3xx_tune_chipset(drive, (XFER_PIO_0 + pio));
470 }
471
/*
 * This allows the configuration of ide_pci chipset registers
 * for cards that learn about the drive's UDMA, DMA, PIO capabilities
 * after the drive is reported by the OS.  Initially designed for
 * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc.
 *
 * check_in_drive_lists(drive, bad_ata66_4)
 * check_in_drive_lists(drive, bad_ata66_3)
 * check_in_drive_lists(drive, bad_ata33)
 *
 */
config_chipset_for_dma(ide_drive_t * drive)483 static int config_chipset_for_dma (ide_drive_t *drive)
484 {
485 u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive));
486
487 if (!speed)
488 return 0;
489
490 if (pci_get_drvdata(HWIF(drive)->pci_dev) == NULL)
491 return 0;
492
493 (void) hpt3xx_tune_chipset(drive, speed);
494 return ide_dma_enable(drive);
495 }
496
hpt3xx_quirkproc(ide_drive_t * drive)497 static int hpt3xx_quirkproc (ide_drive_t *drive)
498 {
499 return ((int) check_in_drive_lists(drive, quirk_drives));
500 }
501
hpt3xx_intrproc(ide_drive_t * drive)502 static void hpt3xx_intrproc (ide_drive_t *drive)
503 {
504 ide_hwif_t *hwif = HWIF(drive);
505
506 if (drive->quirk_list)
507 return;
508 /* drives in the quirk_list may not like intr setups/cleanups */
509 hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG);
510 }
511
hpt3xx_maskproc(ide_drive_t * drive,int mask)512 static void hpt3xx_maskproc (ide_drive_t *drive, int mask)
513 {
514 struct pci_dev *dev = HWIF(drive)->pci_dev;
515
516 if (drive->quirk_list) {
517 if (hpt_minimum_revision(dev,3)) {
518 u8 reg5a = 0;
519 pci_read_config_byte(dev, 0x5a, ®5a);
520 if (((reg5a & 0x10) >> 4) != mask)
521 pci_write_config_byte(dev, 0x5a, mask ? (reg5a | 0x10) : (reg5a & ~0x10));
522 } else {
523 if (mask) {
524 disable_irq(HWIF(drive)->irq);
525 } else {
526 enable_irq(HWIF(drive)->irq);
527 }
528 }
529 } else {
530 if (IDE_CONTROL_REG)
531 HWIF(drive)->OUTB(mask ? (drive->ctl | 2) :
532 (drive->ctl & ~2),
533 IDE_CONTROL_REG);
534 }
535 }
536
hpt366_config_drive_xfer_rate(ide_drive_t * drive)537 static int hpt366_config_drive_xfer_rate (ide_drive_t *drive)
538 {
539 ide_hwif_t *hwif = HWIF(drive);
540 struct hd_driveid *id = drive->id;
541
542 drive->init_speed = 0;
543
544 if ((id->capability & 1) && drive->autodma) {
545 /* Consult the list of known "bad" drives */
546 if (hwif->ide_dma_bad_drive(drive))
547 goto fast_ata_pio;
548 if (id->field_valid & 4) {
549 if (id->dma_ultra & hwif->ultra_mask) {
550 /* Force if Capable UltraDMA */
551 int dma = config_chipset_for_dma(drive);
552 if ((id->field_valid & 2) && !dma)
553 goto try_dma_modes;
554 }
555 } else if (id->field_valid & 2) {
556 try_dma_modes:
557 if (id->dma_mword & hwif->mwdma_mask) {
558 /* Force if Capable regular DMA modes */
559 if (!config_chipset_for_dma(drive))
560 goto no_dma_set;
561 }
562 } else if (hwif->ide_dma_good_drive(drive) &&
563 (id->eide_dma_time < 150)) {
564 /* Consult the list of known "good" drives */
565 if (!config_chipset_for_dma(drive))
566 goto no_dma_set;
567 } else {
568 goto fast_ata_pio;
569 }
570 return hwif->ide_dma_on(drive);
571 } else if ((id->capability & 8) || (id->field_valid & 2)) {
572 fast_ata_pio:
573 no_dma_set:
574 hpt3xx_tune_drive(drive, 5);
575 return hwif->ide_dma_off_quietly(drive);
576 }
577 /* IORDY not supported */
578 return 0;
579 }
580
/*
 * This is specific to the HPT366 UDMA bios chipset
 * by HighPoint|Triones Technologies, Inc.
 */
hpt366_ide_dma_lostirq(ide_drive_t * drive)585 static int hpt366_ide_dma_lostirq (ide_drive_t *drive)
586 {
587 struct pci_dev *dev = HWIF(drive)->pci_dev;
588 u8 reg50h = 0, reg52h = 0, reg5ah = 0;
589
590 pci_read_config_byte(dev, 0x50, ®50h);
591 pci_read_config_byte(dev, 0x52, ®52h);
592 pci_read_config_byte(dev, 0x5a, ®5ah);
593 printk("%s: (%s) reg50h=0x%02x, reg52h=0x%02x, reg5ah=0x%02x\n",
594 drive->name, __FUNCTION__, reg50h, reg52h, reg5ah);
595 if (reg5ah & 0x10)
596 pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10);
597 #if 0
598 /* how about we flush and reset, mmmkay? */
599 pci_write_config_byte(dev, 0x51, 0x1F);
600 /* fall through to a reset */
601 case ide_dma_begin:
602 case ide_dma_end:
603 /* reset the chips state over and over.. */
604 pci_write_config_byte(dev, 0x51, 0x13);
605 #endif
606 return __ide_dma_lostirq(drive);
607 }
608
hpt370_clear_engine(ide_drive_t * drive)609 static void hpt370_clear_engine (ide_drive_t *drive)
610 {
611 u8 regstate = HWIF(drive)->channel ? 0x54 : 0x50;
612 pci_write_config_byte(HWIF(drive)->pci_dev, regstate, 0x37);
613 udelay(10);
614 }
615
hpt370_ide_dma_begin(ide_drive_t * drive)616 static int hpt370_ide_dma_begin (ide_drive_t *drive)
617 {
618 #ifdef HPT_RESET_STATE_ENGINE
619 hpt370_clear_engine(drive);
620 #endif
621 return __ide_dma_begin(drive);
622 }
623
hpt370_ide_dma_end(ide_drive_t * drive)624 static int hpt370_ide_dma_end (ide_drive_t *drive)
625 {
626 ide_hwif_t *hwif = HWIF(drive);
627 u8 dma_stat = hwif->INB(hwif->dma_status);
628
629 if (dma_stat & 0x01) {
630 /* wait a little */
631 udelay(20);
632 dma_stat = hwif->INB(hwif->dma_status);
633 }
634 if ((dma_stat & 0x01) != 0)
635 /* fallthrough */
636 (void) HWIF(drive)->ide_dma_timeout(drive);
637
638 return __ide_dma_end(drive);
639 }
640
hpt370_lostirq_timeout(ide_drive_t * drive)641 static void hpt370_lostirq_timeout (ide_drive_t *drive)
642 {
643 ide_hwif_t *hwif = HWIF(drive);
644 u8 bfifo = 0, reginfo = hwif->channel ? 0x56 : 0x52;
645 u8 dma_stat = 0, dma_cmd = 0;
646
647 pci_read_config_byte(HWIF(drive)->pci_dev, reginfo, &bfifo);
648 printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
649 hpt370_clear_engine(drive);
650 /* get dma command mode */
651 dma_cmd = hwif->INB(hwif->dma_command);
652 /* stop dma */
653 hwif->OUTB(dma_cmd & ~0x1, hwif->dma_command);
654 dma_stat = hwif->INB(hwif->dma_status);
655 /* clear errors */
656 hwif->OUTB(dma_stat | 0x6, hwif->dma_status);
657 }
658
hpt370_ide_dma_timeout(ide_drive_t * drive)659 static int hpt370_ide_dma_timeout (ide_drive_t *drive)
660 {
661 hpt370_lostirq_timeout(drive);
662 hpt370_clear_engine(drive);
663 return __ide_dma_timeout(drive);
664 }
665
hpt370_ide_dma_lostirq(ide_drive_t * drive)666 static int hpt370_ide_dma_lostirq (ide_drive_t *drive)
667 {
668 hpt370_lostirq_timeout(drive);
669 hpt370_clear_engine(drive);
670 return __ide_dma_lostirq(drive);
671 }
672
673 /* returns 1 if DMA IRQ issued, 0 otherwise */
hpt374_ide_dma_test_irq(ide_drive_t * drive)674 static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
675 {
676 ide_hwif_t *hwif = HWIF(drive);
677 u16 bfifo = 0;
678 u8 reginfo = hwif->channel ? 0x56 : 0x52;
679 u8 dma_stat;
680
681 pci_read_config_word(hwif->pci_dev, reginfo, &bfifo);
682 if (bfifo & 0x1FF) {
683 // printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
684 return 0;
685 }
686
687 dma_stat = hwif->INB(hwif->dma_status);
688 /* return 1 if INTR asserted */
689 if ((dma_stat & 4) == 4)
690 return 1;
691
692 if (!drive->waiting_for_dma)
693 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
694 drive->name, __FUNCTION__);
695 return 0;
696 }
697
hpt374_ide_dma_end(ide_drive_t * drive)698 static int hpt374_ide_dma_end (ide_drive_t *drive)
699 {
700 struct pci_dev *dev = HWIF(drive)->pci_dev;
701 ide_hwif_t *hwif = HWIF(drive);
702 u8 msc_stat = 0, mscreg = hwif->channel ? 0x54 : 0x50;
703 u8 bwsr_stat = 0, bwsr_mask = hwif->channel ? 0x02 : 0x01;
704
705 pci_read_config_byte(dev, 0x6a, &bwsr_stat);
706 pci_read_config_byte(dev, mscreg, &msc_stat);
707 if ((bwsr_stat & bwsr_mask) == bwsr_mask)
708 pci_write_config_byte(dev, mscreg, msc_stat|0x30);
709 return __ide_dma_end(drive);
710 }
711
712 /**
713 * hpt372n_set_clock - perform clock switching dance
714 * @drive: Drive to switch
715 * @mode: Switching mode (0x21 for write, 0x23 otherwise)
716 *
717 * Switch the DPLL clock on the HPT372N devices. This is a
718 * right mess.
719 */
720
hpt372n_set_clock(ide_drive_t * drive,int mode)721 static void hpt372n_set_clock(ide_drive_t *drive, int mode)
722 {
723 ide_hwif_t *hwif = HWIF(drive);
724
725 /* FIXME: should we check for DMA active and BUG() */
726 /* Tristate the bus */
727 outb(0x80, hwif->dma_base+0x73);
728 outb(0x80, hwif->dma_base+0x77);
729
730 /* Switch clock and reset channels */
731 outb(mode, hwif->dma_base+0x7B);
732 outb(0xC0, hwif->dma_base+0x79);
733
734 /* Reset state machines */
735 outb(0x37, hwif->dma_base+0x70);
736 outb(0x37, hwif->dma_base+0x74);
737
738 /* Complete reset */
739 outb(0x00, hwif->dma_base+0x79);
740
741 /* Reconnect channels to bus */
742 outb(0x00, hwif->dma_base+0x73);
743 outb(0x00, hwif->dma_base+0x79);
744 }
745
746 /**
747 * hpt372n_rw_disk - wrapper for I/O
748 * @drive: drive for command
749 * @rq: block request structure
750 * @block: block number
751 *
752 * This is called when a disk I/O is issued to the 372N instead
753 * of the default functionality. We need it because of the clock
754 * switching
755 *
756 */
757
hpt372n_rw_disk(ide_drive_t * drive,struct request * rq,unsigned long block)758 static ide_startstop_t hpt372n_rw_disk(ide_drive_t *drive, struct request *rq, unsigned long block)
759 {
760 int wantclock;
761
762 if(rq_data_dir(rq) == READ)
763 wantclock = 0x21;
764 else
765 wantclock = 0x23;
766
767 if(HWIF(drive)->config_data != wantclock)
768 {
769 hpt372n_set_clock(drive, wantclock);
770 HWIF(drive)->config_data = wantclock;
771 }
772 return __ide_do_rw_disk(drive, rq, block);
773 }
774
775 /*
776 * Since SUN Cobalt is attempting to do this operation, I should disclose
777 * this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
778 * HOTSWAP ATA Infrastructure.
779 */
780
hpt3xx_reset(ide_drive_t * drive)781 static void hpt3xx_reset (ide_drive_t *drive)
782 {
783 #if 0
784 unsigned long high_16 = pci_resource_start(HWIF(drive)->pci_dev, 4);
785 u8 reset = (HWIF(drive)->channel) ? 0x80 : 0x40;
786 u8 reg59h = 0;
787
788 pci_read_config_byte(HWIF(drive)->pci_dev, 0x59, ®59h);
789 pci_write_config_byte(HWIF(drive)->pci_dev, 0x59, reg59h|reset);
790 pci_write_config_byte(HWIF(drive)->pci_dev, 0x59, reg59h);
791 #endif
792 }
793
hpt3xx_tristate(ide_drive_t * drive,int state)794 static int hpt3xx_tristate (ide_drive_t * drive, int state)
795 {
796 ide_hwif_t *hwif = HWIF(drive);
797 struct pci_dev *dev = hwif->pci_dev;
798 u8 reg59h = 0, reset = (hwif->channel) ? 0x80 : 0x40;
799 u8 regXXh = 0, state_reg= (hwif->channel) ? 0x57 : 0x53;
800
801 if (!hwif)
802 return -EINVAL;
803
804 // hwif->bus_state = state;
805
806 pci_read_config_byte(dev, 0x59, ®59h);
807 pci_read_config_byte(dev, state_reg, ®XXh);
808
809 switch(state)
810 {
811 case BUSSTATE_ON:
812 (void) ide_do_reset(drive);
813 pci_write_config_byte(dev, state_reg, regXXh|0x80);
814 pci_write_config_byte(dev, 0x59, reg59h|reset);
815 break;
816 case BUSSTATE_OFF:
817 pci_write_config_byte(dev, 0x59, reg59h & ~(reset));
818 pci_write_config_byte(dev, state_reg, regXXh & ~(0x80));
819 (void) ide_do_reset(drive);
820 break;
821 default:
822 return -EINVAL;
823 }
824 return 0;
825 }
826
827 /*
828 * set/get power state for a drive.
829 * turning the power off does the following things:
830 * 1) soft-reset the drive
831 * 2) tri-states the ide bus
832 *
833 * when we turn things back on, we need to re-initialize things.
834 */
835 #define TRISTATE_BIT 0x8000
hpt370_busproc(ide_drive_t * drive,int state)836 static int hpt370_busproc(ide_drive_t * drive, int state)
837 {
838 ide_hwif_t *hwif = HWIF(drive);
839 struct pci_dev *dev = hwif->pci_dev;
840 u8 tristate = 0, resetmask = 0, bus_reg = 0;
841 u16 tri_reg;
842
843 if (!hwif)
844 return -EINVAL;
845
846 hwif->bus_state = state;
847
848 if (hwif->channel) {
849 /* secondary channel */
850 tristate = 0x56;
851 resetmask = 0x80;
852 } else {
853 /* primary channel */
854 tristate = 0x52;
855 resetmask = 0x40;
856 }
857
858 /* grab status */
859 pci_read_config_word(dev, tristate, &tri_reg);
860 pci_read_config_byte(dev, 0x59, &bus_reg);
861
862 /* set the state. we don't set it if we don't need to do so.
863 * make sure that the drive knows that it has failed if it's off */
864 switch (state) {
865 case BUSSTATE_ON:
866 hwif->drives[0].failures = 0;
867 hwif->drives[1].failures = 0;
868 if ((bus_reg & resetmask) == 0)
869 return 0;
870 tri_reg &= ~TRISTATE_BIT;
871 bus_reg &= ~resetmask;
872 break;
873 case BUSSTATE_OFF:
874 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
875 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
876 if ((tri_reg & TRISTATE_BIT) == 0 && (bus_reg & resetmask))
877 return 0;
878 tri_reg &= ~TRISTATE_BIT;
879 bus_reg |= resetmask;
880 break;
881 case BUSSTATE_TRISTATE:
882 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
883 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
884 if ((tri_reg & TRISTATE_BIT) && (bus_reg & resetmask))
885 return 0;
886 tri_reg |= TRISTATE_BIT;
887 bus_reg |= resetmask;
888 break;
889 default:
890 return -EINVAL;
891 }
892 pci_write_config_byte(dev, 0x59, bus_reg);
893 pci_write_config_word(dev, tristate, tri_reg);
894
895 return 0;
896 }
897
init_hpt37x(struct pci_dev * dev)898 static int __init init_hpt37x(struct pci_dev *dev)
899 {
900 int adjust, i;
901 u16 freq;
902 u32 pll;
903 u8 reg5bh;
904 u8 reg5ah;
905 unsigned long dmabase = pci_resource_start(dev, 4);
906 u8 did, rid;
907 int is_372n = 0;
908 #if 1
909 pci_read_config_byte(dev, 0x5a, ®5ah);
910 /* interrupt force enable */
911 pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10));
912 #endif
913
914 if(dmabase)
915 {
916 did = inb(dmabase + 0x22);
917 rid = inb(dmabase + 0x28);
918
919 if((did == 4 && rid == 6) || (did == 5 && rid > 1))
920 is_372n = 1;
921 }
922
923 /*
924 * default to pci clock. make sure MA15/16 are set to output
925 * to prevent drives having problems with 40-pin cables. Needed
926 * for some drives such as IBM-DTLA which will not enter ready
927 * state on reset when PDIAG is a input.
928 *
929 * ToDo: should we set 0x21 when using PLL mode ?
930 */
931 pci_write_config_byte(dev, 0x5b, 0x23);
932
933 /*
934 * set up the PLL. we need to adjust it so that it's stable.
935 * freq = Tpll * 192 / Tpci
936 *
937 * Todo. For non x86 should probably check the dword is
938 * set to 0xABCDExxx indicating the BIOS saved f_CNT
939 */
940 pci_read_config_word(dev, 0x78, &freq);
941 freq &= 0x1FF;
942
943 /*
944 * The 372N uses different PCI clock information and has
945 * some other complications
946 * On PCI33 timing we must clock switch
947 * On PCI66 timing we must NOT use the PCI clock
948 *
949 * Currently we always set up the PLL for the 372N
950 */
951
952 pci_set_drvdata(dev, NULL);
953
954 if(is_372n)
955 {
956 printk(KERN_INFO "hpt: HPT372N detected, using 372N timing.\n");
957 if(freq < 0x55)
958 pll = F_LOW_PCI_33;
959 else if(freq < 0x70)
960 pll = F_LOW_PCI_40;
961 else if(freq < 0x7F)
962 pll = F_LOW_PCI_50;
963 else
964 pll = F_LOW_PCI_66;
965
966 printk(KERN_INFO "FREQ: %d PLL: %d\n", freq, pll);
967
968 /* We always use the pll not the PCI clock on 372N */
969 }
970 else
971 {
972 if(freq < 0x9C)
973 pll = F_LOW_PCI_33;
974 else if(freq < 0xb0)
975 pll = F_LOW_PCI_40;
976 else if(freq <0xc8)
977 pll = F_LOW_PCI_50;
978 else
979 pll = F_LOW_PCI_66;
980
981 if (pll == F_LOW_PCI_33) {
982 if (hpt_minimum_revision(dev,8))
983 pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
984 else if (hpt_minimum_revision(dev,5))
985 pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
986 else if (hpt_minimum_revision(dev,4))
987 pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
988 else
989 pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
990 printk("HPT37X: using 33MHz PCI clock\n");
991 } else if (pll == F_LOW_PCI_40) {
992 /* Unsupported */
993 } else if (pll == F_LOW_PCI_50) {
994 if (hpt_minimum_revision(dev,8))
995 pci_set_drvdata(dev, NULL);
996 else if (hpt_minimum_revision(dev,5))
997 pci_set_drvdata(dev, (void *) fifty_base_hpt372);
998 else if (hpt_minimum_revision(dev,4))
999 pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
1000 else
1001 pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
1002 printk("HPT37X: using 50MHz PCI clock\n");
1003 } else {
1004 if (hpt_minimum_revision(dev,8))
1005 {
1006 printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
1007 }
1008 else if (hpt_minimum_revision(dev,5))
1009 pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
1010 else if (hpt_minimum_revision(dev,4))
1011 pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
1012 else
1013 pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
1014 printk("HPT37X: using 66MHz PCI clock\n");
1015 }
1016 }
1017
1018 /*
1019 * only try the pll if we don't have a table for the clock
1020 * speed that we're running at. NOTE: the internal PLL will
1021 * result in slow reads when using a 33MHz PCI clock. we also
1022 * don't like to use the PLL because it will cause glitches
1023 * on PRST/SRST when the HPT state engine gets reset.
1024 *
1025 * ToDo: Use 66MHz PLL when ATA133 devices are present on a
1026 * 372 device so we can get ATA133 support
1027 */
1028 if (pci_get_drvdata(dev))
1029 goto init_hpt37X_done;
1030
1031 if (hpt_minimum_revision(dev,8))
1032 {
1033 printk(KERN_ERR "HPT374: Only 33MHz PCI timings are supported.\n");
1034 return -EOPNOTSUPP;
1035 }
1036 /*
1037 * adjust PLL based upon PCI clock, enable it, and wait for
1038 * stabilization.
1039 */
1040 adjust = 0;
1041 freq = (pll < F_LOW_PCI_50) ? 2 : 4;
1042 while (adjust++ < 6) {
1043 pci_write_config_dword(dev, 0x5c, (freq + pll) << 16 |
1044 pll | 0x100);
1045
1046 /* wait for clock stabilization */
1047 for (i = 0; i < 0x50000; i++) {
1048 pci_read_config_byte(dev, 0x5b, ®5bh);
1049 if (reg5bh & 0x80) {
1050 /* spin looking for the clock to destabilize */
1051 for (i = 0; i < 0x1000; ++i) {
1052 pci_read_config_byte(dev, 0x5b,
1053 ®5bh);
1054 if ((reg5bh & 0x80) == 0)
1055 goto pll_recal;
1056 }
1057 pci_read_config_dword(dev, 0x5c, &pll);
1058 pci_write_config_dword(dev, 0x5c,
1059 pll & ~0x100);
1060 pci_write_config_byte(dev, 0x5b, 0x21);
1061 if (hpt_minimum_revision(dev,5))
1062 pci_set_drvdata(dev, (void *) fifty_base_hpt372);
1063 else if (hpt_minimum_revision(dev,4))
1064 pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
1065 else
1066 pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
1067 printk("HPT37X: using 50MHz internal PLL\n");
1068 goto init_hpt37X_done;
1069 }
1070 }
1071 pll_recal:
1072 if (adjust & 1)
1073 pll -= (adjust >> 1);
1074 else
1075 pll += (adjust >> 1);
1076 }
1077
1078 init_hpt37X_done:
1079 /* reset state engine */
1080 pci_write_config_byte(dev, 0x50, 0x37);
1081 pci_write_config_byte(dev, 0x54, 0x37);
1082 udelay(100);
1083 return 0;
1084 }
1085
init_hpt366(struct pci_dev * dev)1086 static int __init init_hpt366 (struct pci_dev *dev)
1087 {
1088 u32 reg1 = 0;
1089 u8 drive_fast = 0;
1090
1091 /*
1092 * Disable the "fast interrupt" prediction.
1093 */
1094 pci_read_config_byte(dev, 0x51, &drive_fast);
1095 if (drive_fast & 0x80)
1096 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
1097 pci_read_config_dword(dev, 0x40, ®1);
1098
1099 /* detect bus speed by looking at control reg timing: */
1100 switch((reg1 >> 8) & 7) {
1101 case 5:
1102 pci_set_drvdata(dev, (void *) forty_base_hpt366);
1103 break;
1104 case 9:
1105 pci_set_drvdata(dev, (void *) twenty_five_base_hpt366);
1106 break;
1107 case 7:
1108 default:
1109 pci_set_drvdata(dev, (void *) thirty_three_base_hpt366);
1110 break;
1111 }
1112
1113 if (!pci_get_drvdata(dev))
1114 {
1115 printk(KERN_ERR "hpt366: unknown bus timing.\n");
1116 pci_set_drvdata(dev, NULL);
1117 }
1118 return 0;
1119 }
1120
init_chipset_hpt366(struct pci_dev * dev,const char * name)1121 static unsigned int __init init_chipset_hpt366 (struct pci_dev *dev, const char *name)
1122 {
1123 int ret = 0;
1124 u8 test = 0;
1125
1126 if (dev->resource[PCI_ROM_RESOURCE].start)
1127 pci_write_config_byte(dev, PCI_ROM_ADDRESS,
1128 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
1129
1130 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &test);
1131 if (test != (L1_CACHE_BYTES / 4))
1132 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
1133 (L1_CACHE_BYTES / 4));
1134
1135 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &test);
1136 if (test != 0x78)
1137 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
1138
1139 pci_read_config_byte(dev, PCI_MIN_GNT, &test);
1140 if (test != 0x08)
1141 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
1142
1143 pci_read_config_byte(dev, PCI_MAX_LAT, &test);
1144 if (test != 0x08)
1145 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
1146
1147 if (hpt_minimum_revision(dev, 3)) {
1148 ret = init_hpt37x(dev);
1149 } else {
1150 ret =init_hpt366(dev);
1151 }
1152 if (ret)
1153 return ret;
1154
1155 #if defined(DISPLAY_HPT366_TIMINGS) && defined(CONFIG_PROC_FS)
1156 hpt_devs[n_hpt_devs++] = dev;
1157
1158 if (!hpt366_proc) {
1159 hpt366_proc = 1;
1160 ide_pci_register_host_proc(&hpt366_procs[0]);
1161 }
1162 #endif /* DISPLAY_HPT366_TIMINGS && CONFIG_PROC_FS */
1163
1164 return dev->irq;
1165 }
1166
/*
 *	init_hwif_hpt366	-	per-interface setup
 *	@hwif: IDE interface being initialised
 *
 *	Detect an HPT372N variant via the DMA-base ID registers, install
 *	the tuning/interrupt/reset hooks, read the cable-detect state and
 *	configure the DMA method pointers for the chip revision.
 */
static void __init init_hwif_hpt366 (ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;
	/* cable-detect bit for this channel in register 0x5a */
	u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
	u8 did, rid;
	unsigned long dmabase = hwif->dma_base;
	int is_372n = 0;

	/* The 372N cannot be told apart by PCI revision alone; probe the
	   device/revision ID ports in the bus-master I/O window. */
	if(dmabase)
	{
		did = inb(dmabase + 0x22);
		rid = inb(dmabase + 0x28);

		if((did == 4 && rid == 6) || (did == 5 && rid > 1))
			is_372n = 1;
	}

	if(is_372n)
		printk(KERN_ERR "HPT372N support is EXPERIMENTAL ONLY.\n");

	/* standard tuning / quirk hooks shared by all revisions */
	hwif->tuneproc = &hpt3xx_tune_drive;
	hwif->speedproc = &hpt3xx_tune_chipset;
	hwif->quirkproc = &hpt3xx_quirkproc;
	hwif->intrproc = &hpt3xx_intrproc;
	hwif->maskproc = &hpt3xx_maskproc;

	/* the 372N needs its own read/write path (clock switching) */
	if(is_372n)
		hwif->rw_disk = &hpt372n_rw_disk;

	/*
	 * The HPT37x uses the CBLID pins as outputs for MA15/MA16
	 * address lines to access an external eeprom. To read valid
	 * cable detect state the pins must be enabled as inputs.
	 */
	if (hpt_minimum_revision(dev, 8) && PCI_FUNC(dev->devfn) & 1) {
		/*
		 * HPT374 PCI function 1
		 * - set bit 15 of reg 0x52 to enable TCBLID as input
		 * - set bit 15 of reg 0x56 to enable FCBLID as input
		 */
		u16 mcr3, mcr6;
		pci_read_config_word(dev, 0x52, &mcr3);
		pci_read_config_word(dev, 0x56, &mcr6);
		pci_write_config_word(dev, 0x52, mcr3 | 0x8000);
		pci_write_config_word(dev, 0x56, mcr6 | 0x8000);
		/* now read cable id register */
		pci_read_config_byte(dev, 0x5a, &ata66);
		/* restore the original MCR values afterwards */
		pci_write_config_word(dev, 0x52, mcr3);
		pci_write_config_word(dev, 0x56, mcr6);
	} else if (hpt_minimum_revision(dev, 3)) {
		/*
		 * HPT370/372 and 374 pcifn 0
		 * - clear bit 0 of 0x5b to enable P/SCBLID as inputs
		 */
		u8 scr2;
		pci_read_config_byte(dev, 0x5b, &scr2);
		pci_write_config_byte(dev, 0x5b, scr2 & ~1);
		/* now read cable id register */
		pci_read_config_byte(dev, 0x5a, &ata66);
		pci_write_config_byte(dev, 0x5b, scr2);
	} else {
		/* HPT366/368: cable state readable directly */
		pci_read_config_byte(dev, 0x5a, &ata66);
	}

#ifdef DEBUG
	printk("HPT366: reg5ah=0x%02x ATA-%s Cable Port%d\n",
		ata66, (ata66 & regmask) ? "33" : "66",
		PCI_FUNC(hwif->pci_dev->devfn));
#endif /* DEBUG */

#ifdef HPT_SERIALIZE_IO
	/* serialize access to this device */
	if (hwif->mate)
		hwif->serialized = hwif->mate->serialized = 1;
#endif

	if (hpt_minimum_revision(dev,3)) {
		u8 reg5ah = 0;
		/* NOTE(review): reg5ah is never read back before this
		   write, so this always writes 0x00 to register 0x5a.
		   The intent was presumably to read 0x5a first and only
		   clear bit 4 - verify against the HighPoint docs. */
		pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10);
		/*
		 * set up ioctl for power status.
		 * note: power affects both
		 * drives on each channel
		 */
		hwif->resetproc = &hpt3xx_reset;
		hwif->busproc = &hpt370_busproc;
//		hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
	} else if (hpt_minimum_revision(dev,2)) {
		hwif->resetproc = &hpt3xx_reset;
		hwif->busproc = &hpt3xx_tristate;
	} else {
		/* NOTE(review): identical to the revision-2 branch above;
		   kept separate, possibly for future differentiation. */
		hwif->resetproc = &hpt3xx_reset;
		hwif->busproc = &hpt3xx_tristate;
	}

	/* no DMA engine: PIO only, let the core autotune */
	if (!hwif->dma_base) {
		hwif->drives[0].autotune = 1;
		hwif->drives[1].autotune = 1;
		return;
	}

	hwif->ultra_mask = 0x7f;
	hwif->mwdma_mask = 0x07;

	/* trust the BIOS/core flag if set; otherwise use cable detect */
	if (!(hwif->udma_four))
		hwif->udma_four = ((ata66 & regmask) ? 0 : 1);
	hwif->ide_dma_check = &hpt366_config_drive_xfer_rate;

	/* revision-specific DMA method pointers; the 372 (rev >= 5)
	   reuses the 374 end/test_irq handlers */
	if (hpt_minimum_revision(dev,8)) {
		hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
		hwif->ide_dma_end = &hpt374_ide_dma_end;
	} else if (hpt_minimum_revision(dev,5)) {
		hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
		hwif->ide_dma_end = &hpt374_ide_dma_end;
	} else if (hpt_minimum_revision(dev,3)) {
		hwif->ide_dma_begin = &hpt370_ide_dma_begin;
		hwif->ide_dma_end = &hpt370_ide_dma_end;
		hwif->ide_dma_timeout = &hpt370_ide_dma_timeout;
		hwif->ide_dma_lostirq = &hpt370_ide_dma_lostirq;
	} else if (hpt_minimum_revision(dev,2))
		hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
	else
		hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;

	if (!noautodma)
		hwif->autodma = 1;
	hwif->drives[0].autodma = hwif->autodma;
	hwif->drives[1].autodma = hwif->autodma;
}
1296
/*
 *	init_dma_hpt366	-	set up the DMA engine for one channel
 *	@hwif: IDE interface
 *	@dmabase: bus-master I/O base for this channel
 *
 *	Refuse DMA when no timing table was selected at chipset init,
 *	otherwise mirror the per-drive DMA-capable bits from PCI config
 *	space into the bus-master status register and hand the engine to
 *	the IDE core.
 */
static void __init init_dma_hpt366 (ide_hwif_t *hwif, unsigned long dmabase)
{
	u8 masterdma	= 0, slavedma	= 0;
	u8 dma_new	= 0, dma_old	= 0;
	/* per-channel config offsets holding the drive DMA timing bits */
	u8 primary	= hwif->channel ? 0x4b : 0x43;
	u8 secondary	= hwif->channel ? 0x4f : 0x47;
	unsigned long flags;

	if (!dmabase)
		return;

	/* no clock table means timings are unknown - DMA would be unsafe */
	if(pci_get_drvdata(hwif->pci_dev) == NULL)
	{
		printk(KERN_WARNING "hpt: no known IDE timings, disabling DMA.\n");
		return;
	}

	dma_old = hwif->INB(dmabase+2);

	local_irq_save(flags);

	dma_new = dma_old;
	pci_read_config_byte(hwif->pci_dev, primary, &masterdma);
	pci_read_config_byte(hwif->pci_dev, secondary, &slavedma);

	/* propagate "drive is DMA timed" into the status register bits */
	if (masterdma & 0x30)	dma_new |= 0x20;
	if (slavedma & 0x30)	dma_new |= 0x40;
	if (dma_new != dma_old)
		hwif->OUTB(dma_new, dmabase+2);

	local_irq_restore(flags);

	ide_setup_dma(hwif, dmabase, 8);
}
1331
1332 extern void ide_setup_pci_device(struct pci_dev *, ide_pci_device_t *);
1333 extern void ide_setup_pci_devices(struct pci_dev *, struct pci_dev *, ide_pci_device_t *);
1334
init_setup_hpt374(struct pci_dev * dev,ide_pci_device_t * d)1335 static void __init init_setup_hpt374 (struct pci_dev *dev, ide_pci_device_t *d)
1336 {
1337 struct pci_dev *findev = NULL;
1338
1339 if (PCI_FUNC(dev->devfn) & 1)
1340 return;
1341
1342 pci_for_each_dev(findev) {
1343 if ((findev->vendor == dev->vendor) &&
1344 (findev->device == dev->device) &&
1345 ((findev->devfn - dev->devfn) == 1) &&
1346 (PCI_FUNC(findev->devfn) & 1)) {
1347 u8 irq = 0, irq2 = 0;
1348 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1349 pci_read_config_byte(findev, PCI_INTERRUPT_LINE, &irq2);
1350 if (irq != irq2) {
1351 pci_write_config_byte(findev,
1352 PCI_INTERRUPT_LINE, irq);
1353 findev->irq = dev->irq;
1354 printk("%s: pci-config space interrupt "
1355 "fixed.\n", d->name);
1356 }
1357 ide_setup_pci_devices(dev, findev, d);
1358 return;
1359 }
1360 }
1361 ide_setup_pci_device(dev, d);
1362 }
1363
/*
 *	init_setup_hpt37x	-	probe entry for single-function HPT37x
 *	@dev: PCI device found
 *	@d: matching chipset table entry
 *
 *	No function pairing or table fixups are needed for these chips;
 *	hand the device straight to the generic IDE PCI setup.  Kept as a
 *	separate function to match the init_setup hook signature.
 */
static void __init init_setup_hpt37x (struct pci_dev *dev, ide_pci_device_t *d)
{
	ide_setup_pci_device(dev, d);
}
1368
init_setup_hpt366(struct pci_dev * dev,ide_pci_device_t * d)1369 static void __init init_setup_hpt366 (struct pci_dev *dev, ide_pci_device_t *d)
1370 {
1371 struct pci_dev *findev = NULL;
1372 u8 pin1 = 0, pin2 = 0;
1373 unsigned int class_rev;
1374 static char *chipset_names[] = {"HPT366", "HPT366", "HPT368",
1375 "HPT370", "HPT370A", "HPT372"};
1376
1377 if (PCI_FUNC(dev->devfn) & 1)
1378 return;
1379
1380 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
1381 class_rev &= 0xff;
1382
1383 /* New ident 372N reports revision 1. We could do the
1384 io port based type identification instead perhaps (DID, RID) */
1385
1386 if(d->device == PCI_DEVICE_ID_TTI_HPT372N)
1387 class_rev = 5;
1388
1389 if(class_rev < 6)
1390 d->name = chipset_names[class_rev];
1391
1392 switch(class_rev) {
1393 case 5:
1394 case 4:
1395 case 3: ide_setup_pci_device(dev, d);
1396 return;
1397 default: break;
1398 }
1399
1400 d->channels = 1;
1401
1402 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1);
1403 pci_for_each_dev(findev) {
1404 if ((findev->vendor == dev->vendor) &&
1405 (findev->device == dev->device) &&
1406 ((findev->devfn - dev->devfn) == 1) &&
1407 (PCI_FUNC(findev->devfn) & 1)) {
1408 pci_read_config_byte(findev, PCI_INTERRUPT_PIN, &pin2);
1409 if ((pin1 != pin2) && (dev->irq == findev->irq)) {
1410 d->bootable = ON_BOARD;
1411 printk("%s: onboard version of chipset, "
1412 "pin1=%d pin2=%d\n", d->name,
1413 pin1, pin2);
1414 }
1415 ide_setup_pci_devices(dev, findev, d);
1416 return;
1417 }
1418 }
1419 ide_setup_pci_device(dev, d);
1420 }
1421
1422
1423 /**
1424 * hpt366_init_one - called when an HPT366 is found
1425 * @dev: the hpt366 device
1426 * @id: the matching pci id
1427 *
1428 * Called when the PCI registration layer (or the IDE initialization)
1429 * finds a device matching our IDE device tables.
1430 */
1431
hpt366_init_one(struct pci_dev * dev,const struct pci_device_id * id)1432 static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1433 {
1434 ide_pci_device_t *d = &hpt366_chipsets[id->driver_data];
1435
1436 if (dev->device != d->device)
1437 BUG();
1438 d->init_setup(dev, d);
1439 MOD_INC_USE_COUNT;
1440 return 0;
1441 }
1442
/*
 * PCI IDs handled by this driver.  driver_data is the index into the
 * hpt366_chipsets[] table consumed by the probe routine.
 */
static struct pci_device_id hpt366_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{ 0, },
};
1452
/* PCI driver glue: probe only - no remove hook is provided here. */
static struct pci_driver driver = {
	.name		= "HPT366 IDE",
	.id_table	= hpt366_pci_tbl,
	.probe		= hpt366_init_one,
};
1458
/* Module entry point: register the PCI driver with the IDE layer. */
static int hpt366_ide_init(void)
{
	return ide_pci_register_driver(&driver);
}
1463
/* Module exit point: unregister the PCI driver from the IDE layer. */
static void hpt366_ide_exit(void)
{
	ide_pci_unregister_driver(&driver);
}
1468
/* Standard module registration and metadata. */
module_init(hpt366_ide_init);
module_exit(hpt366_ide_exit);

MODULE_AUTHOR("Andre Hedrick");
MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE");
MODULE_LICENSE("GPL");

EXPORT_NO_SYMBOLS;