/*
 * libata-sff.c - helper library for PCI IDE BMDMA
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 *		  Please ALWAYS copy linux-ide@vger.kernel.org
 *		  on emails.
 *
 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
 * Copyright 2003-2006 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>

#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
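/*
 * Usage sketch (not part of this file): an SFF driver typically builds
 * on these defaults by inheriting ata_sff_port_ops and overriding only
 * what its controller needs.  The "pata_example" names below are
 * hypothetical:
 *
 *	static struct ata_port_operations pata_example_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.set_piomode	= pata_example_set_piomode,
 *	};
 */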

/**
 * ata_sff_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and return its value. This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 * ata_sff_altstatus - Read device alternate status reg
 * @ap: port where the device is
 *
 * Reads ATA taskfile alternate status register for
 * currently-selected device and return its value.
 *
 * Note: may NOT be used as the check_altstatus() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 * ata_sff_irq_status - Check if the device is busy
 * @ap: port where the device is
 *
 * Determine if the port is currently busy. Uses altstatus
 * if available in order to avoid clearing shared IRQ status
 * when finding an IRQ source. Fortunately for us, non-ctl-capable
 * devices don't share interrupt lines.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 * ata_sff_sync - Flush writes
 * @ap: Port to wait for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}

/**
 * ata_sff_pause - Flush writes and wait 400ns
 * @ap: Port to pause for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 * ata_sff_dma_pause - Pause before commencing DMA
 * @ap: Port to pause for.
 *
 * Perform I/O fencing and ensure sufficient cycle delays occur
 * for the HDMA1:0 transition.
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/* An altstatus read will cause the needed delay without
		   messing up the IRQ status */
		ata_sff_altstatus(ap);
		return;
	}
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout in msecs
 * @tmout: overall timeout in msecs
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_warn(ap,
			      "port is slow to respond, please be patient (Status 0x%x)\n",
			      status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_err(ap,
			     "port failed to respond (%lu secs, Status 0x%x)\n",
			     DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
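/*
 * Example (sketch): probe/reset paths usually pass a short "impatience"
 * threshold so slow drives draw a warning before the hard timeout, e.g.:
 *
 *	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *
 * Any msec pair with tmout_pat <= tmout behaves the same way; the
 * ATA_TMOUT_BOOT* constants are the historical libata boot timeouts.
 */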

static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 * ata_sff_wait_ready - sleep until BSY clears, or timeout
 * @link: SFF link to wait ready status for
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until ATA Status register bit BSY clears, or timeout
 * occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 * ata_sff_set_devctl - Write device control reg
 * @ap: port where the device is
 * @ctl: value to write
 *
 * Writes ATA taskfile device control register.
 *
 * Note: may NOT be used as the sff_set_devctl() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
}

/**
 * ata_sff_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel. Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_sff_dev_select(), which
 * additionally provides the services of inserting the proper
 * pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
			      device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_sff_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * Note: may NOT be used as the sff_irq_on() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 * ata_sff_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
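/*
 * Sketch (simplified): callers normally build the taskfile with the
 * library helpers rather than by hand before loading it, e.g.:
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.command = ATA_CMD_ID_ATA;
 *	ap->ops->sff_tf_load(ap, &tf);
 *
 * ata_tf_init() zeroes the taskfile and sets the device bits; the
 * flags tell this function which register groups to actually write.
 */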

/**
 * ata_sff_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf. Assumes the device has a fully SFF compliant task file
 * layout and behaviour. If your device does not (e.g. has a different
 * status method) then you will need to provide a replacement tf_read.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_sff_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 * ata_sff_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 * ata_sff_data_xfer - Transfer data by PIO
 * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
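/*
 * Controllers with quirky data ports usually wrap this helper instead
 * of reimplementing PIO.  A hypothetical sketch:
 *
 *	static unsigned int pata_example_data_xfer(struct ata_device *dev,
 *			unsigned char *buf, unsigned int buflen, int rw)
 *	{
 *		pata_example_select_fifo(dev->link->ap);  // hypothetical
 *		return ata_sff_data_xfer(dev, buf, buflen, rw);
 *	}
 *
 * wired up through .sff_data_xfer in the driver's ata_port_operations.
 */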

/**
 * ata_sff_data_xfer32 - Transfer data by PIO
 * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO using 32bit
 * I/O operations.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(dev, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
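/*
 * 32bit PIO is opt-in.  A driver that knows its data port is 32bit
 * safe enables it at port init time (as some legacy PATA drivers do):
 *
 *	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
 *
 * Without ATA_PFLAG_PIO32 this helper silently falls back to the
 * 16bit ata_sff_data_xfer() above.
 */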

/**
 * ata_sff_data_xfer_noirq - Transfer data by PIO
 * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO. Do the
 * transfer with interrupts disabled.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				     unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);

/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command on going
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page);

		/* do the actual data transfer */
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);

		kunmap_atomic(buf);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);
	}

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command on going
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}

/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When device has indicated its readiness to accept
 * a CDB, this function is called. Send the CDB.
 *
 * LOCKING:
 * caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page);

		/* do the actual data transfer */
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);

		kunmap_atomic(buf);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}

/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & ATAPI_COD))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 * @ap: the target ata_port
 * @qc: qc on going
 *
 * RETURNS:
 * 1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 * ata_hsm_qc_complete - finish a qc running on standard HSM
 * @qc: Command to complete
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * Finish @qc which is running on standard HSM.
 *
 * LOCKING:
 * If @in_wq is zero, spin_lock_irqsave(host lock).
 * Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_sff_irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}

/**
 * ata_sff_hsm_move - move the HSM to the next state.
 * @ap: the target ata_port
 * @qc: qc on going
 * @status: current device status
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * RETURNS:
 * 1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	unsigned long flags = 0;
	int poll_next;

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device. Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have. Set NODEV_HINT
				 * for it. Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
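/*
 * Both the interrupt path (__ata_sff_port_intr() below) and the polling
 * path (ata_sff_pio_task()) funnel into ata_sff_hsm_move().  A polling
 * caller is conceptually just:
 *
 *	do {
 *		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
 *	} while (ata_sff_hsm_move(ap, qc, status, 1));
 *
 * This is a sketch only -- the real ata_sff_pio_task() additionally
 * snoozes and re-queues itself instead of busy-waiting on BSY.
 */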

void ata_sff_queue_work(struct work_struct *work)
{
	queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);

void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);

void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

void ata_sff_flush_pio_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_delayed_work_sync(&ap->sff_pio_task);
	ap->hsm_task_state = HSM_ST_IDLE;
	ap->sff_pio_task_link = NULL;

	if (ata_msg_ctl(ap))
		ata_port_dbg(ap, "%s: EXIT\n", __func__);
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		return;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		ata_msleep(ap, 2);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			return;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 * ata_sff_qc_issue - issue taskfile to a SFF controller
 * @qc: command to issue to device
 *
 * This function issues a PIO or NODATA command to a SFF
 * controller.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest. otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON_ONCE(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 * @qc: qc to fill result TF for
 *
 * @qc is finished and result TF needs to be filled. Fill it
 * using ->sff_tf_read.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
	return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices. No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 * ata_sff_port_intr - Handle SFF port interrupt
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting IRQ but the controller is actually
	 * asserting IRQ line, a "nobody cared" warning will ensue.
	 * Check IRQ pending status if available and clear spurious IRQ.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 * ata_sff_interrupt - Default SFF ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices. Calls
 * ata_sff_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
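/*
 * Sketch: a typical driver hands this handler to the core when
 * activating its host; everything but the libata calls is assumed:
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &pata_example_sht);
 *
 * IRQF_SHARED matters here: PCI IDE interrupt lines are commonly
 * shared, which is why the handler must tolerate spurious calls.
 */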
1684
1685 /**
1686 * ata_sff_lost_interrupt - Check for an apparent lost interrupt
1687 * @ap: port that appears to have timed out
1688 *
1689 * Called from the libata error handlers when the core code suspects
1690 * an interrupt has been lost. If it has complete anything we can and
1691 * then return. Interface must support altstatus for this faster
1692 * recovery to occur.
1693 *
1694 * Locking:
1695 * Caller holds host lock
1696 */
1697
ata_sff_lost_interrupt(struct ata_port * ap)1698 void ata_sff_lost_interrupt(struct ata_port *ap)
1699 {
1700 u8 status;
1701 struct ata_queued_cmd *qc;
1702
1703 /* Only one outstanding command per SFF channel */
1704 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1705 /* We cannot lose an interrupt on a non-existent or polled command */
1706 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1707 return;
1708 /* See if the controller thinks it is still busy - if so the command
1709 isn't a lost IRQ but is still in progress */
1710 status = ata_sff_altstatus(ap);
1711 if (status & ATA_BUSY)
1712 return;
1713
1714 /* There was a command running, we are no longer busy and we have
1715 no interrupt. */
1716 ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
1717 status);
1718 /* Run the host interrupt logic as if the interrupt had not been
1719 lost */
1720 ata_sff_port_intr(ap, qc);
1721 }
1722 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1723
1724 /**
1725 * ata_sff_freeze - Freeze SFF controller port
1726 * @ap: port to freeze
1727 *
1728 * Freeze SFF controller port.
1729 *
1730 * LOCKING:
1731 * Inherited from caller.
1732 */
ata_sff_freeze(struct ata_port * ap)1733 void ata_sff_freeze(struct ata_port *ap)
1734 {
1735 ap->ctl |= ATA_NIEN;
1736 ap->last_ctl = ap->ctl;
1737
1738 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1739 ata_sff_set_devctl(ap, ap->ctl);
1740
1741 /* Under certain circumstances, some controllers raise IRQ on
1742 * ATA_NIEN manipulation. Also, many controllers fail to mask
1743 * previously pending IRQ on ATA_NIEN assertion. Clear it.
1744 */
1745 ap->ops->sff_check_status(ap);
1746
1747 if (ap->ops->sff_irq_clear)
1748 ap->ops->sff_irq_clear(ap);
1749 }
1750 EXPORT_SYMBOL_GPL(ata_sff_freeze);
1751
1752 /**
1753 * ata_sff_thaw - Thaw SFF controller port
1754 * @ap: port to thaw
1755 *
1756 * Thaw SFF controller port.
1757 *
1758 * LOCKING:
1759 * Inherited from caller.
1760 */
ata_sff_thaw(struct ata_port * ap)1761 void ata_sff_thaw(struct ata_port *ap)
1762 {
1763 /* clear & re-enable interrupts */
1764 ap->ops->sff_check_status(ap);
1765 if (ap->ops->sff_irq_clear)
1766 ap->ops->sff_irq_clear(ap);
1767 ata_sff_irq_on(ap);
1768 }
1769 EXPORT_SYMBOL_GPL(ata_sff_thaw);
1770
1771 /**
1772 * ata_sff_prereset - prepare SFF link for reset
1773 * @link: SFF link to be reset
1774 * @deadline: deadline jiffies for the operation
1775 *
1776 * SFF link @link is about to be reset. Initialize it. It first
1777 * calls ata_std_prereset() and wait for !BSY if the port is
1778 * being softreset.
1779 *
1780 * LOCKING:
1781 * Kernel thread context (may sleep)
1782 *
1783 * RETURNS:
1784 * 0 on success, -errno otherwise.
1785 */
ata_sff_prereset(struct ata_link * link,unsigned long deadline)1786 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1787 {
1788 struct ata_eh_context *ehc = &link->eh_context;
1789 int rc;
1790
1791 rc = ata_std_prereset(link, deadline);
1792 if (rc)
1793 return rc;
1794
1795 /* if we're about to do hardreset, nothing more to do */
1796 if (ehc->i.action & ATA_EH_HARDRESET)
1797 return 0;
1798
1799 /* wait for !BSY if we don't know that no device is attached */
1800 if (!ata_link_offline(link)) {
1801 rc = ata_sff_wait_ready(link, deadline);
1802 if (rc && rc != -ENODEV) {
1803 ata_link_warn(link,
1804 "device not ready (errno=%d), forcing hardreset\n",
1805 rc);
1806 ehc->i.action |= ATA_EH_HARDRESET;
1807 }
1808 }
1809
1810 return 0;
1811 }
1812 EXPORT_SYMBOL_GPL(ata_sff_prereset);
1813
1814 /**
1815 * ata_devchk - PATA device presence detection
1816 * @ap: ATA channel to examine
1817 * @device: Device to examine (starting at zero)
1818 *
1819 * This technique was originally described in
1820 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1821 * later found its way into the ATA/ATAPI spec.
1822 *
1823 * Write a pattern to the ATA shadow registers,
1824 * and if a device is present, it will respond by
1825 * correctly storing and echoing back the
1826 * ATA shadow register contents.
1827 *
1828 * LOCKING:
1829 * caller.
1830 */
ata_devchk(struct ata_port * ap,unsigned int device)1831 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1832 {
1833 struct ata_ioports *ioaddr = &ap->ioaddr;
1834 u8 nsect, lbal;
1835
1836 ap->ops->sff_dev_select(ap, device);
1837
1838 iowrite8(0x55, ioaddr->nsect_addr);
1839 iowrite8(0xaa, ioaddr->lbal_addr);
1840
1841 iowrite8(0xaa, ioaddr->nsect_addr);
1842 iowrite8(0x55, ioaddr->lbal_addr);
1843
1844 iowrite8(0x55, ioaddr->nsect_addr);
1845 iowrite8(0xaa, ioaddr->lbal_addr);
1846
1847 nsect = ioread8(ioaddr->nsect_addr);
1848 lbal = ioread8(ioaddr->lbal_addr);
1849
1850 if ((nsect == 0x55) && (lbal == 0xaa))
1851 return 1; /* we found a device */
1852
1853 return 0; /* nothing found */
1854 }
1855
1856 /**
1857 * ata_sff_dev_classify - Parse returned ATA device signature
1858 * @dev: ATA device to classify (starting at zero)
1859 * @present: device seems present
1860 * @r_err: Value of error register on completion
1861 *
1862 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1863 * an ATA/ATAPI-defined set of values is placed in the ATA
1864 * shadow registers, indicating the results of device detection
1865 * and diagnostics.
1866 *
1867 * Select the ATA device, and read the values from the ATA shadow
1868 * registers. Then parse according to the Error register value,
1869 * and the spec-defined values examined by ata_dev_classify().
1870 *
1871 * LOCKING:
1872 * caller.
1873 *
1874 * RETURNS:
1875 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1876 */
ata_sff_dev_classify(struct ata_device * dev,int present,u8 * r_err)1877 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1878 u8 *r_err)
1879 {
1880 struct ata_port *ap = dev->link->ap;
1881 struct ata_taskfile tf;
1882 unsigned int class;
1883 u8 err;
1884
1885 ap->ops->sff_dev_select(ap, dev->devno);
1886
1887 memset(&tf, 0, sizeof(tf));
1888
1889 ap->ops->sff_tf_read(ap, &tf);
1890 err = tf.feature;
1891 if (r_err)
1892 *r_err = err;
1893
1894 /* see if device passed diags: continue and warn later */
1895 if (err == 0)
1896 /* diagnostic fail : do nothing _YET_ */
1897 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1898 else if (err == 1)
1899 /* do nothing */ ;
1900 else if ((dev->devno == 0) && (err == 0x81))
1901 /* do nothing */ ;
1902 else
1903 return ATA_DEV_NONE;
1904
1905 /* determine if device is ATA or ATAPI */
1906 class = ata_dev_classify(&tf);
1907
1908 if (class == ATA_DEV_UNKNOWN) {
1909 /* If the device failed diagnostic, it's likely to
1910 * have reported incorrect device signature too.
1911 * Assume ATA device if the device seems present but
1912 * device signature is invalid with diagnostic
1913 * failure.
1914 */
1915 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1916 class = ATA_DEV_ATA;
1917 else
1918 class = ATA_DEV_NONE;
1919 } else if ((class == ATA_DEV_ATA) &&
1920 (ap->ops->sff_check_status(ap) == 0))
1921 class = ATA_DEV_NONE;
1922
1923 return class;
1924 }
1925 EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
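
/*
 * Illustrative sketch (hypothetical driver code; my_issue_reset() is a
 * placeholder for controller-specific reset logic): a driver with its
 * own reset method would typically classify each device once the reset
 * completes, exactly as the stock ata_sff_softreset() below does:
 *
 *	static int my_softreset(struct ata_link *link, unsigned int *classes,
 *				unsigned long deadline)
 *	{
 *		int rc = my_issue_reset(link->ap, deadline);
 *
 *		if (rc)
 *			return rc;
 *		classes[0] = ata_sff_dev_classify(&link->device[0], 1, NULL);
 *		return 0;
 *	}
 */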
1926
1927 /**
1928 * ata_sff_wait_after_reset - wait for devices to become ready after reset
1929 * @link: SFF link which is just reset
1930 * @devmask: mask of present devices
1931 * @deadline: deadline jiffies for the operation
1932 *
1933 * Wait for devices attached to SFF @link to become ready after
1934 * reset. This includes an initial 150ms wait to avoid accessing
1935 * the TF status register too early.
1936 *
1937 * LOCKING:
1938 * Kernel thread context (may sleep).
1939 *
1940 * RETURNS:
1941 * 0 on success, -ENODEV if some or all of the devices in @devmask
1942 * don't seem to exist, -errno on other errors.
1943 */
1944 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1945 unsigned long deadline)
1946 {
1947 struct ata_port *ap = link->ap;
1948 struct ata_ioports *ioaddr = &ap->ioaddr;
1949 unsigned int dev0 = devmask & (1 << 0);
1950 unsigned int dev1 = devmask & (1 << 1);
1951 int rc, ret = 0;
1952
1953 ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1954
1955 /* always check readiness of the master device */
1956 rc = ata_sff_wait_ready(link, deadline);
1957 /* -ENODEV means the odd clown forgot the D7 pulldown resistor
1958 * and TF status is 0xff, bail out on it too.
1959 */
1960 if (rc)
1961 return rc;
1962
1963 /* if device 1 was found in ata_devchk, wait for register
1964 * access briefly, then wait for BSY to clear.
1965 */
1966 if (dev1) {
1967 int i;
1968
1969 ap->ops->sff_dev_select(ap, 1);
1970
1971 /* Wait for register access. Some ATAPI devices fail
1972 * to set nsect/lbal after reset, so don't waste too
1973 * much time on it. We're gonna wait for !BSY anyway.
1974 */
1975 for (i = 0; i < 2; i++) {
1976 u8 nsect, lbal;
1977
1978 nsect = ioread8(ioaddr->nsect_addr);
1979 lbal = ioread8(ioaddr->lbal_addr);
1980 if ((nsect == 1) && (lbal == 1))
1981 break;
1982 ata_msleep(ap, 50); /* give drive a breather */
1983 }
1984
1985 rc = ata_sff_wait_ready(link, deadline);
1986 if (rc) {
1987 if (rc != -ENODEV)
1988 return rc;
1989 ret = rc;
1990 }
1991 }
1992
1993 /* is all this really necessary? */
1994 ap->ops->sff_dev_select(ap, 0);
1995 if (dev1)
1996 ap->ops->sff_dev_select(ap, 1);
1997 if (dev0)
1998 ap->ops->sff_dev_select(ap, 0);
1999
2000 return ret;
2001 }
2002 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
2003
2004 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
2005 unsigned long deadline)
2006 {
2007 struct ata_ioports *ioaddr = &ap->ioaddr;
2008
2009 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2010
2011 /* software reset. causes dev0 to be selected */
2012 iowrite8(ap->ctl, ioaddr->ctl_addr);
2013 udelay(20); /* FIXME: flush */
2014 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2015 udelay(20); /* FIXME: flush */
2016 iowrite8(ap->ctl, ioaddr->ctl_addr);
2017 ap->last_ctl = ap->ctl;
2018
2019 /* wait for the port to become ready */
2020 return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
2021 }
2022
2023 /**
2024 * ata_sff_softreset - reset host port via ATA SRST
2025 * @link: ATA link to reset
2026 * @classes: resulting classes of attached devices
2027 * @deadline: deadline jiffies for the operation
2028 *
2029 * Reset host port using ATA SRST.
2030 *
2031 * LOCKING:
2032 * Kernel thread context (may sleep)
2033 *
2034 * RETURNS:
2035 * 0 on success, -errno otherwise.
2036 */
2037 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2038 unsigned long deadline)
2039 {
2040 struct ata_port *ap = link->ap;
2041 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2042 unsigned int devmask = 0;
2043 int rc;
2044 u8 err;
2045
2046 DPRINTK("ENTER\n");
2047
2048 /* determine if device 0/1 are present */
2049 if (ata_devchk(ap, 0))
2050 devmask |= (1 << 0);
2051 if (slave_possible && ata_devchk(ap, 1))
2052 devmask |= (1 << 1);
2053
2054 /* select device 0 again */
2055 ap->ops->sff_dev_select(ap, 0);
2056
2057 /* issue bus reset */
2058 DPRINTK("about to softreset, devmask=%x\n", devmask);
2059 rc = ata_bus_softreset(ap, devmask, deadline);
2060 /* if link is occupied, -ENODEV too is an error */
2061 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2062 ata_link_err(link, "SRST failed (errno=%d)\n", rc);
2063 return rc;
2064 }
2065
2066 /* determine by signature whether we have ATA or ATAPI devices */
2067 classes[0] = ata_sff_dev_classify(&link->device[0],
2068 devmask & (1 << 0), &err);
2069 if (slave_possible && err != 0x81)
2070 classes[1] = ata_sff_dev_classify(&link->device[1],
2071 devmask & (1 << 1), &err);
2072
2073 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2074 return 0;
2075 }
2076 EXPORT_SYMBOL_GPL(ata_sff_softreset);
2077
2078 /**
2079 * sata_sff_hardreset - reset host port via SATA phy reset
2080 * @link: link to reset
2081 * @class: resulting class of attached device
2082 * @deadline: deadline jiffies for the operation
2083 *
2084 * SATA phy-reset host port using DET bits of SControl register,
2085 * wait for !BSY and classify the attached device.
2086 *
2087 * LOCKING:
2088 * Kernel thread context (may sleep)
2089 *
2090 * RETURNS:
2091 * 0 on success, -errno otherwise.
2092 */
2093 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2094 unsigned long deadline)
2095 {
2096 struct ata_eh_context *ehc = &link->eh_context;
2097 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2098 bool online;
2099 int rc;
2100
2101 rc = sata_link_hardreset(link, timing, deadline, &online,
2102 ata_sff_check_ready);
2103 if (online)
2104 *class = ata_sff_dev_classify(link->device, 1, NULL);
2105
2106 DPRINTK("EXIT, class=%u\n", *class);
2107 return rc;
2108 }
2109 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2110
2111 /**
2112 * ata_sff_postreset - SFF postreset callback
2113 * @link: the target SFF ata_link
2114 * @classes: classes of attached devices
2115 *
2116 * This function is invoked after a successful reset. It first
2117 * calls ata_std_postreset() and performs SFF specific postreset
2118 * processing.
2119 *
2120 * LOCKING:
2121 * Kernel thread context (may sleep)
2122 */
2123 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2124 {
2125 struct ata_port *ap = link->ap;
2126
2127 ata_std_postreset(link, classes);
2128
2129 /* is double-select really necessary? */
2130 if (classes[0] != ATA_DEV_NONE)
2131 ap->ops->sff_dev_select(ap, 1);
2132 if (classes[1] != ATA_DEV_NONE)
2133 ap->ops->sff_dev_select(ap, 0);
2134
2135 /* bail out if no device is present */
2136 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2137 DPRINTK("EXIT, no device\n");
2138 return;
2139 }
2140
2141 /* set up device control */
2142 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2143 ata_sff_set_devctl(ap, ap->ctl);
2144 ap->last_ctl = ap->ctl;
2145 }
2146 }
2147 EXPORT_SYMBOL_GPL(ata_sff_postreset);
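
/*
 * Illustrative sketch (hypothetical driver; my_apply_fixups() is a
 * placeholder): controllers that need extra post-reset setup commonly
 * call the stock callback first and then layer their own handling:
 *
 *	static void my_postreset(struct ata_link *link, unsigned int *classes)
 *	{
 *		ata_sff_postreset(link, classes);
 *		my_apply_fixups(link->ap);
 *	}
 */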
2148
2149 /**
2150 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2151 * @qc: command
2152 *
2153 * Drain the FIFO and device of any stuck data following a command
2154 * that failed to complete. In some cases this is necessary before a
2155 * reset will recover the device.
2156 *
2157 */
2158
2159 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2160 {
2161 int count;
2162 struct ata_port *ap;
2163
2164 /* We only need to flush incoming data when a command was running */
2165 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2166 return;
2167
2168 ap = qc->ap;
2169 /* Drain up to 64K of data before we give up this recovery method */
2170 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2171 && count < 65536; count += 2)
2172 ioread16(ap->ioaddr.data_addr);
2173
2174 /* Can become DEBUG later */
2175 if (count)
2176 ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2177
2178 }
2179 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2180
2181 /**
2182 * ata_sff_error_handler - Stock error handler for SFF controller
2183 * @ap: port to handle error for
2184 *
2185 * Stock error handler for SFF controller. It can handle both
2186 * PATA and SATA controllers. Many controllers should be able to
2187 * use this EH as-is or with some added handling before and
2188 * after.
2189 *
2190 * LOCKING:
2191 * Kernel thread context (may sleep)
2192 */
2193 void ata_sff_error_handler(struct ata_port *ap)
2194 {
2195 ata_reset_fn_t softreset = ap->ops->softreset;
2196 ata_reset_fn_t hardreset = ap->ops->hardreset;
2197 struct ata_queued_cmd *qc;
2198 unsigned long flags;
2199
2200 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2201 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2202 qc = NULL;
2203
2204 spin_lock_irqsave(ap->lock, flags);
2205
2206 /*
2207 * We *MUST* do FIFO draining before we issue a reset as
2208 * several devices helpfully clear their internal state and
2209 * will lock solid if we touch the data port post reset. Pass
2210 * qc in case anyone wants to do different PIO/DMA recovery or
2211 * has per command fixups
2212 */
2213 if (ap->ops->sff_drain_fifo)
2214 ap->ops->sff_drain_fifo(qc);
2215
2216 spin_unlock_irqrestore(ap->lock, flags);
2217
2218 /* ignore ata_sff_softreset if ctl isn't accessible */
2219 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2220 softreset = NULL;
2221
2222 /* ignore built-in hardresets if SCR access is not available */
2223 if ((hardreset == sata_std_hardreset ||
2224 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2225 hardreset = NULL;
2226
2227 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2228 ap->ops->postreset);
2229 }
2230 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
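
/*
 * Illustrative sketch (hypothetical driver; my_save_state() and
 * my_restore_state() are placeholders): the "added handling before
 * and after" mentioned above typically preserves controller state
 * that a reset would clobber:
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		my_save_state(ap);
 *		ata_sff_error_handler(ap);
 *		my_restore_state(ap);
 *	}
 */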
2231
2232 /**
2233 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
2234 * @ioaddr: IO address structure to be initialized
2235 *
2236 * Utility function which initializes data_addr, error_addr,
2237 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2238 * device_addr, status_addr, and command_addr to standard offsets
2239 * relative to cmd_addr.
2240 *
2241 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2242 */
2243 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2244 {
2245 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2246 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2247 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2248 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2249 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2250 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2251 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2252 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2253 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2254 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2255 }
2256 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
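
/*
 * Illustrative sketch (assumes a memory-mapped controller whose
 * taskfile block starts at the hypothetical "base" and whose control
 * block sits at "base + 0x100"): only cmd_addr needs to be set before
 * the call; ctl_addr and altstatus_addr are filled in by hand because
 * this helper does not touch them:
 *
 *	ap->ioaddr.cmd_addr = base;
 *	ata_sff_std_ports(&ap->ioaddr);
 *	ap->ioaddr.ctl_addr = base + 0x100;
 *	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
 */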
2257
2258 #ifdef CONFIG_PCI
2259
2260 static int ata_resources_present(struct pci_dev *pdev, int port)
2261 {
2262 int i;
2263
2264 /* Check the PCI resources for this channel are enabled */
2265 port = port * 2;
2266 for (i = 0; i < 2; i++) {
2267 if (pci_resource_start(pdev, port + i) == 0 ||
2268 pci_resource_len(pdev, port + i) == 0)
2269 return 0;
2270 }
2271 return 1;
2272 }
2273
2274 /**
2275 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2276 * @host: target ATA host
2277 *
2278 * Acquire native PCI ATA resources for @host and initialize the
2279 * first two ports of @host accordingly. Ports marked dummy are
2280 * skipped and allocation failure makes the port dummy.
2281 *
2282 * Note that native PCI resources are valid even for legacy hosts
2283 * as we fix up pdev resources array early in boot, so this
2284 * function can be used for both native and legacy SFF hosts.
2285 *
2286 * LOCKING:
2287 * Inherited from calling layer (may sleep).
2288 *
2289 * RETURNS:
2290 * 0 if at least one port is initialized, -ENODEV if no port is
2291 * available.
2292 */
2293 int ata_pci_sff_init_host(struct ata_host *host)
2294 {
2295 struct device *gdev = host->dev;
2296 struct pci_dev *pdev = to_pci_dev(gdev);
2297 unsigned int mask = 0;
2298 int i, rc;
2299
2300 /* request, iomap BARs and init port addresses accordingly */
2301 for (i = 0; i < 2; i++) {
2302 struct ata_port *ap = host->ports[i];
2303 int base = i * 2;
2304 void __iomem * const *iomap;
2305
2306 if (ata_port_is_dummy(ap))
2307 continue;
2308
2309 /* Discard disabled ports. Some controllers show
2310 * their unused channels this way. Disabled ports are
2311 * made dummy.
2312 */
2313 if (!ata_resources_present(pdev, i)) {
2314 ap->ops = &ata_dummy_port_ops;
2315 continue;
2316 }
2317
2318 rc = pcim_iomap_regions(pdev, 0x3 << base,
2319 dev_driver_string(gdev));
2320 if (rc) {
2321 dev_warn(gdev,
2322 "failed to request/iomap BARs for port %d (errno=%d)\n",
2323 i, rc);
2324 if (rc == -EBUSY)
2325 pcim_pin_device(pdev);
2326 ap->ops = &ata_dummy_port_ops;
2327 continue;
2328 }
2329 host->iomap = iomap = pcim_iomap_table(pdev);
2330
2331 ap->ioaddr.cmd_addr = iomap[base];
2332 ap->ioaddr.altstatus_addr =
2333 ap->ioaddr.ctl_addr = (void __iomem *)
2334 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2335 ata_sff_std_ports(&ap->ioaddr);
2336
2337 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2338 (unsigned long long)pci_resource_start(pdev, base),
2339 (unsigned long long)pci_resource_start(pdev, base + 1));
2340
2341 mask |= 1 << i;
2342 }
2343
2344 if (!mask) {
2345 dev_err(gdev, "no available native port\n");
2346 return -ENODEV;
2347 }
2348
2349 return 0;
2350 }
2351 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2352
2353 /**
2354 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2355 * @pdev: target PCI device
2356 * @ppi: array of port_info, must be enough for two ports
2357 * @r_host: out argument for the initialized ATA host
2358 *
2359 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2360 * all PCI resources and initialize it accordingly in one go.
2361 *
2362 * LOCKING:
2363 * Inherited from calling layer (may sleep).
2364 *
2365 * RETURNS:
2366 * 0 on success, -errno otherwise.
2367 */
2368 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2369 const struct ata_port_info * const *ppi,
2370 struct ata_host **r_host)
2371 {
2372 struct ata_host *host;
2373 int rc;
2374
2375 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2376 return -ENOMEM;
2377
2378 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2379 if (!host) {
2380 dev_err(&pdev->dev, "failed to allocate ATA host\n");
2381 rc = -ENOMEM;
2382 goto err_out;
2383 }
2384
2385 rc = ata_pci_sff_init_host(host);
2386 if (rc)
2387 goto err_out;
2388
2389 devres_remove_group(&pdev->dev, NULL);
2390 *r_host = host;
2391 return 0;
2392
2393 err_out:
2394 devres_release_group(&pdev->dev, NULL);
2395 return rc;
2396 }
2397 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2398
2399 /**
2400 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2401 * @host: target SFF ATA host
2402 * @irq_handler: irq_handler used when requesting IRQ(s)
2403 * @sht: scsi_host_template to use when registering the host
2404 *
2405 * This is the counterpart of ata_host_activate() for SFF ATA
2406 * hosts. This separate helper is necessary because SFF hosts
2407 * use two separate interrupts in legacy mode.
2408 *
2409 * LOCKING:
2410 * Inherited from calling layer (may sleep).
2411 *
2412 * RETURNS:
2413 * 0 on success, -errno otherwise.
2414 */
2415 int ata_pci_sff_activate_host(struct ata_host *host,
2416 irq_handler_t irq_handler,
2417 struct scsi_host_template *sht)
2418 {
2419 struct device *dev = host->dev;
2420 struct pci_dev *pdev = to_pci_dev(dev);
2421 const char *drv_name = dev_driver_string(host->dev);
2422 int legacy_mode = 0, rc;
2423
2424 rc = ata_host_start(host);
2425 if (rc)
2426 return rc;
2427
2428 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2429 u8 tmp8, mask;
2430
2431 /* TODO: What if one channel is in native mode ... */
2432 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2433 mask = (1 << 2) | (1 << 0);
2434 if ((tmp8 & mask) != mask)
2435 legacy_mode = 1;
2436 #if defined(CONFIG_NO_ATA_LEGACY)
2437 /* Some platforms with PCI limits cannot address compat
2438 port space. In that case we punt if their firmware has
2439 left a device in compatibility mode */
2440 if (legacy_mode) {
2441 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2442 return -EOPNOTSUPP;
2443 }
2444 #endif
2445 }
2446
2447 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2448 return -ENOMEM;
2449
2450 if (!legacy_mode && pdev->irq) {
2451 int i;
2452
2453 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2454 IRQF_SHARED, drv_name, host);
2455 if (rc)
2456 goto out;
2457
2458 for (i = 0; i < 2; i++) {
2459 if (ata_port_is_dummy(host->ports[i]))
2460 continue;
2461 ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2462 }
2463 } else if (legacy_mode) {
2464 if (!ata_port_is_dummy(host->ports[0])) {
2465 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2466 irq_handler, IRQF_SHARED,
2467 drv_name, host);
2468 if (rc)
2469 goto out;
2470
2471 ata_port_desc(host->ports[0], "irq %d",
2472 ATA_PRIMARY_IRQ(pdev));
2473 }
2474
2475 if (!ata_port_is_dummy(host->ports[1])) {
2476 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2477 irq_handler, IRQF_SHARED,
2478 drv_name, host);
2479 if (rc)
2480 goto out;
2481
2482 ata_port_desc(host->ports[1], "irq %d",
2483 ATA_SECONDARY_IRQ(pdev));
2484 }
2485 }
2486
2487 rc = ata_host_register(host, sht);
2488 out:
2489 if (rc == 0)
2490 devres_remove_group(dev, NULL);
2491 else
2492 devres_release_group(dev, NULL);
2493
2494 return rc;
2495 }
2496 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
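
/*
 * Illustrative sketch (hypothetical driver; my_port_info, my_sht and
 * my_priv are placeholders): drivers that must touch the host between
 * preparation and activation use the two helpers separately instead
 * of ata_pci_sff_init_one():
 *
 *	static int my_init_one(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *		if (rc)
 *			return rc;
 *		host->private_data = &my_priv;
 *		return ata_pci_sff_activate_host(host, ata_sff_interrupt,
 *						 &my_sht);
 *	}
 */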
2497
2498 static const struct ata_port_info *ata_sff_find_valid_pi(
2499 const struct ata_port_info * const *ppi)
2500 {
2501 int i;
2502
2503 /* look up the first valid port_info */
2504 for (i = 0; i < 2 && ppi[i]; i++)
2505 if (ppi[i]->port_ops != &ata_dummy_port_ops)
2506 return ppi[i];
2507
2508 return NULL;
2509 }
2510
2511 static int ata_pci_init_one(struct pci_dev *pdev,
2512 const struct ata_port_info * const *ppi,
2513 struct scsi_host_template *sht, void *host_priv,
2514 int hflags, bool bmdma)
2515 {
2516 struct device *dev = &pdev->dev;
2517 const struct ata_port_info *pi;
2518 struct ata_host *host = NULL;
2519 int rc;
2520
2521 DPRINTK("ENTER\n");
2522
2523 pi = ata_sff_find_valid_pi(ppi);
2524 if (!pi) {
2525 dev_err(&pdev->dev, "no valid port_info specified\n");
2526 return -EINVAL;
2527 }
2528
2529 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2530 return -ENOMEM;
2531
2532 rc = pcim_enable_device(pdev);
2533 if (rc)
2534 goto out;
2535
2536 #ifdef CONFIG_ATA_BMDMA
2537 if (bmdma)
2538 /* prepare and activate BMDMA host */
2539 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2540 else
2541 #endif
2542 /* prepare and activate SFF host */
2543 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2544 if (rc)
2545 goto out;
2546 host->private_data = host_priv;
2547 host->flags |= hflags;
2548
2549 #ifdef CONFIG_ATA_BMDMA
2550 if (bmdma) {
2551 pci_set_master(pdev);
2552 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2553 } else
2554 #endif
2555 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2556 out:
2557 if (rc == 0)
2558 devres_remove_group(&pdev->dev, NULL);
2559 else
2560 devres_release_group(&pdev->dev, NULL);
2561
2562 return rc;
2563 }
2564
2565 /**
2566 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2567 * @pdev: Controller to be initialized
2568 * @ppi: array of port_info, must be enough for two ports
2569 * @sht: scsi_host_template to use when registering the host
2570 * @host_priv: host private_data
2571 * @hflag: host flags
2572 *
2573 * This is a helper function which can be called from a driver's
2574 * xxx_init_one() probe function if the hardware uses traditional
2575 * IDE taskfile registers and is PIO only.
2576 *
2577 * ASSUMPTION:
2578 * Nobody makes a single channel controller that appears solely as
2579 * the secondary legacy port on PCI.
2580 *
2581 * LOCKING:
2582 * Inherited from PCI layer (may sleep).
2583 *
2584 * RETURNS:
2585 * Zero on success, negative errno-based value on error.
2586 */
2587 int ata_pci_sff_init_one(struct pci_dev *pdev,
2588 const struct ata_port_info * const *ppi,
2589 struct scsi_host_template *sht, void *host_priv, int hflag)
2590 {
2591 return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, false);
2592 }
2593 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
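
/*
 * Illustrative sketch (hypothetical driver; my_port_info and my_sht
 * are placeholders): for simple PIO-only hardware the entire probe
 * can reduce to a single call:
 *
 *	static int my_init_one(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL, 0);
 *	}
 */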
2594
2595 #endif /* CONFIG_PCI */
2596
2597 /*
2598 * BMDMA support
2599 */
2600
2601 #ifdef CONFIG_ATA_BMDMA
2602
2603 const struct ata_port_operations ata_bmdma_port_ops = {
2604 .inherits = &ata_sff_port_ops,
2605
2606 .error_handler = ata_bmdma_error_handler,
2607 .post_internal_cmd = ata_bmdma_post_internal_cmd,
2608
2609 .qc_prep = ata_bmdma_qc_prep,
2610 .qc_issue = ata_bmdma_qc_issue,
2611
2612 .sff_irq_clear = ata_bmdma_irq_clear,
2613 .bmdma_setup = ata_bmdma_setup,
2614 .bmdma_start = ata_bmdma_start,
2615 .bmdma_stop = ata_bmdma_stop,
2616 .bmdma_status = ata_bmdma_status,
2617
2618 .port_start = ata_bmdma_port_start,
2619 };
2620 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2621
2622 const struct ata_port_operations ata_bmdma32_port_ops = {
2623 .inherits = &ata_bmdma_port_ops,
2624
2625 .sff_data_xfer = ata_sff_data_xfer32,
2626 .port_start = ata_bmdma_port_start32,
2627 };
2628 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
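
/*
 * Illustrative sketch (hypothetical driver; my_set_piomode and
 * my_set_dmamode are placeholder callbacks): BMDMA drivers normally
 * inherit these ops and override only what their hardware needs:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= my_set_piomode,
 *		.set_dmamode	= my_set_dmamode,
 *	};
 */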
2629
2630 /**
2631 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
2632 * @qc: Metadata associated with taskfile to be transferred
2633 *
2634 * Fill PCI IDE PRD (scatter-gather) table with segments
2635 * associated with the current disk command.
2636 *
2637 * LOCKING:
2638 * spin_lock_irqsave(host lock)
2639 *
2640 */
2641 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2642 {
2643 struct ata_port *ap = qc->ap;
2644 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2645 struct scatterlist *sg;
2646 unsigned int si, pi;
2647
2648 pi = 0;
2649 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2650 u32 addr, offset;
2651 u32 sg_len, len;
2652
2653 /* determine if physical DMA addr spans 64K boundary.
2654 * Note h/w doesn't support 64-bit, so we unconditionally
2655 * truncate dma_addr_t to u32.
2656 */
2657 addr = (u32) sg_dma_address(sg);
2658 sg_len = sg_dma_len(sg);
2659
2660 while (sg_len) {
2661 offset = addr & 0xffff;
2662 len = sg_len;
2663 if ((offset + sg_len) > 0x10000)
2664 len = 0x10000 - offset;
2665
2666 prd[pi].addr = cpu_to_le32(addr);
2667 prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2668 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2669
2670 pi++;
2671 sg_len -= len;
2672 addr += len;
2673 }
2674 }
2675
2676 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2677 }
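
/*
 * Worked example of the boundary split above: a 0x3000-byte segment
 * at DMA address 0x1fffe000 starts at offset 0xe000 within its 64K
 * page, so offset + sg_len = 0x11000 exceeds 0x10000 and the segment
 * is emitted as two PRD entries:
 *
 *	PRD[n]     = (0x1fffe000, 0x2000)	first chunk, up to the boundary
 *	PRD[n + 1] = (0x20000000, 0x1000)	remainder
 *
 * guaranteeing that no single PRD entry crosses a 64K boundary.
 */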
2678
2679 /**
2680 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2681 * @qc: Metadata associated with taskfile to be transferred
2682 *
2683 * Fill PCI IDE PRD (scatter-gather) table with segments
2684 * associated with the current disk command. Perform the fill
2685 * so that we avoid writing any 64K-length records for
2686 * controllers that don't follow the spec.
2687 *
2688 * LOCKING:
2689 * spin_lock_irqsave(host lock)
2690 *
2691 */
2692 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2693 {
2694 struct ata_port *ap = qc->ap;
2695 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2696 struct scatterlist *sg;
2697 unsigned int si, pi;
2698
2699 pi = 0;
2700 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2701 u32 addr, offset;
2702 u32 sg_len, len, blen;
2703
2704 /* determine if physical DMA addr spans 64K boundary.
2705 * Note h/w doesn't support 64-bit, so we unconditionally
2706 * truncate dma_addr_t to u32.
2707 */
2708 addr = (u32) sg_dma_address(sg);
2709 sg_len = sg_dma_len(sg);
2710
2711 while (sg_len) {
2712 offset = addr & 0xffff;
2713 len = sg_len;
2714 if ((offset + sg_len) > 0x10000)
2715 len = 0x10000 - offset;
2716
2717 blen = len & 0xffff;
2718 prd[pi].addr = cpu_to_le32(addr);
2719 if (blen == 0) {
2720 /* Some PATA chipsets like the CS5530 can't
2721 cope with 0x0000 meaning 64K as the spec
2722 says */
2723 prd[pi].flags_len = cpu_to_le32(0x8000);
2724 blen = 0x8000;
2725 prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2726 }
2727 prd[pi].flags_len = cpu_to_le32(blen);
2728 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2729
2730 pi++;
2731 sg_len -= len;
2732 addr += len;
2733 }
2734 }
2735
2736 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2737 }
2738
2739 /**
2740 * ata_bmdma_qc_prep - Prepare taskfile for submission
2741 * @qc: Metadata associated with taskfile to be prepared
2742 *
2743 * Prepare ATA taskfile for submission.
2744 *
2745 * LOCKING:
2746 * spin_lock_irqsave(host lock)
2747 */
2748 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2749 {
2750 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2751 return;
2752
2753 ata_bmdma_fill_sg(qc);
2754 }
2755 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2756
2757 /**
2758 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2759 * @qc: Metadata associated with taskfile to be prepared
2760 *
2761 * Prepare ATA taskfile for submission.
2762 *
2763 * LOCKING:
2764 * spin_lock_irqsave(host lock)
2765 */
2766 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2767 {
2768 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2769 return;
2770
2771 ata_bmdma_fill_sg_dumb(qc);
2772 }
2773 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2774
2775 /**
2776 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2777 * @qc: command to issue to device
2778 *
2779 * This function issues a PIO, NODATA or DMA command to a
2780 * SFF/BMDMA controller. PIO and NODATA are handled by
2781 * ata_sff_qc_issue().
2782 *
2783 * LOCKING:
2784 * spin_lock_irqsave(host lock)
2785 *
2786 * RETURNS:
2787 * Zero on success, AC_ERR_* mask on failure
2788 */
2789 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2790 {
2791 struct ata_port *ap = qc->ap;
2792 struct ata_link *link = qc->dev->link;
2793
2794 /* defer PIO handling to sff_qc_issue */
2795 if (!ata_is_dma(qc->tf.protocol))
2796 return ata_sff_qc_issue(qc);
2797
2798 /* select the device */
2799 ata_dev_select(ap, qc->dev->devno, 1, 0);
2800
2801 /* start the command */
2802 switch (qc->tf.protocol) {
2803 case ATA_PROT_DMA:
2804 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2805
2806 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2807 ap->ops->bmdma_setup(qc); /* set up bmdma */
2808 ap->ops->bmdma_start(qc); /* initiate bmdma */
2809 ap->hsm_task_state = HSM_ST_LAST;
2810 break;
2811
2812 case ATAPI_PROT_DMA:
2813 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2814
2815 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2816 ap->ops->bmdma_setup(qc); /* set up bmdma */
2817 ap->hsm_task_state = HSM_ST_FIRST;
2818
2819 /* send cdb by polling if no cdb interrupt */
2820 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2821 ata_sff_queue_pio_task(link, 0);
2822 break;
2823
2824 default:
2825 WARN_ON(1);
2826 return AC_ERR_SYSTEM;
2827 }
2828
2829 return 0;
2830 }
2831 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2832
2833 /**
2834 * ata_bmdma_port_intr - Handle BMDMA port interrupt
2835 * @ap: Port on which interrupt arrived (possibly...)
2836 * @qc: Taskfile currently active in engine
2837 *
2838 * Handle port interrupt for given queued command.
2839 *
2840 * LOCKING:
2841 * spin_lock_irqsave(host lock)
2842 *
2843 * RETURNS:
2844 * One if interrupt was handled, zero if not (shared irq).
2845 */
2846 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2847 {
2848 struct ata_eh_info *ehi = &ap->link.eh_info;
2849 u8 host_stat = 0;
2850 bool bmdma_stopped = false;
2851 unsigned int handled;
2852
2853 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2854 /* check status of DMA engine */
2855 host_stat = ap->ops->bmdma_status(ap);
2856 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2857
2858 /* if it's not our irq... */
2859 if (!(host_stat & ATA_DMA_INTR))
2860 return ata_sff_idle_irq(ap);
2861
2862 /* before we do anything else, clear DMA-Start bit */
2863 ap->ops->bmdma_stop(qc);
2864 bmdma_stopped = true;
2865
2866 if (unlikely(host_stat & ATA_DMA_ERR)) {
2867 /* error when transferring data to/from memory */
2868 qc->err_mask |= AC_ERR_HOST_BUS;
2869 ap->hsm_task_state = HSM_ST_ERR;
2870 }
2871 }
2872
2873 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2874
2875 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2876 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2877
2878 return handled;
2879 }
2880 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2881
2882 /**
2883 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2884 * @irq: irq line (unused)
2885 * @dev_instance: pointer to our ata_host information structure
2886 *
2887 * Default interrupt handler for PCI IDE devices. Calls
2888 * ata_bmdma_port_intr() for each port that is not disabled.
2889 *
2890 * LOCKING:
2891 * Obtains host lock during operation.
2892 *
2893 * RETURNS:
2894 * IRQ_NONE or IRQ_HANDLED.
2895 */
2896 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2897 {
2898 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2899 }
2900 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2901
2902 /**
2903 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
2904 * @ap: port to handle error for
2905 *
2906 * Stock error handler for BMDMA controller. It can handle both
2907 * PATA and SATA controllers. Most BMDMA controllers should be
2908 * able to use this EH as-is or with some added handling before
2909 * and after.
2910 *
2911 * LOCKING:
2912 * Kernel thread context (may sleep)
2913 */
2914 void ata_bmdma_error_handler(struct ata_port *ap)
2915 {
2916 struct ata_queued_cmd *qc;
2917 unsigned long flags;
2918 bool thaw = false;
2919
2920 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2921 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2922 qc = NULL;
2923
2924 /* reset PIO HSM and stop DMA engine */
2925 spin_lock_irqsave(ap->lock, flags);
2926
2927 if (qc && ata_is_dma(qc->tf.protocol)) {
2928 u8 host_stat;
2929
2930 host_stat = ap->ops->bmdma_status(ap);
2931
2932 /* BMDMA controllers indicate host bus error by
2933 * setting DMA_ERR bit and timing out. As it wasn't
2934 * really a timeout event, adjust error mask and
2935 * cancel frozen state.
2936 */
2937 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2938 qc->err_mask = AC_ERR_HOST_BUS;
2939 thaw = true;
2940 }
2941
2942 ap->ops->bmdma_stop(qc);
2943
2944 /* if we're gonna thaw, make sure IRQ is clear */
2945 if (thaw) {
2946 ap->ops->sff_check_status(ap);
2947 if (ap->ops->sff_irq_clear)
2948 ap->ops->sff_irq_clear(ap);
2949 }
2950 }
2951
2952 spin_unlock_irqrestore(ap->lock, flags);
2953
2954 if (thaw)
2955 ata_eh_thaw_port(ap);
2956
2957 ata_sff_error_handler(ap);
2958 }
2959 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2960
2961 /**
2962 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2963 * @qc: internal command to clean up
2964 *
2965 * LOCKING:
2966 * Kernel thread context (may sleep)
2967 */
2968 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2969 {
2970 struct ata_port *ap = qc->ap;
2971 unsigned long flags;
2972
2973 if (ata_is_dma(qc->tf.protocol)) {
2974 spin_lock_irqsave(ap->lock, flags);
2975 ap->ops->bmdma_stop(qc);
2976 spin_unlock_irqrestore(ap->lock, flags);
2977 }
2978 }
2979 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2980
2981 /**
2982 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2983 * @ap: Port associated with this ATA transaction.
2984 *
2985 * Clear interrupt and error flags in DMA status register.
2986 *
2987 * May be used as the irq_clear() entry in ata_port_operations.
2988 *
2989 * LOCKING:
2990 * spin_lock_irqsave(host lock)
2991 */
2992 void ata_bmdma_irq_clear(struct ata_port *ap)
2993 {
2994 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2995
2996 if (!mmio)
2997 return;
2998
2999 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
3000 }
3001 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
3002
3003 /**
3004 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3005 * @qc: Info associated with this ATA transaction.
3006 *
3007 * LOCKING:
3008 * spin_lock_irqsave(host lock)
3009 */
3010 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3011 {
3012 struct ata_port *ap = qc->ap;
3013 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3014 u8 dmactl;
3015
3016 /* load PRD table addr. */
3017 mb(); /* make sure PRD table writes are visible to controller */
3018 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3019
3020 /* specify data direction, triple-check start bit is clear */
3021 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3022 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3023 if (!rw)
3024 dmactl |= ATA_DMA_WR;
3025 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3026
3027 /* issue r/w command */
3028 ap->ops->sff_exec_command(ap, &qc->tf);
3029 }
3030 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
3031
3032 /**
3033 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3034 * @qc: Info associated with this ATA transaction.
3035 *
3036 * LOCKING:
3037 * spin_lock_irqsave(host lock)
3038 */
3039 void ata_bmdma_start(struct ata_queued_cmd *qc)
3040 {
3041 struct ata_port *ap = qc->ap;
3042 u8 dmactl;
3043
3044 /* start host DMA transaction */
3045 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3046 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3047
3048 /* Strictly, one may wish to issue an ioread8() here, to
3049 * flush the mmio write. However, control also passes
3050 * to the hardware at this point, and it will interrupt
3051 * us when we are to resume control. So, in effect,
3052 * we don't care when the mmio write flushes.
3053 * Further, a read of the DMA status register _immediately_
3054 * following the write may not be what certain flaky hardware
3055 * is expecting, so I think it is best to not add a readb()
3056 * without first testing all the MMIO ATA cards/mobos.
3057 * Or maybe I'm just being paranoid.
3058 *
3059 * FIXME: The posting of this write means I/O starts are
3060 * unnecessarily delayed for MMIO
3061 */
3062 }
3063 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3064
3065 /**
3066 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3067 * @qc: Command we are ending DMA for
3068 *
3069 * Clears the ATA_DMA_START flag in the dma control register
3070 *
3071 * May be used as the bmdma_stop() entry in ata_port_operations.
3072 *
3073 * LOCKING:
3074 * spin_lock_irqsave(host lock)
3075 */
3076 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3077 {
3078 struct ata_port *ap = qc->ap;
3079 void __iomem *mmio = ap->ioaddr.bmdma_addr;
3080
3081 /* clear start/stop bit */
3082 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3083 mmio + ATA_DMA_CMD);
3084
3085 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3086 ata_sff_dma_pause(ap);
3087 }
3088 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3089
3090 /**
3091 * ata_bmdma_status - Read PCI IDE BMDMA status
3092 * @ap: Port associated with this ATA transaction.
3093 *
3094 * Read and return BMDMA status register.
3095 *
3096 * May be used as the bmdma_status() entry in ata_port_operations.
3097 *
3098 * LOCKING:
3099 * spin_lock_irqsave(host lock)
3100 */
3101 u8 ata_bmdma_status(struct ata_port *ap)
3102 {
3103 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3104 }
3105 EXPORT_SYMBOL_GPL(ata_bmdma_status);
3106
3107
3108 /**
3109 * ata_bmdma_port_start - Set port up for bmdma.
3110 * @ap: Port to initialize
3111 *
3112 * Called just after data structures for each port are
3113 * initialized. Allocates space for PRD table.
3114 *
3115 * May be used as the port_start() entry in ata_port_operations.
3116 *
3117 * LOCKING:
3118 * Inherited from caller.
3119 */
3120 int ata_bmdma_port_start(struct ata_port *ap)
3121 {
3122 if (ap->mwdma_mask || ap->udma_mask) {
3123 ap->bmdma_prd =
3124 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3125 &ap->bmdma_prd_dma, GFP_KERNEL);
3126 if (!ap->bmdma_prd)
3127 return -ENOMEM;
3128 }
3129
3130 return 0;
3131 }
3132 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3133
3134 /**
3135 * ata_bmdma_port_start32 - Set port up for dma.
3136 * @ap: Port to initialize
3137 *
3138 * Called just after data structures for each port are
3139 * initialized. Enables 32bit PIO and allocates space for PRD
3140 * table.
3141 *
3142 * May be used as the port_start() entry in ata_port_operations for
3143 * devices that are capable of 32bit PIO.
3144 *
3145 * LOCKING:
3146 * Inherited from caller.
3147 */
3148 int ata_bmdma_port_start32(struct ata_port *ap)
3149 {
3150 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3151 return ata_bmdma_port_start(ap);
3152 }
3153 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3154
3155 #ifdef CONFIG_PCI
3156
3157 /**
3158 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
3159 * @pdev: PCI device
3160 *
3161 * Some PCI ATA devices report simplex mode but in fact can be told to
3162 * enter non simplex mode. This implements the necessary logic to
3163 * perform the task on such devices. Calling it on other devices will
3164 * have -undefined- behaviour.
3165 */
3166 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3167 {
3168 unsigned long bmdma = pci_resource_start(pdev, 4);
3169 u8 simplex;
3170
3171 if (bmdma == 0)
3172 return -ENOENT;
3173
3174 simplex = inb(bmdma + 0x02);
3175 outb(simplex & 0x60, bmdma + 0x02);
3176 simplex = inb(bmdma + 0x02);
3177 if (simplex & 0x80)
3178 return -EOPNOTSUPP;
3179 return 0;
3180 }
3181 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
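
/*
 * Illustrative sketch: a driver that knows the simplex flag on its
 * controller is bogus would typically try to clear it after BMDMA
 * init and lift ATA_HOST_SIMPLEX only if that succeeds:
 *
 *	if ((host->flags & ATA_HOST_SIMPLEX) &&
 *	    ata_pci_bmdma_clear_simplex(pdev) == 0)
 *		host->flags &= ~ATA_HOST_SIMPLEX;
 */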
3182
3183 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3184 {
3185 int i;
3186
3187 dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3188
3189 for (i = 0; i < 2; i++) {
3190 host->ports[i]->mwdma_mask = 0;
3191 host->ports[i]->udma_mask = 0;
3192 }
3193 }
3194
3195 /**
3196 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3197 * @host: target ATA host
3198 *
3199 * Acquire PCI BMDMA resources and initialize @host accordingly.
3200 *
3201 * LOCKING:
3202 * Inherited from calling layer (may sleep).
3203 */
3204 void ata_pci_bmdma_init(struct ata_host *host)
3205 {
3206 struct device *gdev = host->dev;
3207 struct pci_dev *pdev = to_pci_dev(gdev);
3208 int i, rc;
3209
3210 /* No BAR4 allocation: No DMA */
3211 if (pci_resource_start(pdev, 4) == 0) {
3212 ata_bmdma_nodma(host, "BAR4 is zero");
3213 return;
3214 }
3215
3216 /*
3217 * Some controllers require BMDMA region to be initialized
3218 * even if DMA is not in use to clear IRQ status via
3219 * ->sff_irq_clear method. Try to initialize bmdma_addr
3220 * regardless of dma masks.
3221 */
3222 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3223 if (rc)
3224 ata_bmdma_nodma(host, "failed to set dma mask");
3225 if (!rc) {
3226 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3227 if (rc)
3228 ata_bmdma_nodma(host,
3229 "failed to set consistent dma mask");
3230 }
3231
3232 /* request and iomap DMA region */
3233 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3234 if (rc) {
3235 ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3236 return;
3237 }
3238 host->iomap = pcim_iomap_table(pdev);
3239
3240 for (i = 0; i < 2; i++) {
3241 struct ata_port *ap = host->ports[i];
3242 void __iomem *bmdma = host->iomap[4] + 8 * i;
3243
3244 if (ata_port_is_dummy(ap))
3245 continue;
3246
3247 ap->ioaddr.bmdma_addr = bmdma;
3248 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3249 (ioread8(bmdma + 2) & 0x80))
3250 host->flags |= ATA_HOST_SIMPLEX;
3251
3252 ata_port_desc(ap, "bmdma 0x%llx",
3253 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3254 }
3255 }
3256 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3257
3258 /**
3259 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3260 * @pdev: target PCI device
3261 * @ppi: array of port_info, must be enough for two ports
3262 * @r_host: out argument for the initialized ATA host
3263 *
3264 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3265 * resources and initialize it accordingly in one go.
3266 *
3267 * LOCKING:
3268 * Inherited from calling layer (may sleep).
3269 *
3270 * RETURNS:
3271 * 0 on success, -errno otherwise.
3272 */
3273 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3274 const struct ata_port_info * const * ppi,
3275 struct ata_host **r_host)
3276 {
3277 int rc;
3278
3279 rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3280 if (rc)
3281 return rc;
3282
3283 ata_pci_bmdma_init(*r_host);
3284 return 0;
3285 }
3286 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3287
3288 /**
3289 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3290 * @pdev: Controller to be initialized
3291 * @ppi: array of port_info, must be enough for two ports
3292 * @sht: scsi_host_template to use when registering the host
3293 * @host_priv: host private_data
3294 * @hflags: host flags
3295 *
3296 * This function is similar to ata_pci_sff_init_one() but also
3297 * takes care of BMDMA initialization.
3298 *
3299 * LOCKING:
3300 * Inherited from PCI layer (may sleep).
3301 *
3302 * RETURNS:
3303 * Zero on success, negative errno-based value on error.
3304 */
3305 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3306 const struct ata_port_info * const * ppi,
3307 struct scsi_host_template *sht, void *host_priv,
3308 int hflags)
3309 {
3310 return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, true);
3311 }
3312 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
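
/*
 * Illustrative sketch (hypothetical driver; my_port_info and my_sht
 * are placeholders): the BMDMA counterpart of the PIO-only probe
 * shown after ata_pci_sff_init_one():
 *
 *	static int my_init_one(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *
 *		return ata_pci_bmdma_init_one(pdev, ppi, &my_sht, NULL, 0);
 *	}
 */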
3313
3314 #endif /* CONFIG_PCI */
3315 #endif /* CONFIG_ATA_BMDMA */
3316
3317 /**
3318 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
3319 * @ap: Port to initialize
3320 *
3321 * Called on port allocation to initialize SFF/BMDMA specific
3322 * fields.
3323 *
3324 * LOCKING:
3325 * None.
3326 */
3327 void ata_sff_port_init(struct ata_port *ap)
3328 {
3329 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3330 ap->ctl = ATA_DEVCTL_OBS;
3331 ap->last_ctl = 0xFF;
3332 }
3333
3334 int __init ata_sff_init(void)
3335 {
3336 ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3337 if (!ata_sff_wq)
3338 return -ENOMEM;
3339
3340 return 0;
3341 }
3342
3343 void ata_sff_exit(void)
3344 {
3345 destroy_workqueue(ata_sff_wq);
3346 }
3347