/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/tqueue.h>
#include <scsi/scsi.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout);
static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
static int fgb(u32 bitmap);
static int ata_choose_xfer_mode(const struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out);
static void __ata_qc_complete(struct ata_queued_cmd *qc);

static unsigned int ata_unique_id = 1;
static LIST_HEAD(ata_probe_list);
static spinlock_t ata_module_lock = SPIN_LOCK_UNLOCKED;

int atapi_enabled = 0;
MODULE_PARM(atapi_enabled, "i");
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 *	ata_tf_load_pio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_tf_load_mmio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}


/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO
 *	or PIO as indicated by the ATA_FLAG_MMIO flag.
 *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
 *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
 *	hob_lbal, hob_lbam, and hob_lbah.
 *
 *	This function waits for idle (!BUSY and !DRQ) after writing
 *	registers.  If the control register has a new value, this
 *	function also waits for idle after writing control and before
 *	writing the remaining registers.
 *
 *	May be used as the tf_load() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}
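
/*
 * Usage sketch (illustrative, not part of the original file): the
 * helpers above that are documented as usable ata_port_operations
 * entries can be plugged straight into a low-level driver's ops
 * table.  A hypothetical driver "foo" with a standard taskfile
 * interface might do:
 *
 *	static struct ata_port_operations foo_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.exec_command	= ata_exec_command,
 *		.check_status	= ata_check_status,
 *		.dev_select	= ata_std_dev_select,
 *		...
 *	};
 *
 * "foo"/"foo_ops" are invented names; a real ops table also fills in
 * reset, DMA and interrupt entries.
 */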

/**
 *	ata_exec_command_pio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command_mmio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO/MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_tf_read_pio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = inb(ioaddr->error_addr);
	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf via MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = readb((void __iomem *)ioaddr->error_addr);
	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}


/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
 *	is set, also reads the hob registers.
 *
 *	May be used as the tf_read() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}

/**
 *	ata_check_status_pio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 *	ata_check_status_mmio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	via MMIO and returns its value.  This also clears pending
 *	interrupts from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}


/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	May be used as the check_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}


/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
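
/*
 * For reference, the 20-byte Register - Host to Device FIS built by
 * ata_tf_to_fis() above:
 *
 *	byte  0:      FIS type (0x27)
 *	byte  1:      bit 7 = C (command), bits 3:0 = PM port
 *	bytes 2-3:    command, features
 *	bytes 4-7:    LBA low/mid/high, device
 *	bytes 8-11:   LBA low/mid/high (exp), features (exp)
 *	bytes 12-13:  sector count, sector count (exp)
 *	byte  15:     device control
 *	bytes 14, 16-19: reserved, written as zero
 */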

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;

	int index, lba48, write;

	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 4;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 8;
	}

	tf->command = ata_rw_cmds[index + lba48 + write];
}
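
/*
 * Worked example of the ata_rw_cmds[] indexing above: an LBA48 DMA
 * write gives index = 8 (dma) + 2 (lba48) + 1 (write) = 11, i.e.
 * ATA_CMD_WRITE_EXT, while an LBA28 PIO-multi read gives
 * 0 + 0 + 0 = 0, i.e. ATA_CMD_READ_MULTI.
 */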

static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};

/**
 *	ata_mode_string - convert transfer mode bitmask to string
 *	@mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mask, or the constant C string "<n/a>".
 */

static const char *ata_mode_string(unsigned int mask)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	return xfer_mode_str[i];
}
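
/*
 * The mask decoded above packs all transfer modes into one bitmap,
 * matching xfer_mode_str[] order: bits 0-7 are UDMA0-7, bits
 * ATA_SHIFT_MWDMA.. are MWDMA0-2, and bits ATA_SHIFT_PIO.. are
 * PIO0-4.  E.g. mask 0x7f (UDMA0-6) has bit 6 as its highest set
 * bit and prints as "UDMA/133".
 */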

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
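
/*
 * Only the LBA mid/high pair is checked above: 00h/00h and 3ch/c3h
 * are taken as ATA, 14h/ebh and 69h/96h as ATAPI, and anything else
 * is reported as unknown -- hence "sufficient for uniqueness" rather
 * than an exhaustive signature parse.
 */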

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 */

static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;

	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}

/**
 *	ata_dev_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return.  Must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_id_string(const u16 *id, unsigned char *s,
		       unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
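
/*
 * Typical use (illustrative): pull the model string out of an
 * IDENTIFY page.  Per the spec, the model number occupies words
 * 27-46 (40 chars), the serial number words 10-19 (20 chars), and
 * the firmware revision words 23-26 (8 chars):
 *
 *	unsigned char model[40 + 1];
 *
 *	ata_dev_id_string(dev->id, model, 27, 40);
 *	model[40] = '\0';
 */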


/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);		/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: Device whose IDENTIFY DEVICE page we will dump
 *
 *	Dump selected 16-bit words from a detected device's
 *	IDENTIFY DEVICE page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const struct ata_device *dev)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}

/*
 *	Compute the PIO modes available for this device. This is not as
 *	trivial as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 */

static unsigned int ata_pio_modes(const struct ata_device *adev)
{
	u16 modes;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
		modes <<= 3;
		modes |= 0x7;
		return modes;
	}

	/* If word 64 isn't valid then Word 51 high byte holds the PIO
	   timing number for the maximum.  Turn it into a mask and return it */
	modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1;
	return modes;
}
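
/*
 * Example of the mask arithmetic above: a drive whose advanced-PIO
 * field (ATA_ID_PIO_MODES) reports PIO3+PIO4 (0x03) yields
 * (0x03 << 3) | 0x7 = 0x1f, i.e. PIO0-4; an old drive reporting
 * PIO timing number 2 yields (2 << 2) - 1 = 0x07, i.e. PIO0-2.
 */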

static int ata_qc_wait_err(struct ata_queued_cmd *qc,
			   struct completion *wait)
{
	int rc = 0;

	wait_for_completion(wait);

	return rc;
}

/**
 *	ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
 *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
 *
 *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 *	command, and read back the 512-byte device information page.
 *	The device information page is fed to us via the standard
 *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
 *	using standard PIO-IN paths)
 *
 *	After reading the device information page, we use several
 *	bits of information from it to initialize data structures
 *	that will be used during the lifetime of the ata_device.
 *	Other data from the info page is used to disqualify certain
 *	older ATA devices we do not wish to support.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 */

static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int major_version;
	u16 tmp;
	unsigned long xfer_modes;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->nsect = 1;

retry:
	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		qc->tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		goto err_out;
	else
		ata_qc_wait_err(qc, &wait);

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->ops->tf_read(ap, &qc->tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (qc->tf.command & ATA_ERR) {
		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (dev->class == ATA_DEV_ATA)) {
			u8 err = qc->tf.feature;
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				qc->cursg = 0;
				qc->cursg_ofs = 0;
				qc->cursect = 0;
				qc->nsect = 1;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bit 8 of word 49) */
	if (!ata_id_has_dma(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes)
		xfer_modes = ata_pio_modes(dev);

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		/* get major version */
		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (major_version = 14; major_version >= 1; major_version--)
			if (tmp & (1 << major_version))
				break;

		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
			ata_dev_init_params(ap, dev);

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			ata_dev_reread_id(ap, dev);
		}

		if (ata_id_has_lba(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA;

			if (ata_id_has_lba48(dev->id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				dev->n_sectors = ata_id_u64(dev->id, 100);
			} else {
				dev->n_sectors = ata_id_u32(dev->id, 60);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= dev->id[1];
			dev->heads	= dev->id[3];
			dev->sectors	= dev->id[6];
			dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;

			if (ata_id_current_chs_valid(dev->id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = dev->id[54];
				dev->heads     = dev->id[55];
				dev->sectors   = dev->id[56];

				dev->n_sectors = ata_id_u32(dev->id, 57);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);

		}

		ap->host->max_cmd_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		if (ata_id_is_ata(dev->id))		/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}


static inline u8 ata_dev_knobble(const struct ata_port *ap)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
}
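
/*
 * The "knobble" test above flags the suspicious combination of a
 * SATA cable with a device whose IDENTIFY data does not claim SATA:
 * that is almost certainly a PATA drive behind a SATA->PATA bridge,
 * so ata_dev_config() below clamps it to bridge-safe limits.
 */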

/**
 * 	ata_dev_config - Run device-specific handlers and check for
 * 			 SATA->PATA bridges
 * 	@ap: Bus
 * 	@i:  Device
 *
 * 	LOCKING:
 */

void ata_dev_config(struct ata_port *ap, unsigned int i)
{
	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap)) {
		printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			ap->id, ap->device->devno);
		ap->udma_mask &= ATA_UDMA5;
		ap->host->max_sectors = ATA_MAX_SECTORS;
		ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
		ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, &ap->device[i]);
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			found = 1;
			ata_dev_config(ap, i);
		}
	}

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
err_out:
	return -1;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else {
		sstatus = scr_read(ap, SCR_STATUS);
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, sstatus);
		ata_port_disable(ap);
	}

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
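
/*
 * Column order in the table above, matching struct ata_timing and
 * the quantize/merge helpers below (all values in nanoseconds):
 *
 *	mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
 *
 * The *8b entries time 8-bit (taskfile register) transfers; active,
 * recover and cycle time the data transfers themselves.
 */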

#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
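
/*
 * ENOUGH() is a round-up division: the number of whole clock units
 * of length 'unit' needed to cover 'v'.  EZ() additionally keeps a
 * zero ("field unused") value as zero.  Since the t->* values are in
 * nanoseconds and are scaled by 1000 above, T and UT are expected in
 * picoseconds; e.g. a 70 ns setup time on a 30000 ps (33 MHz) clock
 * quantizes to ENOUGH(70000, 30000) = 3 cycles.
 */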

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
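
/*
 * Sketch of how a controller driver would use the helpers above
 * (hypothetical 33 MHz bus, i.e. T = 30000 ps per clock, no UDMA):
 *
 *	struct ata_timing t;
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, 30000, 0))
 *		return;
 *	foo_program_timings(ap, t.setup, t.active, t.recover);
 *
 * foo_program_timings() stands in for whatever register writes the
 * specific controller needs.
 */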

static const struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA,	XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA,	XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO,	XFER_PIO_0 },
};

static inline u8 base_from_shift(unsigned int shift)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
		if (xfer_mode_classes[i].shift == shift)
			return xfer_mode_classes[i].base;

	return 0xff;
}

static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	int ofs, idx;
	u8 base;

	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
		ap->id, dev->devno, xfer_mode_str[idx]);
}
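
/*
 * Example of the index math above: for XFER_UDMA_5 the base is
 * XFER_UDMA_0, so ofs = 5; with xfer_shift == ATA_SHIFT_UDMA (0)
 * that makes idx = 5, the "UDMA/100" entry of xfer_mode_str[].
 * MWDMA and PIO modes land in the later entries via their larger
 * shifts.
 */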
1681 
ata_host_set_pio(struct ata_port * ap)1682 static int ata_host_set_pio(struct ata_port *ap)
1683 {
1684 	unsigned int mask;
1685 	int x, i;
1686 	u8 base, xfer_mode;
1687 
1688 	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1689 	x = fgb(mask);
1690 	if (x < 0) {
1691 		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1692 		return -1;
1693 	}
1694 
1695 	base = base_from_shift(ATA_SHIFT_PIO);
1696 	xfer_mode = base + x;
1697 
1698 	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1699 		(int)base, (int)xfer_mode, mask, x);
1700 
1701 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1702 		struct ata_device *dev = &ap->device[i];
1703 		if (ata_dev_present(dev)) {
1704 			dev->pio_mode = xfer_mode;
1705 			dev->xfer_mode = xfer_mode;
1706 			dev->xfer_shift = ATA_SHIFT_PIO;
1707 			if (ap->ops->set_piomode)
1708 				ap->ops->set_piomode(ap, dev);
1709 		}
1710 	}
1711 
1712 	return 0;
1713 }
1714 
ata_host_set_dma(struct ata_port * ap,u8 xfer_mode,unsigned int xfer_shift)1715 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1716 			    unsigned int xfer_shift)
1717 {
1718 	int i;
1719 
1720 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1721 		struct ata_device *dev = &ap->device[i];
1722 		if (ata_dev_present(dev)) {
1723 			dev->dma_mode = xfer_mode;
1724 			dev->xfer_mode = xfer_mode;
1725 			dev->xfer_shift = xfer_shift;
1726 			if (ap->ops->set_dmamode)
1727 				ap->ops->set_dmamode(ap, dev);
1728 		}
1729 	}
1730 }
1731 
1732 /**
1733  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
1734  *	@ap: port on which timings will be programmed
1735  *
1736  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1737  *
1738  *	LOCKING:
1739  *	PCI/etc. bus probe sem.
1740  *
1741  */
ata_set_mode(struct ata_port * ap)1742 static void ata_set_mode(struct ata_port *ap)
1743 {
1744 	unsigned int xfer_shift;
1745 	u8 xfer_mode;
1746 	int rc;
1747 
1748 	/* step 1: always set host PIO timings */
1749 	rc = ata_host_set_pio(ap);
1750 	if (rc)
1751 		goto err_out;
1752 
1753 	/* step 2: choose the best data xfer mode */
1754 	xfer_mode = xfer_shift = 0;
1755 	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1756 	if (rc)
1757 		goto err_out;
1758 
1759 	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
1760 	if (xfer_shift != ATA_SHIFT_PIO)
1761 		ata_host_set_dma(ap, xfer_mode, xfer_shift);
1762 
1763 	/* step 4: update devices' xfer mode */
1764 	ata_dev_set_mode(ap, &ap->device[0]);
1765 	ata_dev_set_mode(ap, &ap->device[1]);
1766 
1767 	if (ap->flags & ATA_FLAG_PORT_DISABLED)
1768 		return;
1769 
1770 	if (ap->ops->post_set_mode)
1771 		ap->ops->post_set_mode(ap);
1772 
1773 	return;
1774 
1775 err_out:
1776 	ata_port_disable(ap);
1777 }
1778 
1779 /**
1780  *	ata_busy_sleep - sleep until BSY clears, or timeout
1781  *	@ap: port containing status register to be polled
1782  *	@tmout_pat: impatience timeout
1783  *	@tmout: overall timeout
1784  *
1785  *	Sleep until ATA Status register bit BSY clears,
1786  *	or a timeout occurs.
1787  *
1788  *	LOCKING: None.
1789  *
1790  */
1791 
ata_busy_sleep(struct ata_port * ap,unsigned long tmout_pat,unsigned long tmout)1792 static unsigned int ata_busy_sleep (struct ata_port *ap,
1793 				    unsigned long tmout_pat,
1794 			    	    unsigned long tmout)
1795 {
1796 	unsigned long timer_start, timeout;
1797 	u8 status;
1798 
1799 	status = ata_busy_wait(ap, ATA_BUSY, 300);
1800 	timer_start = jiffies;
1801 	timeout = timer_start + tmout_pat;
1802 	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1803 		msleep(50);
1804 		status = ata_busy_wait(ap, ATA_BUSY, 3);
1805 	}
1806 
1807 	if (status & ATA_BUSY)
1808 		printk(KERN_WARNING "ata%u is slow to respond, "
1809 		       "please be patient\n", ap->id);
1810 
1811 	timeout = timer_start + tmout;
1812 	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1813 		msleep(50);
1814 		status = ata_chk_status(ap);
1815 	}
1816 
1817 	if (status & ATA_BUSY) {
1818 		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1819 		       ap->id, tmout / HZ);
1820 		return 1;
1821 	}
1822 
1823 	return 0;
1824 }
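
/*
 * Usage sketch, mirroring the boot-time callers below: warn after the
 * short "impatience" timeout, give up after the long one.
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		printk(KERN_ERR "ata%u: device wedged\n", ap->id);
 */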
1825 
1826 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1827 {
1828 	struct ata_ioports *ioaddr = &ap->ioaddr;
1829 	unsigned int dev0 = devmask & (1 << 0);
1830 	unsigned int dev1 = devmask & (1 << 1);
1831 	unsigned long timeout;
1832 
1833 	/* if device 0 was found in ata_devchk, wait for its
1834 	 * BSY bit to clear
1835 	 */
1836 	if (dev0)
1837 		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1838 
1839 	/* if device 1 was found in ata_devchk, wait for
1840 	 * register access, then wait for BSY to clear
1841 	 */
1842 	timeout = jiffies + ATA_TMOUT_BOOT;
1843 	while (dev1) {
1844 		u8 nsect, lbal;
1845 
1846 		ap->ops->dev_select(ap, 1);
1847 		if (ap->flags & ATA_FLAG_MMIO) {
1848 			nsect = readb((void __iomem *) ioaddr->nsect_addr);
1849 			lbal = readb((void __iomem *) ioaddr->lbal_addr);
1850 		} else {
1851 			nsect = inb(ioaddr->nsect_addr);
1852 			lbal = inb(ioaddr->lbal_addr);
1853 		}
1854 		if ((nsect == 1) && (lbal == 1))
1855 			break;
1856 		if (time_after(jiffies, timeout)) {
1857 			dev1 = 0;
1858 			break;
1859 		}
1860 		msleep(50);	/* give drive a breather */
1861 	}
1862 	if (dev1)
1863 		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1864 
1865 	/* is all this really necessary? */
1866 	ap->ops->dev_select(ap, 0);
1867 	if (dev1)
1868 		ap->ops->dev_select(ap, 1);
1869 	if (dev0)
1870 		ap->ops->dev_select(ap, 0);
1871 }
1872 
1873 /**
1874  *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1875  *	@ap: Port to reset and probe
1876  *
1877  *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1878  *	probe the bus.  Not often used these days.
1879  *
1880  *	LOCKING:
1881  *	PCI/etc. bus probe sem.
1882  *	Obtains host_set lock.
1883  *
1884  */
1885 
1886 static unsigned int ata_bus_edd(struct ata_port *ap)
1887 {
1888 	struct ata_taskfile tf;
1889 	unsigned long flags;
1890 
1891 	/* set up execute-device-diag (bus reset) taskfile */
1892 	/* also, take interrupts to a known state (disabled) */
1893 	DPRINTK("execute-device-diag\n");
1894 	ata_tf_init(ap, &tf, 0);
1895 	tf.ctl |= ATA_NIEN;
1896 	tf.command = ATA_CMD_EDD;
1897 	tf.protocol = ATA_PROT_NODATA;
1898 
1899 	/* do bus reset */
1900 	spin_lock_irqsave(&ap->host_set->lock, flags);
1901 	ata_tf_to_host(ap, &tf);
1902 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
1903 
1904 	/* spec says at least 2ms.  but who knows with those
1905 	 * crazy ATAPI devices...
1906 	 */
1907 	msleep(150);
1908 
1909 	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1910 }
1911 
1912 static unsigned int ata_bus_softreset(struct ata_port *ap,
1913 				      unsigned int devmask)
1914 {
1915 	struct ata_ioports *ioaddr = &ap->ioaddr;
1916 
1917 	DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1918 
1919 	/* software reset.  causes dev0 to be selected */
1920 	if (ap->flags & ATA_FLAG_MMIO) {
1921 		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1922 		udelay(20);	/* FIXME: flush */
1923 		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1924 		udelay(20);	/* FIXME: flush */
1925 		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1926 	} else {
1927 		outb(ap->ctl, ioaddr->ctl_addr);
1928 		udelay(10);
1929 		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1930 		udelay(10);
1931 		outb(ap->ctl, ioaddr->ctl_addr);
1932 	}
1933 
1934 	/* spec mandates ">= 2ms" before checking status.
1935 	 * We wait 150ms, because that was the magic delay used for
1936 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1937 	 * between when the ATA command register is written, and then
1938 	 * status is checked.  Because waiting for "a while" before
1939 	 * checking status is fine, post SRST, we perform this magic
1940 	 * delay here as well.
1941 	 */
1942 	msleep(150);
1943 
1944 	ata_bus_post_reset(ap, devmask);
1945 
1946 	return 0;
1947 }
1948 
1949 /**
1950  *	ata_bus_reset - reset host port and associated ATA channel
1951  *	@ap: port to reset
1952  *
1953  *	This is typically the first time we actually start issuing
1954  *	commands to the ATA channel.  We wait for BSY to clear, then
1955  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1956  *	result.  Determine what devices, if any, are on the channel
1957  *	by looking at the device 0/1 error register.  Look at the signature
1958  *	stored in each device's taskfile registers, to determine if
1959  *	the device is ATA or ATAPI.
1960  *
1961  *	LOCKING:
1962  *	PCI/etc. bus probe sem.
1963  *	Obtains host_set lock.
1964  *
1965  *	SIDE EFFECTS:
1966  *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1967  */
1968 
1969 void ata_bus_reset(struct ata_port *ap)
1970 {
1971 	struct ata_ioports *ioaddr = &ap->ioaddr;
1972 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1973 	u8 err;
1974 	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1975 
1976 	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1977 
1978 	/* determine if device 0/1 are present */
1979 	if (ap->flags & ATA_FLAG_SATA_RESET)
1980 		dev0 = 1;
1981 	else {
1982 		dev0 = ata_devchk(ap, 0);
1983 		if (slave_possible)
1984 			dev1 = ata_devchk(ap, 1);
1985 	}
1986 
1987 	if (dev0)
1988 		devmask |= (1 << 0);
1989 	if (dev1)
1990 		devmask |= (1 << 1);
1991 
1992 	/* select device 0 again */
1993 	ap->ops->dev_select(ap, 0);
1994 
1995 	/* issue bus reset */
1996 	if (ap->flags & ATA_FLAG_SRST)
1997 		rc = ata_bus_softreset(ap, devmask);
1998 	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1999 		/* set up device control */
2000 		if (ap->flags & ATA_FLAG_MMIO)
2001 			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2002 		else
2003 			outb(ap->ctl, ioaddr->ctl_addr);
2004 		rc = ata_bus_edd(ap);
2005 	}
2006 
2007 	if (rc)
2008 		goto err_out;
2009 
2010 	/*
2011 	 * determine by signature whether we have ATA or ATAPI devices
2012 	 */
2013 	err = ata_dev_try_classify(ap, 0);
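	/* EDD diagnostic code 0x81 means "device 0 passed, device 1
	 * failed", so don't waste time classifying a dead slave
	 */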
2014 	if ((slave_possible) && (err != 0x81))
2015 		ata_dev_try_classify(ap, 1);
2016 
2017 	/* re-enable interrupts */
2018 	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
2019 		ata_irq_on(ap);
2020 
2021 	/* is double-select really necessary? */
2022 	if (ap->device[1].class != ATA_DEV_NONE)
2023 		ap->ops->dev_select(ap, 1);
2024 	if (ap->device[0].class != ATA_DEV_NONE)
2025 		ap->ops->dev_select(ap, 0);
2026 
2027 	/* if no devices were detected, disable this port */
2028 	if ((ap->device[0].class == ATA_DEV_NONE) &&
2029 	    (ap->device[1].class == ATA_DEV_NONE))
2030 		goto err_out;
2031 
2032 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2033 		/* set up device control for ATA_FLAG_SATA_RESET */
2034 		if (ap->flags & ATA_FLAG_MMIO)
2035 			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2036 		else
2037 			outb(ap->ctl, ioaddr->ctl_addr);
2038 	}
2039 
2040 	DPRINTK("EXIT\n");
2041 	return;
2042 
2043 err_out:
2044 	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2045 	ap->ops->port_disable(ap);
2046 
2047 	DPRINTK("EXIT\n");
2048 }
2049 
2050 static void ata_pr_blacklisted(const struct ata_port *ap,
2051 			       const struct ata_device *dev)
2052 {
2053 	printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
2054 		ap->id, dev->devno);
2055 }
2056 
2057 static const char *ata_dma_blacklist[] = {
2058 	"WDC AC11000H",
2059 	"WDC AC22100H",
2060 	"WDC AC32500H",
2061 	"WDC AC33100H",
2062 	"WDC AC31600H",
2063 	"WDC AC32100H",
2064 	"WDC AC23200L",
2065 	"Compaq CRD-8241B",
2066 	"CRD-8400B",
2067 	"CRD-8480B",
2068 	"CRD-8482B",
2069 	"CRD-84",
2070 	"SanDisk SDP3B",
2071 	"SanDisk SDP3B-64",
2072 	"SANYO CD-ROM CRD",
2073 	"HITACHI CDR-8",
2074 	"HITACHI CDR-8335",
2075 	"HITACHI CDR-8435",
2076 	"Toshiba CD-ROM XM-6202B",
2077 	"TOSHIBA CD-ROM XM-1702BC",
2078 	"CD-532E-A",
2079 	"E-IDE CD-ROM CR-840",
2080 	"CD-ROM Drive/F5A",
2081 	"WPI CDD-820",
2082 	"SAMSUNG CD-ROM SC-148C",
2083 	"SAMSUNG CD-ROM SC",
2084 	"SanDisk SDP3B-64",
2085 	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
2086 	"_NEC DV5800A",
2087 };
2088 
2089 static int ata_dma_blacklisted(const struct ata_device *dev)
2090 {
2091 	unsigned char model_num[40];
2092 	char *s;
2093 	unsigned int len;
2094 	int i;
2095 
2096 	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2097 			  sizeof(model_num));
2098 	s = &model_num[0];
2099 	len = strnlen(s, sizeof(model_num));
2100 
2101 	/* ATAPI specifies that empty space is blank-filled; remove blanks */
2102 	while ((len > 0) && (s[len - 1] == ' ')) {
2103 		len--;
2104 		s[len] = 0;
2105 	}
2106 
2107 	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2108 		if (!strncmp(ata_dma_blacklist[i], s, len))
2109 			return 1;
2110 
2111 	return 0;
2112 }
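
/*
 * Example: a drive whose IDENTIFY model string is "SanDisk SDP3B"
 * (blank-padded per the spec) is trimmed above and then matches the
 * table, so ata_get_mode_mask() below zeroes its DMA mask.
 */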
2113 
2114 static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2115 {
2116 	const struct ata_device *master, *slave;
2117 	unsigned int mask;
2118 
2119 	master = &ap->device[0];
2120 	slave = &ap->device[1];
2121 
2122 	assert (ata_dev_present(master) || ata_dev_present(slave));
2123 
2124 	if (shift == ATA_SHIFT_UDMA) {
2125 		mask = ap->udma_mask;
2126 		if (ata_dev_present(master)) {
2127 			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2128 			if (ata_dma_blacklisted(master)) {
2129 				mask = 0;
2130 				ata_pr_blacklisted(ap, master);
2131 			}
2132 		}
2133 		if (ata_dev_present(slave)) {
2134 			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2135 			if (ata_dma_blacklisted(slave)) {
2136 				mask = 0;
2137 				ata_pr_blacklisted(ap, slave);
2138 			}
2139 		}
2140 	}
2141 	else if (shift == ATA_SHIFT_MWDMA) {
2142 		mask = ap->mwdma_mask;
2143 		if (ata_dev_present(master)) {
2144 			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2145 			if (ata_dma_blacklisted(master)) {
2146 				mask = 0;
2147 				ata_pr_blacklisted(ap, master);
2148 			}
2149 		}
2150 		if (ata_dev_present(slave)) {
2151 			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2152 			if (ata_dma_blacklisted(slave)) {
2153 				mask = 0;
2154 				ata_pr_blacklisted(ap, slave);
2155 			}
2156 		}
2157 	}
2158 	else if (shift == ATA_SHIFT_PIO) {
2159 		mask = ap->pio_mask;
2160 		if (ata_dev_present(master)) {
2161 			/* spec doesn't return explicit support for
2162 			 * PIO0-2, so we fake it
2163 			 */
2164 			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2165 			tmp_mode <<= 3;
2166 			tmp_mode |= 0x7;
2167 			mask &= tmp_mode;
2168 		}
2169 		if (ata_dev_present(slave)) {
2170 			/* spec doesn't return explicit support for
2171 			 * PIO0-2, so we fake it
2172 			 */
2173 			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2174 			tmp_mode <<= 3;
2175 			tmp_mode |= 0x7;
2176 			mask &= tmp_mode;
2177 		}
2178 	}
2179 	else {
2180 		mask = 0xffffffff; /* shut up compiler warning */
2181 		BUG();
2182 	}
2183 
2184 	return mask;
2185 }
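
/*
 * Worked example for the UDMA case: a controller with udma_mask 0x3f
 * (UDMA0-5) and a device reporting 0x1f in the low byte of
 * id[ATA_ID_UDMA_MODES] gives 0x3f & 0x1f == 0x1f, so the best mode
 * both sides support is UDMA4.
 */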
2186 
2187 /* find greatest bit */
2188 static int fgb(u32 bitmap)
2189 {
2190 	unsigned int i;
2191 	int x = -1;
2192 
2193 	for (i = 0; i < 32; i++)
2194 		if (bitmap & (1 << i))
2195 			x = i;
2196 
2197 	return x;
2198 }
2199 
2200 /**
2201  *	ata_choose_xfer_mode - attempt to find best transfer mode
2202  *	@ap: Port for which an xfer mode will be selected
2203  *	@xfer_mode_out: (output) SET FEATURES - XFER MODE code
2204  *	@xfer_shift_out: (output) bit shift that selects this mode
2205  *
2206  *	Based on host and device capabilities, determine the
2207  *	maximum transfer mode that is amenable to all.
2208  *
2209  *	LOCKING:
2210  *	PCI/etc. bus probe sem.
2211  *
2212  *	RETURNS:
2213  *	Zero on success, negative on error.
2214  */
2215 
2216 static int ata_choose_xfer_mode(const struct ata_port *ap,
2217 				u8 *xfer_mode_out,
2218 				unsigned int *xfer_shift_out)
2219 {
2220 	unsigned int mask, shift;
2221 	int x, i;
2222 
2223 	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2224 		shift = xfer_mode_classes[i].shift;
2225 		mask = ata_get_mode_mask(ap, shift);
2226 
2227 		x = fgb(mask);
2228 		if (x >= 0) {
2229 			*xfer_mode_out = xfer_mode_classes[i].base + x;
2230 			*xfer_shift_out = shift;
2231 			return 0;
2232 		}
2233 	}
2234 
2235 	return -1;
2236 }
2237 
2238 /**
2239  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2240  *	@ap: Port associated with device @dev
2241  *	@dev: Device to which command will be sent
2242  *
2243  *	Issue SET FEATURES - XFER MODE command to device @dev
2244  *	on port @ap.
2245  *
2246  *	LOCKING:
2247  *	PCI/etc. bus probe sem.
2248  */
2249 
2250 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2251 {
2252 	DECLARE_COMPLETION(wait);
2253 	struct ata_queued_cmd *qc;
2254 	int rc;
2255 	unsigned long flags;
2256 
2257 	/* set up set-features taskfile */
2258 	DPRINTK("set features - xfer mode\n");
2259 
2260 	qc = ata_qc_new_init(ap, dev);
2261 	BUG_ON(qc == NULL);
2262 
2263 	qc->tf.command = ATA_CMD_SET_FEATURES;
2264 	qc->tf.feature = SETFEATURES_XFER;
2265 	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2266 	qc->tf.protocol = ATA_PROT_NODATA;
2267 	qc->tf.nsect = dev->xfer_mode;
2268 
2269 	qc->waiting = &wait;
2270 	qc->complete_fn = ata_qc_complete_noop;
2271 
2272 	spin_lock_irqsave(&ap->host_set->lock, flags);
2273 	rc = ata_qc_issue(qc);
2274 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
2275 
2276 	if (rc)
2277 		ata_port_disable(ap);
2278 	else
2279 		ata_qc_wait_err(qc, &wait);
2280 
2281 	DPRINTK("EXIT\n");
2282 }
2283 
2284 /**
2285  *	ata_dev_reread_id - re-read a device's IDENTIFY data
2286  *	@ap: port where the device is
2287  *	@dev: device whose IDENTIFY data is to be re-read
2288  *
2289  *	LOCKING:
 *	PCI/etc. bus probe sem. (as for ata_dev_set_xfermode() above)
2290  */
2291 
2292 static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2293 {
2294 	DECLARE_COMPLETION(wait);
2295 	struct ata_queued_cmd *qc;
2296 	unsigned long flags;
2297 	int rc;
2298 
2299 	qc = ata_qc_new_init(ap, dev);
2300 	BUG_ON(qc == NULL);
2301 
2302 	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
2303 	qc->dma_dir = DMA_FROM_DEVICE;
2304 
2305 	if (dev->class == ATA_DEV_ATA) {
2306 		qc->tf.command = ATA_CMD_ID_ATA;
2307 		DPRINTK("do ATA identify\n");
2308 	} else {
2309 		qc->tf.command = ATA_CMD_ID_ATAPI;
2310 		DPRINTK("do ATAPI identify\n");
2311 	}
2312 
2313 	qc->tf.flags |= ATA_TFLAG_DEVICE;
2314 	qc->tf.protocol = ATA_PROT_PIO;
2315 	qc->nsect = 1;
2316 
2317 	qc->waiting = &wait;
2318 	qc->complete_fn = ata_qc_complete_noop;
2319 
2320 	spin_lock_irqsave(&ap->host_set->lock, flags);
2321 	rc = ata_qc_issue(qc);
2322 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
2323 
2324 	if (rc)
2325 		goto err_out;
2326 
2327 	ata_qc_wait_err(qc, &wait);
2328 
2329 	swap_buf_le16(dev->id, ATA_ID_WORDS);
2330 
2331 	ata_dump_id(dev);
2332 
2333 	DPRINTK("EXIT\n");
2334 
2335 	return;
2336 err_out:
2337 	ata_port_disable(ap);
2338 }
2339 
2340 /**
2341  *	ata_dev_init_params - Issue INIT DEV PARAMS command
2342  *	@ap: Port associated with device @dev
2343  *	@dev: Device to which command will be sent
2344  *
2345  *	LOCKING:
 *	PCI/etc. bus probe sem. (as for ata_dev_set_xfermode() above)
2346  */
2347 
2348 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2349 {
2350 	DECLARE_COMPLETION(wait);
2351 	struct ata_queued_cmd *qc;
2352 	int rc;
2353 	unsigned long flags;
2354 	u16 sectors = dev->id[6];
2355 	u16 heads   = dev->id[3];
2356 
2357 	/* Number of sectors per track 1-255. Number of heads 1-16 */
2358 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2359 		return;
2360 
2361 	/* set up init dev params taskfile */
2362 	DPRINTK("init dev params \n");
2363 
2364 	qc = ata_qc_new_init(ap, dev);
2365 	BUG_ON(qc == NULL);
2366 
2367 	qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2368 	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2369 	qc->tf.protocol = ATA_PROT_NODATA;
2370 	qc->tf.nsect = sectors;
2371 	qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2372 
2373 	qc->waiting = &wait;
2374 	qc->complete_fn = ata_qc_complete_noop;
2375 
2376 	spin_lock_irqsave(&ap->host_set->lock, flags);
2377 	rc = ata_qc_issue(qc);
2378 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
2379 
2380 	if (rc)
2381 		ata_port_disable(ap);
2382 	else
2383 		ata_qc_wait_err(qc, &wait);
2384 
2385 	DPRINTK("EXIT\n");
2386 }
2387 
2388 /**
2389  *	ata_sg_clean - Unmap DMA memory associated with command
2390  *	@qc: Command containing DMA memory to be released
2391  *
2392  *	Unmap all mapped DMA memory associated with this command.
2393  *
2394  *	LOCKING:
2395  *	spin_lock_irqsave(host_set lock)
2396  */
2397 
2398 static void ata_sg_clean(struct ata_queued_cmd *qc)
2399 {
2400 	struct ata_port *ap = qc->ap;
2401 	struct scatterlist *sg = qc->__sg;
2402 	int dir = qc->dma_dir;
2403 	void *pad_buf = NULL;
2404 
2405 	assert(qc->flags & ATA_QCFLAG_DMAMAP);
2406 	assert(sg != NULL);
2407 
2408 	if (qc->flags & ATA_QCFLAG_SINGLE)
2409 		assert(qc->n_elem == 1);
2410 
2411 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2412 
2413 	/* if we padded the buffer out to a 32-bit boundary, and data
2414 	 * xfer direction is from-device, we must copy from the
2415 	 * pad buffer back into the supplied buffer
2416 	 */
2417 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2418 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2419 
2420 	if (qc->flags & ATA_QCFLAG_SG) {
2421 		if (qc->n_elem)
2422 			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2423 		/* restore last sg */
2424 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
2425 		if (pad_buf) {
2426 			struct scatterlist *psg = &qc->pad_sgent;
2427 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
2428 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2429 			kunmap_atomic(psg->page, KM_IRQ0);
2430 		}
2431 	} else {
2432 		if (sg_dma_len(&sg[0]) > 0)
2433 			dma_unmap_single(ap->host_set->dev,
2434 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2435 				dir);
2436 		/* restore sg */
2437 		sg->length += qc->pad_len;
2438 		if (pad_buf)
2439 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
2440 			       pad_buf, qc->pad_len);
2441 	}
2442 
2443 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
2444 	qc->__sg = NULL;
2445 }
2446 
2447 /**
2448  *	ata_fill_sg - Fill PCI IDE PRD table
2449  *	@qc: Metadata associated with taskfile to be transferred
2450  *
2451  *	Fill PCI IDE PRD (scatter-gather) table with segments
2452  *	associated with the current disk command.
2453  *
2454  *	LOCKING:
2455  *	spin_lock_irqsave(host_set lock)
2456  *
2457  */
2458 static void ata_fill_sg(struct ata_queued_cmd *qc)
2459 {
2460 	struct ata_port *ap = qc->ap;
2461 	struct scatterlist *sg;
2462 	unsigned int idx;
2463 
2464 	assert(qc->__sg != NULL);
2465 	assert(qc->n_elem > 0);
2466 
2467 	idx = 0;
2468 	ata_for_each_sg(sg, qc) {
2469 		u32 addr, offset;
2470 		u32 sg_len, len;
2471 
2472 		/* determine if physical DMA addr spans 64K boundary.
2473 		 * Note h/w doesn't support 64-bit, so we unconditionally
2474 		 * truncate dma_addr_t to u32.
2475 		 */
2476 		addr = (u32) sg_dma_address(sg);
2477 		sg_len = sg_dma_len(sg);
2478 
2479 		while (sg_len) {
2480 			offset = addr & 0xffff;
2481 			len = sg_len;
2482 			if ((offset + sg_len) > 0x10000)
2483 				len = 0x10000 - offset;
2484 
2485 			ap->prd[idx].addr = cpu_to_le32(addr);
2486 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2487 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2488 
2489 			idx++;
2490 			sg_len -= len;
2491 			addr += len;
2492 		}
2493 	}
2494 
2495 	if (idx)
2496 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2497 }
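
/*
 * Worked example of the 64K split above: a 0x2800-byte segment at bus
 * address 0x1f000 crosses a 64K boundary after 0x1000 bytes, so it is
 * emitted as two PRD entries, (0x1f000, 0x1000) and (0x20000, 0x1800).
 */
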
2498 /**
2499  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2500  *	@qc: Metadata associated with taskfile to check
2501  *
2502  *	Allow low-level driver to filter ATA PACKET commands, returning
2503  *	a status indicating whether or not it is OK to use DMA for the
2504  *	supplied PACKET command.
2505  *
2506  *	LOCKING:
2507  *	spin_lock_irqsave(host_set lock)
2508  *
2509  *	RETURNS: 0 when ATAPI DMA can be used,
2510  *	nonzero otherwise
2511  */
2512 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2513 {
2514 	struct ata_port *ap = qc->ap;
2515 	int rc = 0; /* Assume ATAPI DMA is OK by default */
2516 
2517 	if (ap->ops->check_atapi_dma)
2518 		rc = ap->ops->check_atapi_dma(qc);
2519 
2520 	return rc;
2521 }
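
/*
 * A low-level driver opts in by filling ->check_atapi_dma in its
 * ata_port_operations.  A hypothetical filter that always forces PIO
 * for PACKET commands would simply be:
 *
 *	static int foo_check_atapi_dma(struct ata_queued_cmd *qc)
 *	{
 *		return 1;
 *	}
 */
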
2522 /**
2523  *	ata_qc_prep - Prepare taskfile for submission
2524  *	@qc: Metadata associated with taskfile to be prepared
2525  *
2526  *	Prepare ATA taskfile for submission.
2527  *
2528  *	LOCKING:
2529  *	spin_lock_irqsave(host_set lock)
2530  */
2531 void ata_qc_prep(struct ata_queued_cmd *qc)
2532 {
2533 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2534 		return;
2535 
2536 	ata_fill_sg(qc);
2537 }
2538 
2539 /**
2540  *	ata_sg_init_one - Associate command with memory buffer
2541  *	@qc: Command to be associated
2542  *	@buf: Memory buffer
2543  *	@buflen: Length of memory buffer, in bytes.
2544  *
2545  *	Initialize the data-related elements of queued_cmd @qc
2546  *	to point to a single memory buffer, @buf of byte length @buflen.
2547  *
2548  *	LOCKING:
2549  *	spin_lock_irqsave(host_set lock)
2550  */
2551 
2552 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2553 {
2554 	struct scatterlist *sg;
2555 
2556 	qc->flags |= ATA_QCFLAG_SINGLE;
2557 
2558 	memset(&qc->sgent, 0, sizeof(qc->sgent));
2559 	qc->__sg = &qc->sgent;
2560 	qc->n_elem = 1;
2561 	qc->orig_n_elem = 1;
2562 	qc->buf_virt = buf;
2563 
2564 	sg = qc->__sg;
2565 	sg_init_one(sg, buf, buflen);
2566 }
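
/*
 * Typical use (see ata_dev_reread_id() above): point the qc at a
 * single kernel buffer, then set the DMA direction before issuing:
 *
 *	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
 *	qc->dma_dir = DMA_FROM_DEVICE;
 */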
2567 
2568 /**
2569  *	ata_sg_init - Associate command with scatter-gather table.
2570  *	@qc: Command to be associated
2571  *	@sg: Scatter-gather table.
2572  *	@n_elem: Number of elements in s/g table.
2573  *
2574  *	Initialize the data-related elements of queued_cmd @qc
2575  *	to point to a scatter-gather table @sg, containing @n_elem
2576  *	elements.
2577  *
2578  *	LOCKING:
2579  *	spin_lock_irqsave(host_set lock)
2580  */
2581 
2582 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2583 		 unsigned int n_elem)
2584 {
2585 	qc->flags |= ATA_QCFLAG_SG;
2586 	qc->__sg = sg;
2587 	qc->n_elem = n_elem;
2588 	qc->orig_n_elem = n_elem;
2589 }
2590 
2591 /**
2592  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2593  *	@qc: Command with memory buffer to be mapped.
2594  *
2595  *	DMA-map the memory buffer associated with queued_cmd @qc.
2596  *
2597  *	LOCKING:
2598  *	spin_lock_irqsave(host_set lock)
2599  *
2600  *	RETURNS:
2601  *	Zero on success, negative on error.
2602  */
2603 
2604 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2605 {
2606 	struct ata_port *ap = qc->ap;
2607 	int dir = qc->dma_dir;
2608 	struct scatterlist *sg = qc->__sg;
2609 	dma_addr_t dma_address;
2610 
2611 	/* we must lengthen transfers to end on a 32-bit boundary */
2612 	qc->pad_len = sg->length & 3;
2613 	if (qc->pad_len) {
2614 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2615 		struct scatterlist *psg = &qc->pad_sgent;
2616 
2617 		assert(qc->dev->class == ATA_DEV_ATAPI);
2618 
2619 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2620 
2621 		if (qc->tf.flags & ATA_TFLAG_WRITE)
2622 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2623 			       qc->pad_len);
2624 
2625 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2626 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2627 		/* trim sg */
2628 		sg->length -= qc->pad_len;
2629 
2630 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
2631 			sg->length, qc->pad_len);
2632 	}
2633 
2634 	if (!sg->length) {
2635 		sg_dma_address(sg) = 0;
2636 		goto skip_map;
2637 	}
2638 
2639 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2640 				     sg->length, dir);
2641 	if (dma_mapping_error(dma_address)) {
2642 		/* restore sg */
2643 		sg->length += qc->pad_len;
2644 		return -1;
2645 	}
2646 
2647 	sg_dma_address(sg) = dma_address;
2648 skip_map:
2649 	sg_dma_len(sg) = sg->length;
2650 
2651 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2652 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2653 
2654 	return 0;
2655 }
2656 
2657 /**
2658  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2659  *	@qc: Command with scatter-gather table to be mapped.
2660  *
2661  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
2662  *
2663  *	LOCKING:
2664  *	spin_lock_irqsave(host_set lock)
2665  *
2666  *	RETURNS:
2667  *	Zero on success, negative on error.
2668  *
2669  */
2670 
2671 static int ata_sg_setup(struct ata_queued_cmd *qc)
2672 {
2673 	struct ata_port *ap = qc->ap;
2674 	struct scatterlist *sg = qc->__sg;
2675 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
2676 	int n_elem, pre_n_elem, dir, trim_sg = 0;
2677 
2678 	VPRINTK("ENTER, ata%u\n", ap->id);
2679 	assert(qc->flags & ATA_QCFLAG_SG);
2680 
2681 	/* we must lengthen transfers to end on a 32-bit boundary */
2682 	qc->pad_len = lsg->length & 3;
2683 	if (qc->pad_len) {
2684 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2685 		struct scatterlist *psg = &qc->pad_sgent;
2686 		unsigned int offset;
2687 
2688 		assert(qc->dev->class == ATA_DEV_ATAPI);
2689 
2690 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2691 
2692 		/*
2693 		 * psg->page/offset are used to copy to-be-written
2694 		 * data in this function or read data in ata_sg_clean.
2695 		 */
2696 		offset = lsg->offset + lsg->length - qc->pad_len;
2697 		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
2698 		psg->offset = offset_in_page(offset);
2699 
2700 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
2701 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
2702 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
2703 			kunmap_atomic(psg->page, KM_IRQ0);
2704 		}
2705 
2706 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2707 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2708 		/* trim last sg */
2709 		lsg->length -= qc->pad_len;
2710 		if (lsg->length == 0)
2711 			trim_sg = 1;
2712 
2713 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
2714 			qc->n_elem - 1, lsg->length, qc->pad_len);
2715 	}
2716 
2717 	pre_n_elem = qc->n_elem;
2718 	if (trim_sg && pre_n_elem)
2719 		pre_n_elem--;
2720 
2721 	if (!pre_n_elem) {
2722 		n_elem = 0;
2723 		goto skip_map;
2724 	}
2725 
2726 	dir = qc->dma_dir;
2727 	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
2728 	if (n_elem < 1) {
2729 		/* restore last sg */
2730 		lsg->length += qc->pad_len;
2731 		return -1;
2732 	}
2733 
2734 	DPRINTK("%d sg elements mapped\n", n_elem);
2735 
2736 skip_map:
2737 	qc->n_elem = n_elem;
2738 
2739 	return 0;
2740 }
2741 
2742 /**
2743  *	ata_poll_qc_complete - turn irq back on and finish qc
2744  *	@qc: Command to complete
2745  *	@err_mask: error mask to pass on to ata_qc_complete()
2746  *
2747  *	LOCKING:
2748  *	None.  (grabs host lock)
2749  */
2750 
2751 void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
2752 {
2753 	struct ata_port *ap = qc->ap;
2754 	unsigned long flags;
2755 
2756 	spin_lock_irqsave(&ap->host_set->lock, flags);
2757 	ap->flags &= ~ATA_FLAG_NOINTR;
2758 	ata_irq_on(ap);
2759 	ata_qc_complete(qc, err_mask);
2760 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
2761 }
2762 
2763 /**
2764  *	ata_pio_poll - poll the port until BSY clears or the PIO timeout expires
2765  *	@ap: the target ata_port
2766  *
2767  *	LOCKING:
2768  *	None.  (executing in kernel thread context)
2769  *
2770  *	RETURNS:
2771  *	timeout value to use
2772  */
2773 
2774 static unsigned long ata_pio_poll(struct ata_port *ap)
2775 {
2776 	u8 status;
2777 	unsigned int poll_state = HSM_ST_UNKNOWN;
2778 	unsigned int reg_state = HSM_ST_UNKNOWN;
2779 
2780 	switch (ap->hsm_task_state) {
2781 	case HSM_ST:
2782 	case HSM_ST_POLL:
2783 		poll_state = HSM_ST_POLL;
2784 		reg_state = HSM_ST;
2785 		break;
2786 	case HSM_ST_LAST:
2787 	case HSM_ST_LAST_POLL:
2788 		poll_state = HSM_ST_LAST_POLL;
2789 		reg_state = HSM_ST_LAST;
2790 		break;
2791 	default:
2792 		BUG();
2793 		break;
2794 	}
2795 
2796 	status = ata_chk_status(ap);
2797 	if (status & ATA_BUSY) {
2798 		if (time_after(jiffies, ap->pio_task_timeout)) {
2799 			ap->hsm_task_state = HSM_ST_TMOUT;
2800 			return 0;
2801 		}
2802 		ap->hsm_task_state = poll_state;
2803 		return ATA_SHORT_PAUSE;
2804 	}
2805 
2806 	ap->hsm_task_state = reg_state;
2807 	return 0;
2808 }
2809 
2810 /**
2811  *	ata_pio_complete - check if drive is busy or idle
2812  *	@ap: the target ata_port
2813  *
2814  *	LOCKING:
2815  *	None.  (executing in kernel thread context)
2816  *
2817  *	RETURNS:
2818  *	Non-zero if qc completed, zero otherwise.
2819  */
2820 
2821 static int ata_pio_complete (struct ata_port *ap)
2822 {
2823 	struct ata_queued_cmd *qc;
2824 	u8 drv_stat;
2825 
2826 	/*
2827 	 * This is purely heuristic.  This is a fast path.  Sometimes when
2828 	 * we enter, BSY will be cleared in a chk-status or two.  If not,
2829 	 * the drive is probably seeking or something.  Snooze for a couple
2830 	 * msecs, then chk-status again.  If still busy, fall back to
2831 	 * HSM_ST_POLL state.
2832 	 */
2833 	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2834 	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2835 		msleep(2);
2836 		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2837 		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2838 			ap->hsm_task_state = HSM_ST_LAST_POLL;
2839 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2840 			return 0;
2841 		}
2842 	}
2843 
2844 	drv_stat = ata_wait_idle(ap);
2845 	if (!ata_ok(drv_stat)) {
2846 		ap->hsm_task_state = HSM_ST_ERR;
2847 		return 0;
2848 	}
2849 
2850 	qc = ata_qc_from_tag(ap, ap->active_tag);
2851 	assert(qc != NULL);
2852 
2853 	ap->hsm_task_state = HSM_ST_IDLE;
2854 
2855 	ata_poll_qc_complete(qc, 0);
2856 
2857 	/* another command may start at this point */
2858 
2859 	return 1;
2860 }
2861 
2862 
2863 /**
2864  *	swap_buf_le16 - swap halves of 16-bit words in place
2865  *	@buf:  Buffer to swap
2866  *	@buf_words:  Number of 16-bit words in buffer.
2867  *
2868  *	Swap halves of 16-bit words if needed to convert from
2869  *	little-endian byte order to native cpu byte order, or
2870  *	vice-versa.
2871  *
2872  *	LOCKING:
2873  *	Inherited from caller.
2874  */
2875 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2876 {
2877 #ifdef __BIG_ENDIAN
2878 	unsigned int i;
2879 
2880 	for (i = 0; i < buf_words; i++)
2881 		buf[i] = le16_to_cpu(buf[i]);
2882 #endif /* __BIG_ENDIAN */
2883 }
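
/*
 * Example: IDENTIFY data arrives as 256 little-endian 16-bit words;
 * after the PIO transfer, callers such as ata_dev_reread_id() do
 *
 *	swap_buf_le16(dev->id, ATA_ID_WORDS);
 *
 * which is a no-op on little-endian CPUs and swaps each word's bytes
 * on big-endian ones.
 */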
2884 
2885 /**
2886  *	ata_mmio_data_xfer - Transfer data by MMIO
2887  *	@ap: port to read/write
2888  *	@buf: data buffer
2889  *	@buflen: buffer length
2890  *	@write_data: read/write
2891  *
2892  *	Transfer data from/to the device data register by MMIO.
2893  *
2894  *	LOCKING:
2895  *	Inherited from caller.
2896  */
2897 
2898 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2899 			       unsigned int buflen, int write_data)
2900 {
2901 	unsigned int i;
2902 	unsigned int words = buflen >> 1;
2903 	u16 *buf16 = (u16 *) buf;
2904 	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2905 
2906 	/* Transfer multiple of 2 bytes */
2907 	if (write_data) {
2908 		for (i = 0; i < words; i++)
2909 			writew(le16_to_cpu(buf16[i]), mmio);
2910 	} else {
2911 		for (i = 0; i < words; i++)
2912 			buf16[i] = cpu_to_le16(readw(mmio));
2913 	}
2914 
2915 	/* Transfer trailing 1 byte, if any. */
2916 	if (unlikely(buflen & 0x01)) {
2917 		u16 align_buf[1] = { 0 };
2918 		unsigned char *trailing_buf = buf + buflen - 1;
2919 
2920 		if (write_data) {
2921 			memcpy(align_buf, trailing_buf, 1);
2922 			writew(le16_to_cpu(align_buf[0]), mmio);
2923 		} else {
2924 			align_buf[0] = cpu_to_le16(readw(mmio));
2925 			memcpy(trailing_buf, align_buf, 1);
2926 		}
2927 	}
2928 }
2929 
2930 /**
2931  *	ata_pio_data_xfer - Transfer data by PIO
2932  *	@ap: port to read/write
2933  *	@buf: data buffer
2934  *	@buflen: buffer length
2935  *	@write_data: read/write
2936  *
2937  *	Transfer data from/to the device data register by PIO.
2938  *
2939  *	LOCKING:
2940  *	Inherited from caller.
2941  */
2942 
2943 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2944 			      unsigned int buflen, int write_data)
2945 {
2946 	unsigned int words = buflen >> 1;
2947 
2948 	/* Transfer multiple of 2 bytes */
2949 	if (write_data)
2950 		outsw(ap->ioaddr.data_addr, buf, words);
2951 	else
2952 		insw(ap->ioaddr.data_addr, buf, words);
2953 
2954 	/* Transfer trailing 1 byte, if any. */
2955 	if (unlikely(buflen & 0x01)) {
2956 		u16 align_buf[1] = { 0 };
2957 		unsigned char *trailing_buf = buf + buflen - 1;
2958 
2959 		if (write_data) {
2960 			memcpy(align_buf, trailing_buf, 1);
2961 			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
2962 		} else {
2963 			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
2964 			memcpy(trailing_buf, align_buf, 1);
2965 		}
2966 	}
2967 }
2968 
2969 /**
2970  *	ata_data_xfer - Transfer data from/to the data register.
2971  *	@ap: port to read/write
2972  *	@buf: data buffer
2973  *	@buflen: buffer length
2974  *	@do_write: read/write
2975  *
2976  *	Transfer data from/to the device data register.
2977  *
2978  *	LOCKING:
2979  *	Inherited from caller.
2980  */
2981 
2982 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2983 			  unsigned int buflen, int do_write)
2984 {
2985 	if (ap->flags & ATA_FLAG_MMIO)
2986 		ata_mmio_data_xfer(ap, buf, buflen, do_write);
2987 	else
2988 		ata_pio_data_xfer(ap, buf, buflen, do_write);
2989 }
2990 
2991 /**
2992  *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
2993  *	@qc: Command on going
2994  *
2995  *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
2996  *
2997  *	LOCKING:
2998  *	Inherited from caller.
2999  */
3000 
3001 static void ata_pio_sector(struct ata_queued_cmd *qc)
3002 {
3003 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3004 	struct scatterlist *sg = qc->__sg;
3005 	struct ata_port *ap = qc->ap;
3006 	struct page *page;
3007 	unsigned int offset;
3008 	unsigned char *buf;
3009 
3010 	if (qc->cursect == (qc->nsect - 1))
3011 		ap->hsm_task_state = HSM_ST_LAST;
3012 
3013 	page = sg[qc->cursg].page;
3014 	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3015 
3016 	/* get the current page and offset */
3017 	page = nth_page(page, (offset >> PAGE_SHIFT));
3018 	offset %= PAGE_SIZE;
3019 
3020 	buf = kmap(page) + offset;
3021 
3022 	qc->cursect++;
3023 	qc->cursg_ofs++;
3024 
3025 	if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) {
3026 		qc->cursg++;
3027 		qc->cursg_ofs = 0;
3028 	}
3029 
3030 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3031 
3032 	/* do the actual data transfer */
3033 	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3034 	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3035 
3036 	kunmap(page);
3037 }
3038 
3039 /**
3040  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
3041  *	@qc: Command on going
3042  *	@bytes: number of bytes
3043  *
3044  *	Transfer data from/to the ATAPI device.
3045  *
3046  *	LOCKING:
3047  *	Inherited from caller.
3048  *
3049  */
3050 
3051 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3052 {
3053 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3054 	struct scatterlist *sg = qc->__sg;
3055 	struct ata_port *ap = qc->ap;
3056 	struct page *page;
3057 	unsigned char *buf;
3058 	unsigned int offset, count;
3059 
3060 	if (qc->curbytes + bytes >= qc->nbytes)
3061 		ap->hsm_task_state = HSM_ST_LAST;
3062 
3063 next_sg:
3064 	if (unlikely(qc->cursg >= qc->n_elem)) {
3065 		/*
3066 		 * The end of qc->sg is reached and the device expects
3067 		 * more data to transfer. In order not to overrun qc->sg
3068 		 * and fulfill length specified in the byte count register,
3069 		 *    - for read case, discard trailing data from the device
3070 		 *    - for write case, pad with zeroed data to the device
3071 		 */
3072 		u16 pad_buf[1] = { 0 };
3073 		unsigned int words = bytes >> 1;
3074 		unsigned int i;
3075 
3076 		if (words) /* warning if bytes > 1 */
3077 			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3078 			       ap->id, bytes);
3079 
3080 		for (i = 0; i < words; i++)
3081 			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3082 
3083 		ap->hsm_task_state = HSM_ST_LAST;
3084 		return;
3085 	}
3086 
3087 	sg = &qc->__sg[qc->cursg];
3088 
3089 	page = sg->page;
3090 	offset = sg->offset + qc->cursg_ofs;
3091 
3092 	/* get the current page and offset */
3093 	page = nth_page(page, (offset >> PAGE_SHIFT));
3094 	offset %= PAGE_SIZE;
3095 
3096 	/* don't overrun current sg */
3097 	count = min(sg->length - qc->cursg_ofs, bytes);
3098 
3099 	/* don't cross page boundaries */
3100 	count = min(count, (unsigned int)PAGE_SIZE - offset);
3101 
3102 	buf = kmap(page) + offset;
3103 
3104 	bytes -= count;
3105 	qc->curbytes += count;
3106 	qc->cursg_ofs += count;
3107 
3108 	if (qc->cursg_ofs == sg->length) {
3109 		qc->cursg++;
3110 		qc->cursg_ofs = 0;
3111 	}
3112 
3113 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3114 
3115 	/* do the actual data transfer */
3116 	ata_data_xfer(ap, buf, count, do_write);
3117 
3118 	kunmap(page);
3119 
3120 	if (bytes)
3121 		goto next_sg;
3122 }
3123 
3124 /**
3125  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
3126  *	@qc: Command on going
3127  *
3128  *	Transfer data from/to the ATAPI device.
3129  *
3130  *	LOCKING:
3131  *	Inherited from caller.
3132  */
3133 
3134 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3135 {
3136 	struct ata_port *ap = qc->ap;
3137 	struct ata_device *dev = qc->dev;
3138 	unsigned int ireason, bc_lo, bc_hi, bytes;
3139 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3140 
3141 	ap->ops->tf_read(ap, &qc->tf);
3142 	ireason = qc->tf.nsect;
3143 	bc_lo = qc->tf.lbam;
3144 	bc_hi = qc->tf.lbah;
3145 	bytes = (bc_hi << 8) | bc_lo;
3146 
3147 	/* shall be cleared to zero, indicating xfer of data */
3148 	if (ireason & (1 << 0))
3149 		goto err_out;
3150 
3151 	/* make sure transfer direction matches expected */
3152 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3153 	if (do_write != i_write)
3154 		goto err_out;
3155 
3156 	__atapi_pio_bytes(qc, bytes);
3157 
3158 	return;
3159 
3160 err_out:
3161 	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3162 	      ap->id, dev->devno);
3163 	ap->hsm_task_state = HSM_ST_ERR;
3164 }
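
/*
 * For reference, the ireason bits tested above: bit 0 (CoD) must be
 * clear for a data phase, and bit 1 (IO) set means device-to-host.
 * For example, ireason == 0x2 with a byte count of 0x0800 announces a
 * 2048-byte device-to-host (read) data phase.
 */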
3165 
3166 /**
3167  *	ata_pio_block - start PIO on a block
3168  *	@ap: the target ata_port
3169  *
3170  *	LOCKING:
3171  *	None.  (executing in kernel thread context)
3172  */
3173 
3174 static void ata_pio_block(struct ata_port *ap)
3175 {
3176 	struct ata_queued_cmd *qc;
3177 	u8 status;
3178 
3179 	/*
3180 	 * This is purely heuristic.  This is a fast path.
3181 	 * Sometimes when we enter, BSY will be cleared in
3182 	 * a chk-status or two.  If not, the drive is probably seeking
3183 	 * or something.  Snooze for a couple msecs, then
3184 	 * chk-status again.  If still busy, fall back to
3185 	 * HSM_ST_POLL state.
3186 	 */
3187 	status = ata_busy_wait(ap, ATA_BUSY, 5);
3188 	if (status & ATA_BUSY) {
3189 		msleep(2);
3190 		status = ata_busy_wait(ap, ATA_BUSY, 10);
3191 		if (status & ATA_BUSY) {
3192 			ap->hsm_task_state = HSM_ST_POLL;
3193 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3194 			return;
3195 		}
3196 	}
3197 
3198 	qc = ata_qc_from_tag(ap, ap->active_tag);
3199 	assert(qc != NULL);
3200 
3201 	if (is_atapi_taskfile(&qc->tf)) {
3202 		/* no more data to transfer or unsupported ATAPI command */
3203 		if ((status & ATA_DRQ) == 0) {
3204 			ap->hsm_task_state = HSM_ST_LAST;
3205 			return;
3206 		}
3207 
3208 		atapi_pio_bytes(qc);
3209 	} else {
3210 		/* handle BSY=0, DRQ=0 as error */
3211 		if ((status & ATA_DRQ) == 0) {
3212 			ap->hsm_task_state = HSM_ST_ERR;
3213 			return;
3214 		}
3215 
3216 		ata_pio_sector(qc);
3217 	}
3218 }
3219 
3220 static void ata_pio_error(struct ata_port *ap)
3221 {
3222 	struct ata_queued_cmd *qc;
3223 
3224 	printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3225 
3226 	qc = ata_qc_from_tag(ap, ap->active_tag);
3227 	assert(qc != NULL);
3228 
3229 	ap->hsm_task_state = HSM_ST_IDLE;
3230 
3231 	ata_poll_qc_complete(qc, AC_ERR_ATA_BUS);
3232 }
3233 
3234 static void ata_pio_task(void *_data)
3235 {
3236 	struct ata_port *ap = _data;
3237 	unsigned long timeout;
3238 	int qc_completed;
3239 
3240 fsm_start:
3241 	timeout = 0;
3242 	qc_completed = 0;
3243 
3244 	switch (ap->hsm_task_state) {
3245 	case HSM_ST_IDLE:
3246 		return;
3247 
3248 	case HSM_ST:
3249 		ata_pio_block(ap);
3250 		break;
3251 
3252 	case HSM_ST_LAST:
3253 		qc_completed = ata_pio_complete(ap);
3254 		break;
3255 
3256 	case HSM_ST_POLL:
3257 	case HSM_ST_LAST_POLL:
3258 		timeout = ata_pio_poll(ap);
3259 		break;
3260 
3261 	case HSM_ST_TMOUT:
3262 	case HSM_ST_ERR:
3263 		ata_pio_error(ap);
3264 		return;
3265 	}
3266 
3267 	if (timeout) {
3268 		set_current_state(TASK_UNINTERRUPTIBLE);
3269 		schedule_timeout(timeout);
3270 		schedule_task(&ap->pio_task);
3271 	}
3272 	else if (!qc_completed)
3273 		goto fsm_start;
3274 }
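
/*
 * Rough map of the HSM transitions driven by the task above:
 *
 *	HSM_ST       -- data moved -->   HSM_ST or HSM_ST_LAST
 *	HSM_ST       -- BSY stuck  -->   HSM_ST_POLL
 *	HSM_ST_POLL  -- timed out  -->   HSM_ST_TMOUT -> ata_pio_error()
 *	HSM_ST_LAST  -- bad status -->   HSM_ST_ERR   -> ata_pio_error()
 *	HSM_ST_LAST  -- all done   -->   HSM_ST_IDLE
 */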
3275 
3276 /**
3277  *	ata_qc_timeout - Handle timeout of queued command
3278  *	@qc: Command that timed out
3279  *
3280  *	Some part of the kernel (currently, only the SCSI layer)
3281  *	has noticed that the active command on the port has not
3282  *	completed after a specified length of time.  Handle this
3283  *	condition by disabling DMA (if necessary) and completing
3284  *	transactions, with error if necessary.
3285  *
3286  *	This also handles the case of the "lost interrupt", where
3287  *	for some reason (possibly hardware bug, possibly driver bug)
3288  *	an interrupt was not delivered to the driver, even though the
3289  *	transaction completed successfully.
3290  *
3291  *	LOCKING:
3292  *	Inherited from SCSI layer (none, can sleep)
3293  */
3294 
3295 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3296 {
3297 	struct ata_port *ap = qc->ap;
3298 	struct ata_host_set *host_set = ap->host_set;
3299 	u8 host_stat = 0, drv_stat;
3300 	unsigned long flags;
3301 
3302 	DPRINTK("ENTER\n");
3303 
3304 	spin_lock_irqsave(&host_set->lock, flags);
3305 
3306 	/* hack alert!  We cannot use the supplied completion
3307 	 * function from inside the ->eh_strategy_handler() thread.
3308 	 * libata is the only user of ->eh_strategy_handler() in
3309 	 * any kernel, so the default scsi_done() assumes it is
3310 	 * not being called from the SCSI EH.
3311 	 */
3312 	qc->scsidone = scsi_finish_command;
3313 
3314 	switch (qc->tf.protocol) {
3315 
3316 	case ATA_PROT_DMA:
3317 	case ATA_PROT_ATAPI_DMA:
3318 		host_stat = ap->ops->bmdma_status(ap);
3319 
3320 		/* before we do anything else, clear DMA-Start bit */
3321 		ap->ops->bmdma_stop(qc);
3322 
3323 		/* fall through */
3324 
3325 	default:
3326 		ata_altstatus(ap);
3327 		drv_stat = ata_chk_status(ap);
3328 
3329 		/* ack bmdma irq events */
3330 		ap->ops->irq_clear(ap);
3331 
3332 		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3333 		       ap->id, qc->tf.command, drv_stat, host_stat);
3334 
3335 		/* complete taskfile transaction */
3336 		ata_qc_complete(qc, ac_err_mask(drv_stat));
3337 		break;
3338 	}
3339 
3340 	spin_unlock_irqrestore(&host_set->lock, flags);
3341 
3342 	DPRINTK("EXIT\n");
3343 }
3344 
3345 /**
3346  *	ata_eng_timeout - Handle timeout of queued command
3347  *	@ap: Port on which timed-out command is active
3348  *
3349  *	Some part of the kernel (currently, only the SCSI layer)
3350  *	has noticed that the active command on port @ap has not
3351  *	completed after a specified length of time.  Handle this
3352  *	condition by disabling DMA (if necessary) and completing
3353  *	transactions, with error if necessary.
3354  *
3355  *	This also handles the case of the "lost interrupt", where
3356  *	for some reason (possibly hardware bug, possibly driver bug)
3357  *	an interrupt was not delivered to the driver, even though the
3358  *	transaction completed successfully.
3359  *
3360  *	LOCKING:
3361  *	Inherited from SCSI layer (none, can sleep)
3362  */
3363 
3364 void ata_eng_timeout(struct ata_port *ap)
3365 {
3366 	struct ata_queued_cmd *qc;
3367 
3368 	DPRINTK("ENTER\n");
3369 
3370 	qc = ata_qc_from_tag(ap, ap->active_tag);
3371 	if (qc)
3372 		ata_qc_timeout(qc);
3373 	else {
3374 		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3375 		       ap->id);
3376 		goto out;
3377 	}
3378 
3379 out:
3380 	DPRINTK("EXIT\n");
3381 }
3382 
3383 /**
3384  *	ata_qc_new - Request an available ATA command, for queueing
3385  *	@ap: Port from which an available command structure is requested
3387  *
3388  *	LOCKING:
3389  *	None.
3390  */
3391 
3392 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3393 {
3394 	struct ata_queued_cmd *qc = NULL;
3395 	unsigned int i;
3396 
3397 	for (i = 0; i < ATA_MAX_QUEUE; i++)
3398 		if (!test_and_set_bit(i, &ap->qactive)) {
3399 			qc = ata_qc_from_tag(ap, i);
3400 			break;
3401 		}
3402 
3403 	if (qc)
3404 		qc->tag = i;
3405 
3406 	return qc;
3407 }
3408 
3409 /**
3410  *	ata_qc_new_init - Request an available ATA command, and initialize it
3411  *	@ap: Port associated with device @dev
3412  *	@dev: Device the new command will be initialized for
3413  *
3414  *	LOCKING:
3415  *	None.
3416  */
3417 
3418 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3419 				      struct ata_device *dev)
3420 {
3421 	struct ata_queued_cmd *qc;
3422 
3423 	qc = ata_qc_new(ap);
3424 	if (qc) {
3425 		qc->scsicmd = NULL;
3426 		qc->ap = ap;
3427 		qc->dev = dev;
3428 
3429 		ata_qc_reinit(qc);
3430 	}
3431 
3432 	return qc;
3433 }
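
/*
 * The internal-command pattern built on ata_qc_new_init(), as used by
 * ata_dev_set_xfermode() and friends above (sketch):
 *
 *	DECLARE_COMPLETION(wait);
 *	struct ata_queued_cmd *qc;
 *	unsigned long flags;
 *	int rc;
 *
 *	qc = ata_qc_new_init(ap, dev);
 *	BUG_ON(qc == NULL);
 *	... fill in qc->tf ...
 *	qc->waiting = &wait;
 *	qc->complete_fn = ata_qc_complete_noop;
 *	spin_lock_irqsave(&ap->host_set->lock, flags);
 *	rc = ata_qc_issue(qc);
 *	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 *	if (rc)
 *		ata_port_disable(ap);
 *	else
 *		ata_qc_wait_err(qc, &wait);
 */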
3434 
3435 int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask)
3436 {
3437 	return 0;
3438 }
3439 
3440 static void __ata_qc_complete(struct ata_queued_cmd *qc)
3441 {
3442 	struct ata_port *ap = qc->ap;
3443 	unsigned int tag, do_clear = 0;
3444 
3445 	qc->flags = 0;
3446 	tag = qc->tag;
3447 	if (likely(ata_tag_valid(tag))) {
3448 		if (tag == ap->active_tag)
3449 			ap->active_tag = ATA_TAG_POISON;
3450 		qc->tag = ATA_TAG_POISON;
3451 		do_clear = 1;
3452 	}
3453 
3454 	if (qc->waiting) {
3455 		struct completion *waiting = qc->waiting;
3456 		qc->waiting = NULL;
3457 		complete(waiting);
3458 	}
3459 
3460 	if (likely(do_clear))
3461 		clear_bit(tag, &ap->qactive);
3462 }
3463 
3464 /**
3465  *	ata_qc_free - free unused ata_queued_cmd
3466  *	@qc: Command to complete
3467  *
3468  *	Designed to free unused ata_queued_cmd object
3469  *	in case something prevents using it.
3470  *
3471  *	LOCKING:
3472  *	spin_lock_irqsave(host_set lock)
3473  */
3474 void ata_qc_free(struct ata_queued_cmd *qc)
3475 {
3476 	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
3477 	assert(qc->waiting == NULL);	/* nothing should be waiting */
3478 
3479 	__ata_qc_complete(qc);
3480 }
3481 
3482 /**
3483  *	ata_qc_complete - Complete an active ATA command
3484  *	@qc: Command to complete
3485  *	@err_mask: ATA Status register contents
3486  *
3487  *	Indicate to the mid and upper layers that an ATA
3488  *	command has completed, with either an ok or not-ok status.
3489  *
3490  *	LOCKING:
3491  *	spin_lock_irqsave(host_set lock)
3492  */
3493 
3494 void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
3495 {
3496 	int rc;
3497 
3498 	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
3499 	assert(qc->flags & ATA_QCFLAG_ACTIVE);
3500 
3501 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3502 		ata_sg_clean(qc);
3503 
3504 	/* atapi: mark qc as inactive to prevent the interrupt handler
3505 	 * from completing the command twice later, before the error handler
3506 	 * is called. (when rc != 0 and atapi request sense is needed)
3507 	 */
3508 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
3509 
3510 	/* call completion callback */
3511 	rc = qc->complete_fn(qc, err_mask);
3512 
3513 	/* if callback indicates not to complete command (non-zero),
3514 	 * return immediately
3515 	 */
3516 	if (rc != 0)
3517 		return;
3518 
3519 	__ata_qc_complete(qc);
3520 
3521 	VPRINTK("EXIT\n");
3522 }
3523 
3524 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3525 {
3526 	struct ata_port *ap = qc->ap;
3527 
3528 	switch (qc->tf.protocol) {
3529 	case ATA_PROT_DMA:
3530 	case ATA_PROT_ATAPI_DMA:
3531 		return 1;
3532 
3533 	case ATA_PROT_ATAPI:
3534 	case ATA_PROT_PIO:
3535 	case ATA_PROT_PIO_MULT:
3536 		if (ap->flags & ATA_FLAG_PIO_DMA)
3537 			return 1;
3538 
3539 		/* fall through */
3540 
3541 	default:
3542 		return 0;
3543 	}
3544 
3545 	/* never reached */
3546 }
3547 
3548 /**
3549  *	ata_qc_issue - issue taskfile to device
3550  *	@qc: command to issue to device
3551  *
3552  *	Prepare an ATA command for submission to the device.
3553  *	This includes mapping the data into a DMA-able
3554  *	area, filling in the S/G table, and finally
3555  *	writing the taskfile to hardware, starting the command.
3556  *
3557  *	LOCKING:
3558  *	spin_lock_irqsave(host_set lock)
3559  *
3560  *	RETURNS:
3561  *	Zero on success, negative on error.
3562  */
3563 
3564 int ata_qc_issue(struct ata_queued_cmd *qc)
3565 {
3566 	struct ata_port *ap = qc->ap;
3567 
3568 	if (ata_should_dma_map(qc)) {
3569 		if (qc->flags & ATA_QCFLAG_SG) {
3570 			if (ata_sg_setup(qc))
3571 				goto err_out;
3572 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
3573 			if (ata_sg_setup_one(qc))
3574 				goto err_out;
3575 		}
3576 	} else {
3577 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
3578 	}
3579 
3580 	ap->ops->qc_prep(qc);
3581 
3582 	qc->ap->active_tag = qc->tag;
3583 	qc->flags |= ATA_QCFLAG_ACTIVE;
3584 
3585 	return ap->ops->qc_issue(qc);
3586 
3587 err_out:
3588 	return -1;
3589 }
3590 
3591 
3592 /**
3593  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3594  *	@qc: command to issue to device
3595  *
3596  *	Using various libata functions and hooks, this function
3597  *	starts an ATA command.  ATA commands are grouped into
3598  *	classes called "protocols", and issuing each type of protocol
3599  *	is slightly different.
3600  *
3601  *	May be used as the qc_issue() entry in ata_port_operations.
3602  *
3603  *	LOCKING:
3604  *	spin_lock_irqsave(host_set lock)
3605  *
3606  *	RETURNS:
3607  *	Zero on success, negative on error.
3608  */
3609 
3610 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3611 {
3612 	struct ata_port *ap = qc->ap;
3613 
3614 	ata_dev_select(ap, qc->dev->devno, 1, 0);
3615 
3616 	switch (qc->tf.protocol) {
3617 	case ATA_PROT_NODATA:
3618 		ata_tf_to_host(ap, &qc->tf);
3619 		break;
3620 
3621 	case ATA_PROT_DMA:
3622 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
3623 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
3624 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
3625 		break;
3626 
3627 	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3628 		ata_qc_set_polling(qc);
3629 		ata_tf_to_host(ap, &qc->tf);
3630 		ap->hsm_task_state = HSM_ST;
3631 		schedule_task(&ap->pio_task);
3632 		break;
3633 
3634 	case ATA_PROT_ATAPI:
3635 		ata_qc_set_polling(qc);
3636 		ata_tf_to_host(ap, &qc->tf);
3637 		schedule_task(&ap->packet_task);
3638 		break;
3639 
3640 	case ATA_PROT_ATAPI_NODATA:
3641 		ap->flags |= ATA_FLAG_NOINTR;
3642 		ata_tf_to_host(ap, &qc->tf);
3643 		schedule_task(&ap->packet_task);
3644 		break;
3645 
3646 	case ATA_PROT_ATAPI_DMA:
3647 		ap->flags |= ATA_FLAG_NOINTR;
3648 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
3649 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
3650 		schedule_task(&ap->packet_task);
3651 		break;
3652 
3653 	default:
3654 		WARN_ON(1);
3655 		return -1;
3656 	}
3657 
3658 	return 0;
3659 }
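
/*
 * Sketch of how a driver typically wires this in (struct name and
 * other fields hypothetical):
 *
 *	static struct ata_port_operations foo_ops = {
 *		...
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		...
 *	};
 */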
3660 
3661 /**
3662  *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
3663  *	@qc: Info associated with this ATA transaction.
3664  *
3665  *	LOCKING:
3666  *	spin_lock_irqsave(host_set lock)
3667  */
3668 
3669 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3670 {
3671 	struct ata_port *ap = qc->ap;
3672 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3673 	u8 dmactl;
3674 	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3675 
3676 	/* load PRD table addr. */
3677 	mb();	/* make sure PRD table writes are visible to controller */
3678 	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3679 
3680 	/* specify data direction, triple-check start bit is clear */
3681 	dmactl = readb(mmio + ATA_DMA_CMD);
3682 	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3683 	if (!rw)
3684 		dmactl |= ATA_DMA_WR;
3685 	writeb(dmactl, mmio + ATA_DMA_CMD);
3686 
3687 	/* issue r/w command */
3688 	ap->ops->exec_command(ap, &qc->tf);
3689 }
3690 
3691 /**
3692  *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
3693  *	@qc: Info associated with this ATA transaction.
3694  *
3695  *	LOCKING:
3696  *	spin_lock_irqsave(host_set lock)
3697  */
3698 
3699 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3700 {
3701 	struct ata_port *ap = qc->ap;
3702 	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3703 	u8 dmactl;
3704 
3705 	/* start host DMA transaction */
3706 	dmactl = readb(mmio + ATA_DMA_CMD);
3707 	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3708 
3709 	/* Strictly, one may wish to issue a readb() here, to
3710 	 * flush the mmio write.  However, control also passes
3711 	 * to the hardware at this point, and it will interrupt
3712 	 * us when we are to resume control.  So, in effect,
3713 	 * we don't care when the mmio write flushes.
3714 	 * Further, a read of the DMA status register _immediately_
3715 	 * following the write may not return what certain flaky
3716 	 * hardware expects, so it seems best not to add a readb()
3717 	 * without first testing all the MMIO ATA cards/mobos.
3718 	 * Or maybe I'm just being paranoid.
3719 	 */
}

/**
 *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}


/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Writes the ATA_DMA_START flag to the DMA command register.
 *
 *	May be used as the bmdma_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_start_mmio(qc);
	else
		ata_bmdma_start_pio(qc);
}


/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Writes address of PRD table to device's PRD Table Address
 *	register, sets the DMA control register, and calls
 *	ops->exec_command() to start the transfer.
 *
 *	May be used as the bmdma_setup() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_setup_mmio(qc);
	else
		ata_bmdma_setup_pio(qc);
}


/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_bmdma_irq_clear(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio =
			((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
		writeb(readb(mmio), mmio);
	} else {
		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
		outb(inb(addr), addr);
	}
}


/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

u8 ata_bmdma_status(struct ata_port *ap)
{
	u8 host_stat;
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
	} else
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	return host_stat;
}


/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
			mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
			ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);        /* dummy read */
}

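/* Example: a minimal sketch, not from the original file, of how a
 * low-level driver for a conventional BMDMA controller might plug the
 * generic helpers above into its ata_port_operations.  The "mydrv_"
 * name is hypothetical; the field layout is assumed to match this
 * version of <linux/libata.h>, and every helper named below is
 * exported at the bottom of this file.
 *
 *	static struct ata_port_operations mydrv_ops = {
 *		.port_disable	= ata_port_disable,
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.eng_timeout	= ata_eng_timeout,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.port_start	= ata_port_start,
 *		.port_stop	= ata_port_stop,
 *		.host_stop	= ata_host_stop,
 *	};
 */
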
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	u8 status, host_stat;

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			goto idle_irq;

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_NODATA:
		/* check altstatus */
		status = ata_altstatus(ap);
		if (status & ATA_BUSY)
			goto idle_irq;

		/* check main status, clearing INTRQ */
		status = ata_chk_status(ap);
		if (unlikely(status & ATA_BUSY))
			goto idle_irq;
		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
			ap->id, qc->tf.protocol, status);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		/* complete taskfile transaction */
		ata_qc_complete(qc, ac_err_mask(status));
		break;

	default:
		goto idle_irq;
	}

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
		return 1;	/* claim the irq for the debug trap */
	}
#endif
	return 0;	/* irq not handled */
}

/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host_set information structure
 *	@regs: unused
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host_set lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		if (ap &&
		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	atapi_packet_task - Write CDB bytes to hardware
 *	@_data: Port to which ATAPI device is attached.
 *
 *	When the device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *	If DMA is to be performed, exit immediately.
 *	Otherwise, we are in polling mode, so poll
 *	status until the operation succeeds or fails.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */

static void atapi_packet_task(void *_data)
{
	struct ata_port *ap = _data;
	struct ata_queued_cmd *qc;
	u8 status;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);
	assert(qc->flags & ATA_QCFLAG_ACTIVE);

	/* sleep-wait for BSY to clear */
	DPRINTK("busy wait\n");
	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
		goto err_out_status;

	/* make sure DRQ is set */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
		goto err_out;

	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	assert(ap->cdb_len >= 12);

	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
		unsigned long flags;

		/* Once we're done issuing command and kicking bmdma,
		 * irq handler takes over.  To not lose irq, we need
		 * to clear NOINTR flag before sending cdb, but
		 * interrupt handler shouldn't be invoked before we're
		 * finished.  Hence, the following locking.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		ap->flags &= ~ATA_FLAG_NOINTR;
		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
			ap->ops->bmdma_start(qc);	/* initiate bmdma */
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	} else {
		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);

		/* PIO commands are handled by polling */
		ap->hsm_task_state = HSM_ST;
		schedule_task(&ap->pio_task);
	}

	return;

err_out_status:
	status = ata_chk_status(ap);
err_out:
	ata_poll_qc_complete(qc, __ac_err_mask(status));
}


/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}


/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}

void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}


/**
 *	ata_host_remove - Unregister SCSI host structure with upper layers
 *	@ap: Port to unregister
 *	@do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	if (do_unregister)
		scsi_unregister(sh);

	ap->ops->port_stop(ap);
}

/**
 *	ata_host_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: associated SCSI mid-layer structure
 *	@host_set: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure, and its associated
 *	scsi_host.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;
	host->pci_dev = to_pci_dev(ent->dev);

	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

	INIT_TQUEUE(&ap->packet_task, atapi_packet_task, ap);
	INIT_TQUEUE(&ap->pio_task, ata_pio_task, ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].devno = i;

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}

/**
 *	ata_host_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host_set: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");
	host = scsi_register(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_unregister(host);
	return NULL;
}

/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		/* scsi_host_put(host_set->ports[i]->host); */
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}

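/* Example: a minimal sketch, not from the original file, of a direct
 * caller of ata_device_add().  The caller fills an ata_probe_ent and
 * frees it after registration has been attempted, exactly as
 * ata_scsi_detect() below does for queued entries.  "mydrv_sht",
 * "mydrv_ops", "irq" and the base addresses are hypothetical; the
 * pio_mask value 0x1f enables PIO modes 0-4.
 *
 *	struct ata_probe_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
 *
 *	if (!ent)
 *		return -ENOMEM;
 *	INIT_LIST_HEAD(&ent->node);
 *	ent->dev = dev;
 *	ent->sht = &mydrv_sht;
 *	ent->port_ops = &mydrv_ops;
 *	ent->n_ports = 1;
 *	ent->irq = irq;
 *	ent->irq_flags = SA_SHIRQ;
 *	ent->pio_mask = 0x1f;
 *	ent->port[0].cmd_addr = cmd_base;
 *	ent->port[0].altstatus_addr = ent->port[0].ctl_addr = ctl_base;
 *	ata_std_ports(&ent->port[0]);
 *	if (!ata_device_add(ent))
 *		printk(KERN_ERR "mydrv: no ports registered\n");
 *	kfree(ent);
 */
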
/**
 *	ata_host_set_remove - PCI layer callback for device removal
 *	@host_set: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set. Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	Scsi_Host_Template *sht;
	unsigned int i;
	int rc;

	/* FIXME: this unregisters all ports attached to the
	 * Scsi_Host_Template given.  We _might_ have multiple
	 * templates (though we don't ATM), so this is ok... for now.
	 */
	ap = host_set->ports[0];
	sht = ap->host->hostt;
	rc = scsi_unregister_module(MODULE_SCSI_HA, sht);
	/* FIXME: handle 'rc' failure? */

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}

/**
 *	ata_scsi_detect - probe devices queued on the libata probe list
 *	@sht: SCSI host template; its use_new_eh_code flag is forced on
 *
 *	Drains ata_probe_list, calling ata_device_add() for each queued
 *	probe entry and freeing the entry afterwards.
 *
 *	LOCKING:
 *	Takes and releases ata_module_lock around list manipulation.
 *
 *	RETURNS:
 *	Total number of ports registered.
 */

int ata_scsi_detect(Scsi_Host_Template *sht)
{
	struct list_head *node;
	struct ata_probe_ent *ent;
	int count = 0;

	VPRINTK("ENTER\n");

	sht->use_new_eh_code = 1;	/* IORL hack, part deux */

	spin_lock(&ata_module_lock);
	while (!list_empty(&ata_probe_list)) {
		node = ata_probe_list.next;
		ent = list_entry(node, struct ata_probe_ent, node);
		list_del(node);

		spin_unlock(&ata_module_lock);

		count += ata_device_add(ent);
		kfree(ent);

		spin_lock(&ata_module_lock);
	}
	spin_unlock(&ata_module_lock);

	VPRINTK("EXIT, returning %d\n", count);
	return count;
}

/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@host: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);

	DPRINTK("EXIT\n");
	return 1;
}

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}

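/* Example: typical use of ata_std_ports(), a sketch not from the
 * original file, here using the legacy primary-channel addresses that
 * ata_pci_init_legacy_port() below also uses:
 *
 *	ioaddr->cmd_addr = 0x1f0;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = 0x3f6;
 *	ata_std_ports(ioaddr);
 */
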
static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       pci_name(to_pci_dev(dev)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->host_flags = port->host_flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;

	return probe_ent;
}

/**
 *	ata_add_to_probe_list - add an entry to the list of devices to be probed
 *	@probe_ent: describes device to be probed.
 *
 *	LOCKING:
 *	Takes ata_module_lock.
 */

void ata_add_to_probe_list(struct ata_probe_ent *probe_ent)
{
	spin_lock(&ata_module_lock);
	list_add_tail(&probe_ent->node, &ata_probe_list);
	spin_unlock(&ata_module_lock);
}


#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}

/**
 *	ata_pci_init_native_mode - Initialize native-mode driver
 *	@pdev:  pci device to be initialized
 *	@port:  array[2] of pointers to port info structures.
 *	@ports: bitmap of ports present
 *
 *	Utility function which allocates and initializes an
 *	ata_probe_ent structure for a standard dual-port
 *	PIO-based IDE controller.  The returned ata_probe_ent
 *	structure can be passed to ata_device_add(), after which it
 *	should be freed with kfree().
 *
 *	The caller need only pass the address of the primary port; the
 *	secondary will be deduced automatically.  If the device has
 *	non-standard secondary port mappings this function can be
 *	called twice, once for each interface.
 */

struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
	struct ata_probe_ent *probe_ent =
		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	int p = 0;

	if (!probe_ent)
		return NULL;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->private_data = port[0]->private_data;

	if (ports & ATA_PORT_PRIMARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	if (ports & ATA_PORT_SECONDARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	probe_ent->n_ports = p;
	return probe_ent;
}

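/* Example: a sketch, not from the original file, of how a driver's
 * probe routine might use the helper above; it mirrors the native-mode
 * branch of ata_pci_init_one() below.  "port" is the array of
 * ata_port_info pointers described in the kernel-doc.
 *
 *	struct ata_probe_ent *probe_ent;
 *
 *	probe_ent = ata_pci_init_native_mode(pdev, port,
 *				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *	ata_add_to_probe_list(probe_ent);
 */
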
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
	if (!probe_ent)
		return NULL;

	probe_ent->legacy_mode = 1;
	probe_ent->n_ports = 1;
	probe_ent->hard_port_no = port_num;
	probe_ent->private_data = port->private_data;

	switch (port_num) {
	case 0:
		probe_ent->irq = 14;
		probe_ent->port[0].cmd_addr = 0x1f0;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = 0x3f6;
		break;
	case 1:
		probe_ent->irq = 15;
		probe_ent->port[0].cmd_addr = 0x170;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = 0x376;
		break;
	}
	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
	ata_std_ports(&probe_ent->port[0]);
	return probe_ent;
}

/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@port_info: Information from low-level host driver
 *	@n_ports: Number of ports attached to host controller
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and queues
 *	the resulting probe entries on the libata probe list, from which
 *	ata_scsi_detect() later registers them via ata_device_add().
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */

int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
		      unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
	struct ata_port_info *port[2];
	u8 tmp8, mask;
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];

	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);
	}

	/* FIXME... */
	if ((!legacy_mode) && (n_ports > 2)) {
		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
		n_ports = 2;
		/* For now */
	}

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled. Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	 */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		disable_dev_on_err = 0;
		goto err_out;
	}

	if (legacy_mode) {
		if (!request_region(0x1f0, 8, "libata")) {
			disable_dev_on_err = 0;
			printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
		} else
			legacy_mode |= (1 << 0);

		if (!request_region(0x170, 8, "libata")) {
			disable_dev_on_err = 0;
			printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
		} else
			legacy_mode |= (1 << 1);
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
		if (legacy_mode & (1 << 1))
			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
	} else {
		if (n_ports == 2)
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
		else
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	}
	if (!probe_ent && !probe_ent2) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	spin_lock(&ata_module_lock);
	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			list_add_tail(&probe_ent->node, &ata_probe_list);
		else
			kfree(probe_ent);
		if (legacy_mode & (1 << 1))
			list_add_tail(&probe_ent2->node, &ata_probe_list);
		else
			kfree(probe_ent2);
	} else {
		list_add_tail(&probe_ent->node, &ata_probe_list);
	}
	spin_unlock(&ata_module_lock);

	return 0;

err_out_regions:
	if (legacy_mode & (1 << 0))
		release_region(0x1f0, 8);
	if (legacy_mode & (1 << 1))
		release_region(0x170, 8);
	pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}

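/* Example: the probe routine of a hypothetical PCI driver built on
 * ata_pci_init_one(), a sketch not from the original file.
 * "mydrv_sht" and "mydrv_ops" are assumed to be the driver's SCSI host
 * template and ata_port_operations; pio_mask 0x1f (PIO modes 0-4) is
 * an example value only.
 *
 *	static struct ata_port_info mydrv_port_info = {
 *		.sht		= &mydrv_sht,
 *		.pio_mask	= 0x1f,
 *		.port_ops	= &mydrv_ops,
 *	};
 *
 *	static int mydrv_init_one(struct pci_dev *pdev,
 *				  const struct pci_device_id *id)
 *	{
 *		struct ata_port_info *port_info[1] = { &mydrv_port_info };
 *
 *		return ata_pci_init_one(pdev, port_info, 1);
 *	}
 */
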
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
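
/* Example: testing a hypothetical one-byte enable register in PCI
 * config space, a sketch not from the original file.  The offset
 * (0x41) and bit (0x80) are purely illustrative; the field names are
 * the ones read by pci_test_config_bits() above.  Any result other
 * than 1 means the masked bits did not match (or the width was
 * invalid).
 *
 *	static const struct pci_bits mydrv_enable_bits = {
 *		.reg	= 0x41,
 *		.width	= 1,
 *		.mask	= 0x80,
 *		.val	= 0x80,
 *	};
 *
 *	if (pci_test_config_bits(pdev, &mydrv_enable_bits) != 1)
 *		return -ENODEV;
 */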
#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
}

module_init(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}

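/* Example: the intended use of ata_ratelimit() is to throttle
 * repetitive warnings, e.g. from interrupt paths, to at most one
 * message per HZ/5 jiffies (five per second); a sketch, not from the
 * original file:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */
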
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_detect);
EXPORT_SYMBOL_GPL(ata_add_to_probe_list);
EXPORT_SYMBOL_GPL(ssleep);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_dev_config);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#endif /* CONFIG_PCI */