1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69 #include <linux/pm_runtime.h>
70 
71 #include "libata.h"
72 #include "libata-transport.h"
73 
74 /* debounce timing parameters in msecs { interval, duration, timeout } */
75 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
76 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
77 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
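/*
 * Illustrative use (a sketch, not a call site in this file): reset paths pass
 * one of these tables as the debounce @params argument to helpers such as
 * sata_link_resume()/sata_link_debounce() defined later in this file, e.g.
 *
 *	rc = sata_link_resume(link, sata_deb_timing_hotplug, deadline);
 */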
78 
79 const struct ata_port_operations ata_base_port_ops = {
80 	.prereset		= ata_std_prereset,
81 	.postreset		= ata_std_postreset,
82 	.error_handler		= ata_std_error_handler,
83 };
84 
85 const struct ata_port_operations sata_port_ops = {
86 	.inherits		= &ata_base_port_ops,
87 
88 	.qc_defer		= ata_std_qc_defer,
89 	.hardreset		= sata_std_hardreset,
90 };
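/*
 * Sketch of how a low-level driver typically builds on these ops (assumed
 * names, not from this file): only the hooks that differ are overridden and
 * the rest is inherited through the .inherits chain.
 *
 *	static struct ata_port_operations my_sata_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,		// hypothetical hook
 *	};
 */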
91 
92 static unsigned int ata_dev_init_params(struct ata_device *dev,
93 					u16 heads, u16 sectors);
94 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
95 static void ata_dev_xfermask(struct ata_device *dev);
96 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97 
98 atomic_t ata_print_id = ATOMIC_INIT(0);
99 
100 struct ata_force_param {
101 	const char	*name;
102 	unsigned int	cbl;
103 	int		spd_limit;
104 	unsigned long	xfer_mask;
105 	unsigned int	horkage_on;
106 	unsigned int	horkage_off;
107 	unsigned int	lflags;
108 };
109 
110 struct ata_force_ent {
111 	int			port;
112 	int			device;
113 	struct ata_force_param	param;
114 };
115 
116 static struct ata_force_ent *ata_force_tbl;
117 static int ata_force_tbl_size;
118 
119 static char ata_force_param_buf[PAGE_SIZE] __initdata;
120 /* param_buf is thrown away after initialization, disallow read */
121 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
122 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
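/*
 * Illustrative "force" parameter values (assumed examples; see
 * Documentation/kernel-parameters.txt for the full syntax).  Entries are
 * comma separated and take the form [PORT[.DEVICE]:]VALUE:
 *
 *	libata.force=udma4			cap every device at UDMA/66
 *	libata.force=1:40c			force 40-wire cable on port 1
 *	libata.force=1.00:noncq,2:3.0Gbps	per-device and per-port entries
 */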
123 
124 static int atapi_enabled = 1;
125 module_param(atapi_enabled, int, 0444);
126 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
127 
128 static int atapi_dmadir = 0;
129 module_param(atapi_dmadir, int, 0444);
130 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
131 
132 int atapi_passthru16 = 1;
133 module_param(atapi_passthru16, int, 0444);
134 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
135 
136 int libata_fua = 0;
137 module_param_named(fua, libata_fua, int, 0444);
138 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
139 
140 static int ata_ignore_hpa;
141 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
142 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
143 
144 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
145 module_param_named(dma, libata_dma_mask, int, 0444);
146 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
147 
148 static int ata_probe_timeout;
149 module_param(ata_probe_timeout, int, 0444);
150 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
151 
152 int libata_noacpi = 0;
153 module_param_named(noacpi, libata_noacpi, int, 0444);
154 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
155 
156 int libata_allow_tpm = 0;
157 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
158 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
159 
160 static int atapi_an;
161 module_param(atapi_an, int, 0444);
162 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
163 
164 MODULE_AUTHOR("Jeff Garzik");
165 MODULE_DESCRIPTION("Library module for ATA devices");
166 MODULE_LICENSE("GPL");
167 MODULE_VERSION(DRV_VERSION);
168 
169 
170 static bool ata_sstatus_online(u32 sstatus)
171 {
172 	return (sstatus & 0xf) == 0x3;
173 }
174 
175 /**
176  *	ata_link_next - link iteration helper
177  *	@link: the previous link, NULL to start
178  *	@ap: ATA port containing links to iterate
179  *	@mode: iteration mode, one of ATA_LITER_*
180  *
181  *	LOCKING:
182  *	Host lock or EH context.
183  *
184  *	RETURNS:
185  *	Pointer to the next link.
186  */
187 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
188 			       enum ata_link_iter_mode mode)
189 {
190 	BUG_ON(mode != ATA_LITER_EDGE &&
191 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
192 
193 	/* NULL link indicates start of iteration */
194 	if (!link)
195 		switch (mode) {
196 		case ATA_LITER_EDGE:
197 		case ATA_LITER_PMP_FIRST:
198 			if (sata_pmp_attached(ap))
199 				return ap->pmp_link;
200 			/* fall through */
201 		case ATA_LITER_HOST_FIRST:
202 			return &ap->link;
203 		}
204 
205 	/* we just iterated over the host link, what's next? */
206 	if (link == &ap->link)
207 		switch (mode) {
208 		case ATA_LITER_HOST_FIRST:
209 			if (sata_pmp_attached(ap))
210 				return ap->pmp_link;
211 			/* fall through */
212 		case ATA_LITER_PMP_FIRST:
213 			if (unlikely(ap->slave_link))
214 				return ap->slave_link;
215 			/* fall through */
216 		case ATA_LITER_EDGE:
217 			return NULL;
218 		}
219 
220 	/* slave_link excludes PMP */
221 	if (unlikely(link == ap->slave_link))
222 		return NULL;
223 
224 	/* we were over a PMP link */
225 	if (++link < ap->pmp_link + ap->nr_pmp_links)
226 		return link;
227 
228 	if (mode == ATA_LITER_PMP_FIRST)
229 		return &ap->link;
230 
231 	return NULL;
232 }
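/*
 * Callers normally iterate through the ata_for_each_link() wrapper from
 * <linux/libata.h> instead of calling this directly, e.g. (illustrative):
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		handle_link(link);		// hypothetical per-link work
 */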
233 
234 /**
235  *	ata_dev_next - device iteration helper
236  *	@dev: the previous device, NULL to start
237  *	@link: ATA link containing devices to iterate
238  *	@mode: iteration mode, one of ATA_DITER_*
239  *
240  *	LOCKING:
241  *	Host lock or EH context.
242  *
243  *	RETURNS:
244  *	Pointer to the next device.
245  */
246 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
247 				enum ata_dev_iter_mode mode)
248 {
249 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
250 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
251 
252 	/* NULL dev indicates start of iteration */
253 	if (!dev)
254 		switch (mode) {
255 		case ATA_DITER_ENABLED:
256 		case ATA_DITER_ALL:
257 			dev = link->device;
258 			goto check;
259 		case ATA_DITER_ENABLED_REVERSE:
260 		case ATA_DITER_ALL_REVERSE:
261 			dev = link->device + ata_link_max_devices(link) - 1;
262 			goto check;
263 		}
264 
265  next:
266 	/* move to the next one */
267 	switch (mode) {
268 	case ATA_DITER_ENABLED:
269 	case ATA_DITER_ALL:
270 		if (++dev < link->device + ata_link_max_devices(link))
271 			goto check;
272 		return NULL;
273 	case ATA_DITER_ENABLED_REVERSE:
274 	case ATA_DITER_ALL_REVERSE:
275 		if (--dev >= link->device)
276 			goto check;
277 		return NULL;
278 	}
279 
280  check:
281 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
282 	    !ata_dev_enabled(dev))
283 		goto next;
284 	return dev;
285 }
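/*
 * As with links, the usual consumer is the ata_for_each_dev() wrapper from
 * <linux/libata.h>, e.g. (illustrative):
 *
 *	struct ata_device *dev;
 *
 *	ata_for_each_dev(dev, link, ENABLED)
 *		handle_dev(dev);		// hypothetical per-device work
 */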
286 
287 /**
288  *	ata_dev_phys_link - find physical link for a device
289  *	@dev: ATA device to look up physical link for
290  *
291  *	Look up physical link which @dev is attached to.  Note that
292  *	this is different from @dev->link only when @dev is on slave
293  *	link.  For all other cases, it's the same as @dev->link.
294  *
295  *	LOCKING:
296  *	Don't care.
297  *
298  *	RETURNS:
299  *	Pointer to the found physical link.
300  */
301 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
302 {
303 	struct ata_port *ap = dev->link->ap;
304 
305 	if (!ap->slave_link)
306 		return dev->link;
307 	if (!dev->devno)
308 		return &ap->link;
309 	return ap->slave_link;
310 }
311 
312 /**
313  *	ata_force_cbl - force cable type according to libata.force
314  *	@ap: ATA port of interest
315  *
316  *	Force cable type according to libata.force and whine about it.
317  *	The last entry which has matching port number is used, so it
318  *	can be specified as part of device force parameters.  For
319  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
320  *	same effect.
321  *
322  *	LOCKING:
323  *	EH context.
324  */
325 void ata_force_cbl(struct ata_port *ap)
326 {
327 	int i;
328 
329 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
330 		const struct ata_force_ent *fe = &ata_force_tbl[i];
331 
332 		if (fe->port != -1 && fe->port != ap->print_id)
333 			continue;
334 
335 		if (fe->param.cbl == ATA_CBL_NONE)
336 			continue;
337 
338 		ap->cbl = fe->param.cbl;
339 		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
340 		return;
341 	}
342 }
343 
344 /**
345  *	ata_force_link_limits - force link limits according to libata.force
346  *	@link: ATA link of interest
347  *
348  *	Force link flags and SATA spd limit according to libata.force
349  *	and whine about it.  When only the port part is specified
350  *	(e.g. 1:), the limit applies to both the host link and all
351  *	fan-out links connected via PMP.  If the
352  *	device part is specified as 0 (e.g. 1.00:), it specifies the
353  *	first fan-out link, not the host link.  Device number 15 always
354  *	points to the host link whether PMP is attached or not.  If the
355  *	controller has slave link, device number 16 points to it.
356  *
357  *	LOCKING:
358  *	EH context.
359  */
360 static void ata_force_link_limits(struct ata_link *link)
361 {
362 	bool did_spd = false;
363 	int linkno = link->pmp;
364 	int i;
365 
366 	if (ata_is_host_link(link))
367 		linkno += 15;
368 
369 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
370 		const struct ata_force_ent *fe = &ata_force_tbl[i];
371 
372 		if (fe->port != -1 && fe->port != link->ap->print_id)
373 			continue;
374 
375 		if (fe->device != -1 && fe->device != linkno)
376 			continue;
377 
378 		/* only honor the first spd limit */
379 		if (!did_spd && fe->param.spd_limit) {
380 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
381 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
382 					fe->param.name);
383 			did_spd = true;
384 		}
385 
386 		/* let lflags stack */
387 		if (fe->param.lflags) {
388 			link->flags |= fe->param.lflags;
389 			ata_link_notice(link,
390 					"FORCE: link flag 0x%x forced -> 0x%x\n",
391 					fe->param.lflags, link->flags);
392 		}
393 	}
394 }
395 
396 /**
397  *	ata_force_xfermask - force xfermask according to libata.force
398  *	@dev: ATA device of interest
399  *
400  *	Force xfer_mask according to libata.force and whine about it.
401  *	For consistency with link selection, device number 15 selects
402  *	the first device connected to the host link.
403  *
404  *	LOCKING:
405  *	EH context.
406  */
407 static void ata_force_xfermask(struct ata_device *dev)
408 {
409 	int devno = dev->link->pmp + dev->devno;
410 	int alt_devno = devno;
411 	int i;
412 
413 	/* allow n.15/16 for devices attached to host port */
414 	if (ata_is_host_link(dev->link))
415 		alt_devno += 15;
416 
417 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
418 		const struct ata_force_ent *fe = &ata_force_tbl[i];
419 		unsigned long pio_mask, mwdma_mask, udma_mask;
420 
421 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
422 			continue;
423 
424 		if (fe->device != -1 && fe->device != devno &&
425 		    fe->device != alt_devno)
426 			continue;
427 
428 		if (!fe->param.xfer_mask)
429 			continue;
430 
431 		ata_unpack_xfermask(fe->param.xfer_mask,
432 				    &pio_mask, &mwdma_mask, &udma_mask);
433 		if (udma_mask)
434 			dev->udma_mask = udma_mask;
435 		else if (mwdma_mask) {
436 			dev->udma_mask = 0;
437 			dev->mwdma_mask = mwdma_mask;
438 		} else {
439 			dev->udma_mask = 0;
440 			dev->mwdma_mask = 0;
441 			dev->pio_mask = pio_mask;
442 		}
443 
444 		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
445 			       fe->param.name);
446 		return;
447 	}
448 }
449 
450 /**
451  *	ata_force_horkage - force horkage according to libata.force
452  *	@dev: ATA device of interest
453  *
454  *	Force horkage according to libata.force and whine about it.
455  *	For consistency with link selection, device number 15 selects
456  *	the first device connected to the host link.
457  *
458  *	LOCKING:
459  *	EH context.
460  */
461 static void ata_force_horkage(struct ata_device *dev)
462 {
463 	int devno = dev->link->pmp + dev->devno;
464 	int alt_devno = devno;
465 	int i;
466 
467 	/* allow n.15/16 for devices attached to host port */
468 	if (ata_is_host_link(dev->link))
469 		alt_devno += 15;
470 
471 	for (i = 0; i < ata_force_tbl_size; i++) {
472 		const struct ata_force_ent *fe = &ata_force_tbl[i];
473 
474 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
475 			continue;
476 
477 		if (fe->device != -1 && fe->device != devno &&
478 		    fe->device != alt_devno)
479 			continue;
480 
481 		if (!(~dev->horkage & fe->param.horkage_on) &&
482 		    !(dev->horkage & fe->param.horkage_off))
483 			continue;
484 
485 		dev->horkage |= fe->param.horkage_on;
486 		dev->horkage &= ~fe->param.horkage_off;
487 
488 		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
489 			       fe->param.name);
490 	}
491 }
492 
493 /**
494  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
495  *	@opcode: SCSI opcode
496  *
497  *	Determine ATAPI command type from @opcode.
498  *
499  *	LOCKING:
500  *	None.
501  *
502  *	RETURNS:
503  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
504  */
505 int atapi_cmd_type(u8 opcode)
506 {
507 	switch (opcode) {
508 	case GPCMD_READ_10:
509 	case GPCMD_READ_12:
510 		return ATAPI_READ;
511 
512 	case GPCMD_WRITE_10:
513 	case GPCMD_WRITE_12:
514 	case GPCMD_WRITE_AND_VERIFY_10:
515 		return ATAPI_WRITE;
516 
517 	case GPCMD_READ_CD:
518 	case GPCMD_READ_CD_MSF:
519 		return ATAPI_READ_CD;
520 
521 	case ATA_16:
522 	case ATA_12:
523 		if (atapi_passthru16)
524 			return ATAPI_PASS_THRU;
525 		/* fall thru */
526 	default:
527 		return ATAPI_MISC;
528 	}
529 }
530 
531 /**
532  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
533  *	@tf: Taskfile to convert
534  *	@pmp: Port multiplier port
535  *	@is_cmd: This FIS is for command
536  *	@fis: Buffer into which data will be output
537  *
538  *	Converts a standard ATA taskfile to a Serial ATA
539  *	FIS structure (Register - Host to Device).
540  *
541  *	LOCKING:
542  *	Inherited from caller.
543  */
544 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
545 {
546 	fis[0] = 0x27;			/* Register - Host to Device FIS */
547 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
548 	if (is_cmd)
549 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
550 
551 	fis[2] = tf->command;
552 	fis[3] = tf->feature;
553 
554 	fis[4] = tf->lbal;
555 	fis[5] = tf->lbam;
556 	fis[6] = tf->lbah;
557 	fis[7] = tf->device;
558 
559 	fis[8] = tf->hob_lbal;
560 	fis[9] = tf->hob_lbam;
561 	fis[10] = tf->hob_lbah;
562 	fis[11] = tf->hob_feature;
563 
564 	fis[12] = tf->nsect;
565 	fis[13] = tf->hob_nsect;
566 	fis[14] = 0;
567 	fis[15] = tf->ctl;
568 
569 	fis[16] = 0;
570 	fis[17] = 0;
571 	fis[18] = 0;
572 	fis[19] = 0;
573 }
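/*
 * Worked example (assuming ATA_CMD_ID_ATA == 0xec): an IDENTIFY DEVICE
 * command FIS for PMP port 0 begins
 *
 *	fis[0] = 0x27	(Register - Host to Device FIS type)
 *	fis[1] = 0x80	(command bit set, PMP field 0)
 *	fis[2] = 0xec	(taskfile command byte)
 */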
574 
575 /**
576  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
577  *	@fis: Buffer from which data will be input
578  *	@tf: Taskfile to output
579  *
580  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
581  *
582  *	LOCKING:
583  *	Inherited from caller.
584  */
585 
586 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
587 {
588 	tf->command	= fis[2];	/* status */
589 	tf->feature	= fis[3];	/* error */
590 
591 	tf->lbal	= fis[4];
592 	tf->lbam	= fis[5];
593 	tf->lbah	= fis[6];
594 	tf->device	= fis[7];
595 
596 	tf->hob_lbal	= fis[8];
597 	tf->hob_lbam	= fis[9];
598 	tf->hob_lbah	= fis[10];
599 
600 	tf->nsect	= fis[12];
601 	tf->hob_nsect	= fis[13];
602 }
603 
604 static const u8 ata_rw_cmds[] = {
605 	/* pio multi */
606 	ATA_CMD_READ_MULTI,
607 	ATA_CMD_WRITE_MULTI,
608 	ATA_CMD_READ_MULTI_EXT,
609 	ATA_CMD_WRITE_MULTI_EXT,
610 	0,
611 	0,
612 	0,
613 	ATA_CMD_WRITE_MULTI_FUA_EXT,
614 	/* pio */
615 	ATA_CMD_PIO_READ,
616 	ATA_CMD_PIO_WRITE,
617 	ATA_CMD_PIO_READ_EXT,
618 	ATA_CMD_PIO_WRITE_EXT,
619 	0,
620 	0,
621 	0,
622 	0,
623 	/* dma */
624 	ATA_CMD_READ,
625 	ATA_CMD_WRITE,
626 	ATA_CMD_READ_EXT,
627 	ATA_CMD_WRITE_EXT,
628 	0,
629 	0,
630 	0,
631 	ATA_CMD_WRITE_FUA_EXT
632 };
633 
634 /**
635  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
636  *	@tf: command to examine and configure
637  *	@dev: device tf belongs to
638  *
639  *	Examine the device configuration and tf->flags to calculate
640  *	the proper read/write commands and protocol to use.
641  *
642  *	LOCKING:
643  *	caller.
644  */
645 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
646 {
647 	u8 cmd;
648 
649 	int index, fua, lba48, write;
650 
651 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
652 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
653 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
654 
655 	if (dev->flags & ATA_DFLAG_PIO) {
656 		tf->protocol = ATA_PROT_PIO;
657 		index = dev->multi_count ? 0 : 8;
658 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
659 		/* Unable to use DMA due to host limitation */
660 		tf->protocol = ATA_PROT_PIO;
661 		index = dev->multi_count ? 0 : 8;
662 	} else {
663 		tf->protocol = ATA_PROT_DMA;
664 		index = 16;
665 	}
666 
667 	cmd = ata_rw_cmds[index + fua + lba48 + write];
668 	if (cmd) {
669 		tf->command = cmd;
670 		return 0;
671 	}
672 	return -1;
673 }
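/*
 * Worked example of the lookup above: a DMA-capable LBA48 FUA write uses
 * index 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) == 23, i.e.
 * ata_rw_cmds[23] == ATA_CMD_WRITE_FUA_EXT.
 */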
674 
675 /**
676  *	ata_tf_read_block - Read block address from ATA taskfile
677  *	@tf: ATA taskfile of interest
678  *	@dev: ATA device @tf belongs to
679  *
680  *	LOCKING:
681  *	None.
682  *
683  *	Read block address from @tf.  This function can handle all
684  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
685  *	flags select the address format to use.
686  *
687  *	RETURNS:
688  *	Block address read from @tf.
689  */
690 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
691 {
692 	u64 block = 0;
693 
694 	if (tf->flags & ATA_TFLAG_LBA) {
695 		if (tf->flags & ATA_TFLAG_LBA48) {
696 			block |= (u64)tf->hob_lbah << 40;
697 			block |= (u64)tf->hob_lbam << 32;
698 			block |= (u64)tf->hob_lbal << 24;
699 		} else
700 			block |= (tf->device & 0xf) << 24;
701 
702 		block |= tf->lbah << 16;
703 		block |= tf->lbam << 8;
704 		block |= tf->lbal;
705 	} else {
706 		u32 cyl, head, sect;
707 
708 		cyl = tf->lbam | (tf->lbah << 8);
709 		head = tf->device & 0xf;
710 		sect = tf->lbal;
711 
712 		if (!sect) {
713 			ata_dev_warn(dev,
714 				     "device reported invalid CHS sector 0\n");
715 			sect = 1; /* oh well */
716 		}
717 
718 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
719 	}
720 
721 	return block;
722 }
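/*
 * Worked CHS example for the formula above: with dev->heads == 16 and
 * dev->sectors == 63, a taskfile addressing cyl 2, head 3, sect 4 yields
 * block == (2 * 16 + 3) * 63 + 4 - 1 == 2208.
 */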
723 
724 /**
725  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
726  *	@tf: Target ATA taskfile
727  *	@dev: ATA device @tf belongs to
728  *	@block: Block address
729  *	@n_block: Number of blocks
730  *	@tf_flags: RW/FUA etc...
731  *	@tag: tag
732  *
733  *	LOCKING:
734  *	None.
735  *
736  *	Build ATA taskfile @tf for read/write request described by
737  *	@block, @n_block, @tf_flags and @tag on @dev.
738  *
739  *	RETURNS:
740  *
741  *	0 on success, -ERANGE if the request is too large for @dev,
742  *	-EINVAL if the request is invalid.
743  */
744 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
745 		    u64 block, u32 n_block, unsigned int tf_flags,
746 		    unsigned int tag)
747 {
748 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
749 	tf->flags |= tf_flags;
750 
751 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
752 		/* yay, NCQ */
753 		if (!lba_48_ok(block, n_block))
754 			return -ERANGE;
755 
756 		tf->protocol = ATA_PROT_NCQ;
757 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
758 
759 		if (tf->flags & ATA_TFLAG_WRITE)
760 			tf->command = ATA_CMD_FPDMA_WRITE;
761 		else
762 			tf->command = ATA_CMD_FPDMA_READ;
763 
764 		tf->nsect = tag << 3;
765 		tf->hob_feature = (n_block >> 8) & 0xff;
766 		tf->feature = n_block & 0xff;
767 
768 		tf->hob_lbah = (block >> 40) & 0xff;
769 		tf->hob_lbam = (block >> 32) & 0xff;
770 		tf->hob_lbal = (block >> 24) & 0xff;
771 		tf->lbah = (block >> 16) & 0xff;
772 		tf->lbam = (block >> 8) & 0xff;
773 		tf->lbal = block & 0xff;
774 
775 		tf->device = 1 << 6;
776 		if (tf->flags & ATA_TFLAG_FUA)
777 			tf->device |= 1 << 7;
778 	} else if (dev->flags & ATA_DFLAG_LBA) {
779 		tf->flags |= ATA_TFLAG_LBA;
780 
781 		if (lba_28_ok(block, n_block)) {
782 			/* use LBA28 */
783 			tf->device |= (block >> 24) & 0xf;
784 		} else if (lba_48_ok(block, n_block)) {
785 			if (!(dev->flags & ATA_DFLAG_LBA48))
786 				return -ERANGE;
787 
788 			/* use LBA48 */
789 			tf->flags |= ATA_TFLAG_LBA48;
790 
791 			tf->hob_nsect = (n_block >> 8) & 0xff;
792 
793 			tf->hob_lbah = (block >> 40) & 0xff;
794 			tf->hob_lbam = (block >> 32) & 0xff;
795 			tf->hob_lbal = (block >> 24) & 0xff;
796 		} else
797 			/* request too large even for LBA48 */
798 			return -ERANGE;
799 
800 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
801 			return -EINVAL;
802 
803 		tf->nsect = n_block & 0xff;
804 
805 		tf->lbah = (block >> 16) & 0xff;
806 		tf->lbam = (block >> 8) & 0xff;
807 		tf->lbal = block & 0xff;
808 
809 		tf->device |= ATA_LBA;
810 	} else {
811 		/* CHS */
812 		u32 sect, head, cyl, track;
813 
814 		/* The request -may- be too large for CHS addressing. */
815 		if (!lba_28_ok(block, n_block))
816 			return -ERANGE;
817 
818 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
819 			return -EINVAL;
820 
821 		/* Convert LBA to CHS */
822 		track = (u32)block / dev->sectors;
823 		cyl   = track / dev->heads;
824 		head  = track % dev->heads;
825 		sect  = (u32)block % dev->sectors + 1;
826 
827 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
828 			(u32)block, track, cyl, head, sect);
829 
830 		/* Check whether the converted CHS can fit.
831 		   Cylinder: 0-65535
832 		   Head: 0-15
833 		   Sector: 1-255 */
834 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
835 			return -ERANGE;
836 
837 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
838 		tf->lbal = sect;
839 		tf->lbam = cyl;
840 		tf->lbah = cyl >> 8;
841 		tf->device |= head;
842 	}
843 
844 	return 0;
845 }
846 
847 /**
848  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
849  *	@pio_mask: pio_mask
850  *	@mwdma_mask: mwdma_mask
851  *	@udma_mask: udma_mask
852  *
853  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
854  *	unsigned int xfer_mask.
855  *
856  *	LOCKING:
857  *	None.
858  *
859  *	RETURNS:
860  *	Packed xfer_mask.
861  */
862 unsigned long ata_pack_xfermask(unsigned long pio_mask,
863 				unsigned long mwdma_mask,
864 				unsigned long udma_mask)
865 {
866 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
867 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
868 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
869 }
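/*
 * Illustrative use: drivers typically pack the same cumulative mode masks
 * they advertise in struct ata_port_info, e.g.
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *
 * and ata_unpack_xfermask() below splits the result back into its three
 * components.
 */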
870 
871 /**
872  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
873  *	@xfer_mask: xfer_mask to unpack
874  *	@pio_mask: resulting pio_mask
875  *	@mwdma_mask: resulting mwdma_mask
876  *	@udma_mask: resulting udma_mask
877  *
878  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
879  *	Any NULL destination masks will be ignored.
880  */
881 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
882 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
883 {
884 	if (pio_mask)
885 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
886 	if (mwdma_mask)
887 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
888 	if (udma_mask)
889 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
890 }
891 
892 static const struct ata_xfer_ent {
893 	int shift, bits;
894 	u8 base;
895 } ata_xfer_tbl[] = {
896 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
897 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
898 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
899 	{ -1, },
900 };
901 
902 /**
903  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
904  *	@xfer_mask: xfer_mask of interest
905  *
906  *	Return matching XFER_* value for @xfer_mask.  Only the highest
907  *	bit of @xfer_mask is considered.
908  *
909  *	LOCKING:
910  *	None.
911  *
912  *	RETURNS:
913  *	Matching XFER_* value, 0xff if no match found.
914  */
915 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
916 {
917 	int highbit = fls(xfer_mask) - 1;
918 	const struct ata_xfer_ent *ent;
919 
920 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
921 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
922 			return ent->base + highbit - ent->shift;
923 	return 0xff;
924 }
925 
926 /**
927  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
928  *	@xfer_mode: XFER_* of interest
929  *
930  *	Return matching xfer_mask for @xfer_mode.
931  *
932  *	LOCKING:
933  *	None.
934  *
935  *	RETURNS:
936  *	Matching xfer_mask, 0 if no match found.
937  */
938 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
939 {
940 	const struct ata_xfer_ent *ent;
941 
942 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
943 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
944 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
945 				& ~((1 << ent->shift) - 1);
946 	return 0;
947 }
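/*
 * Note that the returned mask is cumulative: e.g. ata_xfer_mode2mask(XFER_UDMA_2)
 * covers UDMA modes 0-2 rather than UDMA2 alone, matching the way devices
 * advertise every mode up to their maximum.
 */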
948 
949 /**
950  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
951  *	@xfer_mode: XFER_* of interest
952  *
953  *	Return matching xfer_shift for @xfer_mode.
954  *
955  *	LOCKING:
956  *	None.
957  *
958  *	RETURNS:
959  *	Matching xfer_shift, -1 if no match found.
960  */
961 int ata_xfer_mode2shift(unsigned long xfer_mode)
962 {
963 	const struct ata_xfer_ent *ent;
964 
965 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
966 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
967 			return ent->shift;
968 	return -1;
969 }
970 
971 /**
972  *	ata_mode_string - convert xfer_mask to string
973  *	@xfer_mask: mask of bits supported; only highest bit counts.
974  *
975  *	Determine string which represents the highest speed
976  *	(highest bit in @xfer_mask).
977  *
978  *	LOCKING:
979  *	None.
980  *
981  *	RETURNS:
982  *	Constant C string representing highest speed listed in
983  *	@xfer_mask, or the constant C string "<n/a>".
984  */
985 const char *ata_mode_string(unsigned long xfer_mask)
986 {
987 	static const char * const xfer_mode_str[] = {
988 		"PIO0",
989 		"PIO1",
990 		"PIO2",
991 		"PIO3",
992 		"PIO4",
993 		"PIO5",
994 		"PIO6",
995 		"MWDMA0",
996 		"MWDMA1",
997 		"MWDMA2",
998 		"MWDMA3",
999 		"MWDMA4",
1000 		"UDMA/16",
1001 		"UDMA/25",
1002 		"UDMA/33",
1003 		"UDMA/44",
1004 		"UDMA/66",
1005 		"UDMA/100",
1006 		"UDMA/133",
1007 		"UDMA7",
1008 	};
1009 	int highbit;
1010 
1011 	highbit = fls(xfer_mask) - 1;
1012 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1013 		return xfer_mode_str[highbit];
1014 	return "<n/a>";
1015 }
1016 
1017 const char *sata_spd_string(unsigned int spd)
1018 {
1019 	static const char * const spd_str[] = {
1020 		"1.5 Gbps",
1021 		"3.0 Gbps",
1022 		"6.0 Gbps",
1023 	};
1024 
1025 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1026 		return "<unknown>";
1027 	return spd_str[spd - 1];
1028 }
1029 
1030 /**
1031  *	ata_dev_classify - determine device type based on ATA-spec signature
1032  *	@tf: ATA taskfile register set for device to be identified
1033  *
1034  *	Determine from taskfile register contents whether a device is
1035  *	ATA or ATAPI, as per "Signature and persistence" section
1036  *	of ATA/PI spec (volume 1, sect 5.14).
1037  *
1038  *	LOCKING:
1039  *	None.
1040  *
1041  *	RETURNS:
1042  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1043  *	%ATA_DEV_UNKNOWN in the event of failure.
1044  */
1045 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1046 {
1047 	/* Apple's open source Darwin code hints that some devices only
1048 	 * put a proper signature into the LBA mid/high registers,
1049 	 * so we only check those.  It's sufficient for uniqueness.
1050 	 *
1051 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1052 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1053 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1054 	 * spec has never mentioned using different signatures
1055 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1056 	 * Multiplier specification began to use 0x69/0x96 to identify
1057 	 * port multipliers and 0x3c/0xc3 to identify a SEMB device.
1058 	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1059 	 * 0x69/0x96 shortly afterwards and described them as reserved for
1060 	 * SerialATA.
1061 	 *
1062 	 * We follow the current spec and consider that 0x69/0x96
1063 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1064 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1065 	 * SEMB signature.  This is worked around in
1066 	 * ata_dev_read_id().
1067 	 */
1068 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1069 		DPRINTK("found ATA device by sig\n");
1070 		return ATA_DEV_ATA;
1071 	}
1072 
1073 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1074 		DPRINTK("found ATAPI device by sig\n");
1075 		return ATA_DEV_ATAPI;
1076 	}
1077 
1078 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1079 		DPRINTK("found PMP device by sig\n");
1080 		return ATA_DEV_PMP;
1081 	}
1082 
1083 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1084 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1085 		return ATA_DEV_SEMB;
1086 	}
1087 
1088 	DPRINTK("unknown device\n");
1089 	return ATA_DEV_UNKNOWN;
1090 }
1091 
1092 /**
1093  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1094  *	@id: IDENTIFY DEVICE results we will examine
1095  *	@s: string into which data is output
1096  *	@ofs: offset into identify device page
1097  *	@len: length of string to return. must be an even number.
1098  *
1099  *	The strings in the IDENTIFY DEVICE page are broken up into
1100  *	16-bit chunks.  Run through the string, and output each
1101  *	8-bit chunk linearly, regardless of platform.
1102  *
1103  *	LOCKING:
1104  *	caller.
1105  */
1106 
1107 void ata_id_string(const u16 *id, unsigned char *s,
1108 		   unsigned int ofs, unsigned int len)
1109 {
1110 	unsigned int c;
1111 
1112 	BUG_ON(len & 1);
1113 
1114 	while (len > 0) {
1115 		c = id[ofs] >> 8;
1116 		*s = c;
1117 		s++;
1118 
1119 		c = id[ofs] & 0xff;
1120 		*s = c;
1121 		s++;
1122 
1123 		ofs++;
1124 		len -= 2;
1125 	}
1126 }
1127 
1128 /**
1129  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1130  *	@id: IDENTIFY DEVICE results we will examine
1131  *	@s: string into which data is output
1132  *	@ofs: offset into identify device page
1133  *	@len: length of string to return. must be an odd number.
1134  *
1135  *	This function is identical to ata_id_string except that it
1136  *	trims trailing spaces and terminates the resulting string with
1137  *	null.  @len must be actual maximum length (even number) + 1.
1138  *
1139  *	LOCKING:
1140  *	caller.
1141  */
1142 void ata_id_c_string(const u16 *id, unsigned char *s,
1143 		     unsigned int ofs, unsigned int len)
1144 {
1145 	unsigned char *p;
1146 
1147 	ata_id_string(id, s, ofs, len - 1);
1148 
1149 	p = s + strnlen(s, len - 1);
1150 	while (p > s && p[-1] == ' ')
1151 		p--;
1152 	*p = '\0';
1153 }
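/*
 * Typical (illustrative) use, pulling the model string out of IDENTIFY data:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */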
1154 
1155 static u64 ata_id_n_sectors(const u16 *id)
1156 {
1157 	if (ata_id_has_lba(id)) {
1158 		if (ata_id_has_lba48(id))
1159 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1160 		else
1161 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1162 	} else {
1163 		if (ata_id_current_chs_valid(id))
1164 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1165 			       id[ATA_ID_CUR_SECTORS];
1166 		else
1167 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1168 			       id[ATA_ID_SECTORS];
1169 	}
1170 }
1171 
1172 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1173 {
1174 	u64 sectors = 0;
1175 
1176 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1177 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1178 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1179 	sectors |= (tf->lbah & 0xff) << 16;
1180 	sectors |= (tf->lbam & 0xff) << 8;
1181 	sectors |= (tf->lbal & 0xff);
1182 
1183 	return sectors;
1184 }
1185 
1186 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1187 {
1188 	u64 sectors = 0;
1189 
1190 	sectors |= (tf->device & 0x0f) << 24;
1191 	sectors |= (tf->lbah & 0xff) << 16;
1192 	sectors |= (tf->lbam & 0xff) << 8;
1193 	sectors |= (tf->lbal & 0xff);
1194 
1195 	return sectors;
1196 }
1197 
1198 /**
1199  *	ata_read_native_max_address - Read native max address
1200  *	@dev: target device
1201  *	@max_sectors: out parameter for the result native max address
1202  *
1203  *	Perform an LBA48 or LBA28 native size query upon the device in
1204  *	question.
1205  *
1206  *	RETURNS:
1207  *	0 on success, -EACCES if command is aborted by the drive.
1208  *	-EIO on other errors.
1209  */
1210 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1211 {
1212 	unsigned int err_mask;
1213 	struct ata_taskfile tf;
1214 	int lba48 = ata_id_has_lba48(dev->id);
1215 
1216 	ata_tf_init(dev, &tf);
1217 
1218 	/* always clear all address registers */
1219 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1220 
1221 	if (lba48) {
1222 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1223 		tf.flags |= ATA_TFLAG_LBA48;
1224 	} else
1225 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1226 
1227 	tf.protocol |= ATA_PROT_NODATA;
1228 	tf.device |= ATA_LBA;
1229 
1230 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1231 	if (err_mask) {
1232 		ata_dev_warn(dev,
1233 			     "failed to read native max address (err_mask=0x%x)\n",
1234 			     err_mask);
1235 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1236 			return -EACCES;
1237 		return -EIO;
1238 	}
1239 
1240 	if (lba48)
1241 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1242 	else
1243 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1244 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1245 		(*max_sectors)--;
1246 	return 0;
1247 }
1248 
1249 /**
1250  *	ata_set_max_sectors - Set max sectors
1251  *	@dev: target device
1252  *	@new_sectors: new max sectors value to set for the device
1253  *
1254  *	Set max sectors of @dev to @new_sectors.
1255  *
1256  *	RETURNS:
1257  *	0 on success, -EACCES if command is aborted or denied (due to
1258  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1259  *	errors.
1260  */
1261 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1262 {
1263 	unsigned int err_mask;
1264 	struct ata_taskfile tf;
1265 	int lba48 = ata_id_has_lba48(dev->id);
1266 
1267 	new_sectors--;
1268 
1269 	ata_tf_init(dev, &tf);
1270 
1271 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1272 
1273 	if (lba48) {
1274 		tf.command = ATA_CMD_SET_MAX_EXT;
1275 		tf.flags |= ATA_TFLAG_LBA48;
1276 
1277 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1278 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1279 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1280 	} else {
1281 		tf.command = ATA_CMD_SET_MAX;
1282 
1283 		tf.device |= (new_sectors >> 24) & 0xf;
1284 	}
1285 
1286 	tf.protocol |= ATA_PROT_NODATA;
1287 	tf.device |= ATA_LBA;
1288 
1289 	tf.lbal = (new_sectors >> 0) & 0xff;
1290 	tf.lbam = (new_sectors >> 8) & 0xff;
1291 	tf.lbah = (new_sectors >> 16) & 0xff;
1292 
1293 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1294 	if (err_mask) {
1295 		ata_dev_warn(dev,
1296 			     "failed to set max address (err_mask=0x%x)\n",
1297 			     err_mask);
1298 		if (err_mask == AC_ERR_DEV &&
1299 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1300 			return -EACCES;
1301 		return -EIO;
1302 	}
1303 
1304 	return 0;
1305 }
1306 
1307 /**
1308  *	ata_hpa_resize		-	Resize a device with an HPA set
1309  *	@dev: Device to resize
1310  *
1311  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1312  *	it if required to the full size of the media. The caller must check
1313  *	the drive has the HPA feature set enabled.
1314  *
1315  *	RETURNS:
1316  *	0 on success, -errno on failure.
1317  */
1318 static int ata_hpa_resize(struct ata_device *dev)
1319 {
1320 	struct ata_eh_context *ehc = &dev->link->eh_context;
1321 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1322 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1323 	u64 sectors = ata_id_n_sectors(dev->id);
1324 	u64 native_sectors;
1325 	int rc;
1326 
1327 	/* do we need to do it? */
1328 	if (dev->class != ATA_DEV_ATA ||
1329 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1330 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1331 		return 0;
1332 
1333 	/* read native max address */
1334 	rc = ata_read_native_max_address(dev, &native_sectors);
1335 	if (rc) {
1336 		/* If device aborted the command or HPA isn't going to
1337 		 * be unlocked, skip HPA resizing.
1338 		 */
1339 		if (rc == -EACCES || !unlock_hpa) {
1340 			ata_dev_warn(dev,
1341 				     "HPA support seems broken, skipping HPA handling\n");
1342 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1343 
1344 			/* we can continue if device aborted the command */
1345 			if (rc == -EACCES)
1346 				rc = 0;
1347 		}
1348 
1349 		return rc;
1350 	}
1351 	dev->n_native_sectors = native_sectors;
1352 
1353 	/* nothing to do? */
1354 	if (native_sectors <= sectors || !unlock_hpa) {
1355 		if (!print_info || native_sectors == sectors)
1356 			return 0;
1357 
1358 		if (native_sectors > sectors)
1359 			ata_dev_info(dev,
1360 				"HPA detected: current %llu, native %llu\n",
1361 				(unsigned long long)sectors,
1362 				(unsigned long long)native_sectors);
1363 		else if (native_sectors < sectors)
1364 			ata_dev_warn(dev,
1365 				"native sectors (%llu) is smaller than sectors (%llu)\n",
1366 				(unsigned long long)native_sectors,
1367 				(unsigned long long)sectors);
1368 		return 0;
1369 	}
1370 
1371 	/* let's unlock HPA */
1372 	rc = ata_set_max_sectors(dev, native_sectors);
1373 	if (rc == -EACCES) {
1374 		/* if device aborted the command, skip HPA resizing */
1375 		ata_dev_warn(dev,
1376 			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1377 			     (unsigned long long)sectors,
1378 			     (unsigned long long)native_sectors);
1379 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1380 		return 0;
1381 	} else if (rc)
1382 		return rc;
1383 
1384 	/* re-read IDENTIFY data */
1385 	rc = ata_dev_reread_id(dev, 0);
1386 	if (rc) {
1387 		ata_dev_err(dev,
1388 			    "failed to re-read IDENTIFY data after HPA resizing\n");
1389 		return rc;
1390 	}
1391 
1392 	if (print_info) {
1393 		u64 new_sectors = ata_id_n_sectors(dev->id);
1394 		ata_dev_info(dev,
1395 			"HPA unlocked: %llu -> %llu, native %llu\n",
1396 			(unsigned long long)sectors,
1397 			(unsigned long long)new_sectors,
1398 			(unsigned long long)native_sectors);
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 /**
1405  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1406  *	@id: IDENTIFY DEVICE page to dump
1407  *
1408  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1409  *	page.
1410  *
1411  *	LOCKING:
1412  *	caller.
1413  */
1414 
1415 static inline void ata_dump_id(const u16 *id)
1416 {
1417 	DPRINTK("49==0x%04x  "
1418 		"53==0x%04x  "
1419 		"63==0x%04x  "
1420 		"64==0x%04x  "
1421 		"75==0x%04x  \n",
1422 		id[49],
1423 		id[53],
1424 		id[63],
1425 		id[64],
1426 		id[75]);
1427 	DPRINTK("80==0x%04x  "
1428 		"81==0x%04x  "
1429 		"82==0x%04x  "
1430 		"83==0x%04x  "
1431 		"84==0x%04x  \n",
1432 		id[80],
1433 		id[81],
1434 		id[82],
1435 		id[83],
1436 		id[84]);
1437 	DPRINTK("88==0x%04x  "
1438 		"93==0x%04x\n",
1439 		id[88],
1440 		id[93]);
1441 }
1442 
1443 /**
1444  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1445  *	@id: IDENTIFY data to compute xfer mask from
1446  *
1447  *	Compute the xfermask for this device. This is not as trivial
1448  *	as it seems if we must consider early devices correctly.
1449  *
1450  *	FIXME: pre IDE drive timing (do we care ?).
1451  *
1452  *	LOCKING:
1453  *	None.
1454  *
1455  *	RETURNS:
1456  *	Computed xfermask
1457  */
1458 unsigned long ata_id_xfermask(const u16 *id)
1459 {
1460 	unsigned long pio_mask, mwdma_mask, udma_mask;
1461 
1462 	/* Usual case. Word 53 indicates word 64 is valid */
1463 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1464 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1465 		pio_mask <<= 3;
1466 		pio_mask |= 0x7;
1467 	} else {
1468 		/* If word 64 isn't valid then Word 51 high byte holds
1469 		 * the PIO timing number for the maximum. Turn it into
1470 		 * a mask.
1471 		 */
1472 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1473 		if (mode < 5)	/* Valid PIO range */
1474 			pio_mask = (2 << mode) - 1;
1475 		else
1476 			pio_mask = 1;
1477 
1478 		/* But wait.. there's more. Design your standards by
1479 		 * committee and you too can get a free iordy field to
1480 		 * process. However it's the speeds, not the modes, that
1481 		 * are supported... Note drivers using the timing API
1482 		 * will get this right anyway
1483 		 */
1484 	}
1485 
1486 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1487 
1488 	if (ata_id_is_cfa(id)) {
1489 		/*
1490 		 *	Process compact flash extended modes
1491 		 */
1492 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1493 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1494 
1495 		if (pio)
1496 			pio_mask |= (1 << 5);
1497 		if (pio > 1)
1498 			pio_mask |= (1 << 6);
1499 		if (dma)
1500 			mwdma_mask |= (1 << 3);
1501 		if (dma > 1)
1502 			mwdma_mask |= (1 << 4);
1503 	}
1504 
1505 	udma_mask = 0;
1506 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1507 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1508 
1509 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1510 }
1511 
1512 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1513 {
1514 	struct completion *waiting = qc->private_data;
1515 
1516 	complete(waiting);
1517 }
1518 
1519 /**
1520  *	ata_exec_internal_sg - execute libata internal command
1521  *	@dev: Device to which the command is sent
1522  *	@tf: Taskfile registers for the command and the result
1523  *	@cdb: CDB for packet command
1524  *	@dma_dir: Data transfer direction of the command
1525  *	@sgl: sg list for the data buffer of the command
1526  *	@n_elem: Number of sg entries
1527  *	@timeout: Timeout in msecs (0 for default)
1528  *
1529  *	Executes libata internal command with timeout.  @tf contains
1530  *	command on entry and result on return.  Timeout and error
1531  *	conditions are reported via return value.  No recovery action
1532  *	is taken after a command times out.  It's caller's duty to
1533  *	clean up after timeout.
1534  *
1535  *	LOCKING:
1536  *	None.  Should be called with kernel context, might sleep.
1537  *
1538  *	RETURNS:
1539  *	Zero on success, AC_ERR_* mask on failure
1540  */
1541 unsigned ata_exec_internal_sg(struct ata_device *dev,
1542 			      struct ata_taskfile *tf, const u8 *cdb,
1543 			      int dma_dir, struct scatterlist *sgl,
1544 			      unsigned int n_elem, unsigned long timeout)
1545 {
1546 	struct ata_link *link = dev->link;
1547 	struct ata_port *ap = link->ap;
1548 	u8 command = tf->command;
1549 	int auto_timeout = 0;
1550 	struct ata_queued_cmd *qc;
1551 	unsigned int tag, preempted_tag;
1552 	u32 preempted_sactive, preempted_qc_active;
1553 	int preempted_nr_active_links;
1554 	DECLARE_COMPLETION_ONSTACK(wait);
1555 	unsigned long flags;
1556 	unsigned int err_mask;
1557 	int rc;
1558 
1559 	spin_lock_irqsave(ap->lock, flags);
1560 
1561 	/* no internal command while frozen */
1562 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1563 		spin_unlock_irqrestore(ap->lock, flags);
1564 		return AC_ERR_SYSTEM;
1565 	}
1566 
1567 	/* initialize internal qc */
1568 
1569 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1570 	 * drivers choke if any other tag is given.  This breaks
1571 	 * ata_tag_internal() test for those drivers.  Don't use new
1572 	 * EH stuff without converting to it.
1573 	 */
1574 	if (ap->ops->error_handler)
1575 		tag = ATA_TAG_INTERNAL;
1576 	else
1577 		tag = 0;
1578 
1579 	if (test_and_set_bit(tag, &ap->qc_allocated))
1580 		BUG();
1581 	qc = __ata_qc_from_tag(ap, tag);
1582 
1583 	qc->tag = tag;
1584 	qc->scsicmd = NULL;
1585 	qc->ap = ap;
1586 	qc->dev = dev;
1587 	ata_qc_reinit(qc);
1588 
1589 	preempted_tag = link->active_tag;
1590 	preempted_sactive = link->sactive;
1591 	preempted_qc_active = ap->qc_active;
1592 	preempted_nr_active_links = ap->nr_active_links;
1593 	link->active_tag = ATA_TAG_POISON;
1594 	link->sactive = 0;
1595 	ap->qc_active = 0;
1596 	ap->nr_active_links = 0;
1597 
1598 	/* prepare & issue qc */
1599 	qc->tf = *tf;
1600 	if (cdb)
1601 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1602 
1603 	/* some SATA bridges need us to indicate data xfer direction */
1604 	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1605 	    dma_dir == DMA_FROM_DEVICE)
1606 		qc->tf.feature |= ATAPI_DMADIR;
1607 
1608 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1609 	qc->dma_dir = dma_dir;
1610 	if (dma_dir != DMA_NONE) {
1611 		unsigned int i, buflen = 0;
1612 		struct scatterlist *sg;
1613 
1614 		for_each_sg(sgl, sg, n_elem, i)
1615 			buflen += sg->length;
1616 
1617 		ata_sg_init(qc, sgl, n_elem);
1618 		qc->nbytes = buflen;
1619 	}
1620 
1621 	qc->private_data = &wait;
1622 	qc->complete_fn = ata_qc_complete_internal;
1623 
1624 	ata_qc_issue(qc);
1625 
1626 	spin_unlock_irqrestore(ap->lock, flags);
1627 
1628 	if (!timeout) {
1629 		if (ata_probe_timeout)
1630 			timeout = ata_probe_timeout * 1000;
1631 		else {
1632 			timeout = ata_internal_cmd_timeout(dev, command);
1633 			auto_timeout = 1;
1634 		}
1635 	}
1636 
1637 	if (ap->ops->error_handler)
1638 		ata_eh_release(ap);
1639 
1640 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1641 
1642 	if (ap->ops->error_handler)
1643 		ata_eh_acquire(ap);
1644 
1645 	ata_sff_flush_pio_task(ap);
1646 
1647 	if (!rc) {
1648 		spin_lock_irqsave(ap->lock, flags);
1649 
1650 		/* We're racing with irq here.  If we lose, the
1651 		 * following test prevents us from completing the qc
1652 		 * twice.  If we win, the port is frozen and will be
1653 		 * cleaned up by ->post_internal_cmd().
1654 		 */
1655 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1656 			qc->err_mask |= AC_ERR_TIMEOUT;
1657 
1658 			if (ap->ops->error_handler)
1659 				ata_port_freeze(ap);
1660 			else
1661 				ata_qc_complete(qc);
1662 
1663 			if (ata_msg_warn(ap))
1664 				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1665 					     command);
1666 		}
1667 
1668 		spin_unlock_irqrestore(ap->lock, flags);
1669 	}
1670 
1671 	/* do post_internal_cmd */
1672 	if (ap->ops->post_internal_cmd)
1673 		ap->ops->post_internal_cmd(qc);
1674 
1675 	/* perform minimal error analysis */
1676 	if (qc->flags & ATA_QCFLAG_FAILED) {
1677 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1678 			qc->err_mask |= AC_ERR_DEV;
1679 
1680 		if (!qc->err_mask)
1681 			qc->err_mask |= AC_ERR_OTHER;
1682 
1683 		if (qc->err_mask & ~AC_ERR_OTHER)
1684 			qc->err_mask &= ~AC_ERR_OTHER;
1685 	}
1686 
1687 	/* finish up */
1688 	spin_lock_irqsave(ap->lock, flags);
1689 
1690 	*tf = qc->result_tf;
1691 	err_mask = qc->err_mask;
1692 
1693 	ata_qc_free(qc);
1694 	link->active_tag = preempted_tag;
1695 	link->sactive = preempted_sactive;
1696 	ap->qc_active = preempted_qc_active;
1697 	ap->nr_active_links = preempted_nr_active_links;
1698 
1699 	spin_unlock_irqrestore(ap->lock, flags);
1700 
1701 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1702 		ata_internal_cmd_timed_out(dev, command);
1703 
1704 	return err_mask;
1705 }
1706 
1707 /**
1708  *	ata_exec_internal - execute libata internal command
1709  *	@dev: Device to which the command is sent
1710  *	@tf: Taskfile registers for the command and the result
1711  *	@cdb: CDB for packet command
1712  *	@dma_dir: Data transfer direction of the command
1713  *	@buf: Data buffer of the command
1714  *	@buflen: Length of data buffer
1715  *	@timeout: Timeout in msecs (0 for default)
1716  *
1717  *	Wrapper around ata_exec_internal_sg() which takes simple
1718  *	buffer instead of sg list.
1719  *
1720  *	LOCKING:
1721  *	None.  Should be called with kernel context, might sleep.
1722  *
1723  *	RETURNS:
1724  *	Zero on success, AC_ERR_* mask on failure
1725  */
1726 unsigned ata_exec_internal(struct ata_device *dev,
1727 			   struct ata_taskfile *tf, const u8 *cdb,
1728 			   int dma_dir, void *buf, unsigned int buflen,
1729 			   unsigned long timeout)
1730 {
1731 	struct scatterlist *psg = NULL, sg;
1732 	unsigned int n_elem = 0;
1733 
1734 	if (dma_dir != DMA_NONE) {
1735 		WARN_ON(!buf);
1736 		sg_init_one(&sg, buf, buflen);
1737 		psg = &sg;
1738 		n_elem++;
1739 	}
1740 
1741 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1742 				    timeout);
1743 }
1744 
1745 /**
1746  *	ata_do_simple_cmd - execute simple internal command
1747  *	@dev: Device to which the command is sent
1748  *	@cmd: Opcode to execute
1749  *
1750  *	Execute a 'simple' command, that only consists of the opcode
1751  *	'cmd' itself, without filling any other registers
1752  *
1753  *	LOCKING:
1754  *	Kernel thread context (may sleep).
1755  *
1756  *	RETURNS:
1757  *	Zero on success, AC_ERR_* mask on failure
1758  */
1759 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1760 {
1761 	struct ata_taskfile tf;
1762 
1763 	ata_tf_init(dev, &tf);
1764 
1765 	tf.command = cmd;
1766 	tf.flags |= ATA_TFLAG_DEVICE;
1767 	tf.protocol = ATA_PROT_NODATA;
1768 
1769 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1770 }
1771 
1772 /**
1773  *	ata_pio_need_iordy	-	check if iordy needed
1774  *	@adev: ATA device
1775  *
1776  *	Check if the current speed of the device requires IORDY. Used
1777  *	by various controllers for chip configuration.
1778  */
1779 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1780 {
1781 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1782 	 * lead to controller lock up on certain controllers if the
1783 	 * port is not occupied.  See bko#11703 for details.
1784 	 */
1785 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1786 		return 0;
1787 	/* Controller doesn't support IORDY.  Probably a pointless
1788 	 * check as the caller should know this.
1789 	 */
1790 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1791 		return 0;
1792 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1793 	if (ata_id_is_cfa(adev->id)
1794 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1795 		return 0;
1796 	/* PIO3 and higher it is mandatory */
1797 	if (adev->pio_mode > XFER_PIO_2)
1798 		return 1;
1799 	/* We turn it on when possible */
1800 	if (ata_id_has_iordy(adev->id))
1801 		return 1;
1802 	return 0;
1803 }
1804 
1805 /**
1806  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1807  *	@adev: ATA device
1808  *
1809  *	Compute the highest mode possible if we are not using iordy. Return
1810  *	-1 if no iordy mode is available.
1811  */
1812 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1813 {
1814 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1815 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1816 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1817 		/* Is the speed faster than the drive allows non IORDY ? */
1818 		if (pio) {
1819 			/* This is cycle times not frequency - watch the logic! */
1820 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1821 				return 3 << ATA_SHIFT_PIO;
1822 			return 7 << ATA_SHIFT_PIO;
1823 		}
1824 	}
1825 	return 3 << ATA_SHIFT_PIO;
1826 }
1827 
1828 /**
1829  *	ata_do_dev_read_id		-	default ID read method
1830  *	@dev: device
1831  *	@tf: proposed taskfile
1832  *	@id: data buffer
1833  *
1834  *	Issue the identify taskfile and hand back the buffer containing
1835  *	identify data.  For some RAID controllers and for pre-ATA devices
1836  *	this function is wrapped or replaced by the driver.
1837  */
1838 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1839 					struct ata_taskfile *tf, u16 *id)
1840 {
1841 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1842 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1843 }
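
/*
 * Illustrative sketch (not from this file): a controller driver that
 * must patch IDENTIFY data can supply its own ->read_id() wrapping the
 * default implementation; the masking below, which hides UDMA modes
 * above UDMA3, is purely an example:
 *
 *	static unsigned int foo_read_id(struct ata_device *dev,
 *					struct ata_taskfile *tf, u16 *id)
 *	{
 *		unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);
 *
 *		if (!err_mask)
 *			id[ATA_ID_UDMA_MODES] &= 0x0f0f;
 *		return err_mask;
 *	}
 */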
1844 
1845 /**
1846  *	ata_dev_read_id - Read ID data from the specified device
1847  *	@dev: target device
1848  *	@p_class: pointer to class of the target device (may be changed)
1849  *	@flags: ATA_READID_* flags
1850  *	@id: buffer to read IDENTIFY data into
1851  *
1852  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1853  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1854  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1855  *	for pre-ATA4 drives.
1856  *
1857  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1858  *	now we abort if we hit that case.
1859  *
1860  *	LOCKING:
1861  *	Kernel thread context (may sleep)
1862  *
1863  *	RETURNS:
1864  *	0 on success, -errno otherwise.
1865  */
1866 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1867 		    unsigned int flags, u16 *id)
1868 {
1869 	struct ata_port *ap = dev->link->ap;
1870 	unsigned int class = *p_class;
1871 	struct ata_taskfile tf;
1872 	unsigned int err_mask = 0;
1873 	const char *reason;
1874 	bool is_semb = class == ATA_DEV_SEMB;
1875 	int may_fallback = 1, tried_spinup = 0;
1876 	int rc;
1877 
1878 	if (ata_msg_ctl(ap))
1879 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1880 
1881 retry:
1882 	ata_tf_init(dev, &tf);
1883 
1884 	switch (class) {
1885 	case ATA_DEV_SEMB:
1886 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig; fall through */
1887 	case ATA_DEV_ATA:
1888 		tf.command = ATA_CMD_ID_ATA;
1889 		break;
1890 	case ATA_DEV_ATAPI:
1891 		tf.command = ATA_CMD_ID_ATAPI;
1892 		break;
1893 	default:
1894 		rc = -ENODEV;
1895 		reason = "unsupported class";
1896 		goto err_out;
1897 	}
1898 
1899 	tf.protocol = ATA_PROT_PIO;
1900 
1901 	/* Some devices choke if TF registers contain garbage.  Make
1902 	 * sure those are properly initialized.
1903 	 */
1904 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1905 
1906 	/* Device presence detection is unreliable on some
1907 	 * controllers.  Always poll IDENTIFY if available.
1908 	 */
1909 	tf.flags |= ATA_TFLAG_POLLING;
1910 
1911 	if (ap->ops->read_id)
1912 		err_mask = ap->ops->read_id(dev, &tf, id);
1913 	else
1914 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1915 
1916 	if (err_mask) {
1917 		if (err_mask & AC_ERR_NODEV_HINT) {
1918 			ata_dev_dbg(dev, "NODEV after polling detection\n");
1919 			return -ENOENT;
1920 		}
1921 
1922 		if (is_semb) {
1923 			ata_dev_info(dev,
1924 		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1925 			/* SEMB is not supported yet */
1926 			*p_class = ATA_DEV_SEMB_UNSUP;
1927 			return 0;
1928 		}
1929 
1930 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1931 			/* Device or controller might have reported
1932 			 * the wrong device class.  Give a shot at the
1933 			 * other IDENTIFY if the current one is
1934 			 * aborted by the device.
1935 			 */
1936 			if (may_fallback) {
1937 				may_fallback = 0;
1938 
1939 				if (class == ATA_DEV_ATA)
1940 					class = ATA_DEV_ATAPI;
1941 				else
1942 					class = ATA_DEV_ATA;
1943 				goto retry;
1944 			}
1945 
1946 			/* Control reaches here iff the device aborted
1947 			 * both flavors of IDENTIFYs which happens
1948 			 * sometimes with phantom devices.
1949 			 */
1950 			ata_dev_dbg(dev,
1951 				    "both IDENTIFYs aborted, assuming NODEV\n");
1952 			return -ENOENT;
1953 		}
1954 
1955 		rc = -EIO;
1956 		reason = "I/O error";
1957 		goto err_out;
1958 	}
1959 
1960 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1961 		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1962 			    "class=%d may_fallback=%d tried_spinup=%d\n",
1963 			    class, may_fallback, tried_spinup);
1964 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1965 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1966 	}
1967 
1968 	/* Falling back doesn't make sense if ID data was read
1969 	 * successfully at least once.
1970 	 */
1971 	may_fallback = 0;
1972 
1973 	swap_buf_le16(id, ATA_ID_WORDS);
1974 
1975 	/* sanity check */
1976 	rc = -EINVAL;
1977 	reason = "device reports invalid type";
1978 
1979 	if (class == ATA_DEV_ATA) {
1980 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1981 			goto err_out;
1982 		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1983 							ata_id_is_ata(id)) {
1984 			ata_dev_dbg(dev,
1985 				"host indicates ignore ATA devices, ignored\n");
1986 			return -ENOENT;
1987 		}
1988 	} else {
1989 		if (ata_id_is_ata(id))
1990 			goto err_out;
1991 	}
1992 
1993 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1994 		tried_spinup = 1;
1995 		/*
1996 		 * Drive powered-up in standby mode, and requires a specific
1997 		 * SET_FEATURES spin-up subcommand before it will accept
1998 		 * anything other than the original IDENTIFY command.
1999 		 */
2000 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2001 		if (err_mask && id[2] != 0x738c) {
2002 			rc = -EIO;
2003 			reason = "SPINUP failed";
2004 			goto err_out;
2005 		}
2006 		/*
2007 		 * If the drive initially returned incomplete IDENTIFY info,
2008 		 * we now must reissue the IDENTIFY command.
2009 		 */
2010 		if (id[2] == 0x37c8)
2011 			goto retry;
2012 	}
2013 
2014 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2015 		/*
2016 		 * The exact sequence expected by certain pre-ATA4 drives is:
2017 		 * SRST RESET
2018 		 * IDENTIFY (optional in early ATA)
2019 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2020 		 * anything else..
2021 		 * Some drives were very specific about that exact sequence.
2022 		 *
2023 		 * Note that ATA4 says lba is mandatory so the second check
2024 		 * should never trigger.
2025 		 */
2026 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2027 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2028 			if (err_mask) {
2029 				rc = -EIO;
2030 				reason = "INIT_DEV_PARAMS failed";
2031 				goto err_out;
2032 			}
2033 
2034 			/* current CHS translation info (id[53-58]) might be
2035 			 * changed. reread the identify device info.
2036 			 */
2037 			flags &= ~ATA_READID_POSTRESET;
2038 			goto retry;
2039 		}
2040 	}
2041 
2042 	*p_class = class;
2043 
2044 	return 0;
2045 
2046  err_out:
2047 	if (ata_msg_warn(ap))
2048 		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2049 			     reason, err_mask);
2050 	return rc;
2051 }
2052 
2053 static int ata_do_link_spd_horkage(struct ata_device *dev)
2054 {
2055 	struct ata_link *plink = ata_dev_phys_link(dev);
2056 	u32 target, target_limit;
2057 
2058 	if (!sata_scr_valid(plink))
2059 		return 0;
2060 
2061 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2062 		target = 1;
2063 	else
2064 		return 0;
2065 
2066 	target_limit = (1 << target) - 1;
2067 
2068 	/* if already on stricter limit, no need to push further */
2069 	if (plink->sata_spd_limit <= target_limit)
2070 		return 0;
2071 
2072 	plink->sata_spd_limit = target_limit;
2073 
2074 	/* Request another EH round by returning -EAGAIN if link is
2075 	 * going faster than the target speed.  Forward progress is
2076 	 * guaranteed by setting sata_spd_limit to target_limit above.
2077 	 */
2078 	if (plink->sata_spd > target) {
2079 		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2080 			     sata_spd_string(target));
2081 		return -EAGAIN;
2082 	}
2083 	return 0;
2084 }
2085 
2086 static inline u8 ata_dev_knobble(struct ata_device *dev)
2087 {
2088 	struct ata_port *ap = dev->link->ap;
2089 
2090 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2091 		return 0;
2092 
2093 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2094 }
2095 
2096 static int ata_dev_config_ncq(struct ata_device *dev,
2097 			       char *desc, size_t desc_sz)
2098 {
2099 	struct ata_port *ap = dev->link->ap;
2100 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2101 	unsigned int err_mask;
2102 	char *aa_desc = "";
2103 
2104 	if (!ata_id_has_ncq(dev->id)) {
2105 		desc[0] = '\0';
2106 		return 0;
2107 	}
2108 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2109 		snprintf(desc, desc_sz, "NCQ (not used)");
2110 		return 0;
2111 	}
2112 	if (ap->flags & ATA_FLAG_NCQ) {
2113 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2114 		dev->flags |= ATA_DFLAG_NCQ;
2115 	}
2116 
2117 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2118 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2119 		ata_id_has_fpdma_aa(dev->id)) {
2120 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2121 			SATA_FPDMA_AA);
2122 		if (err_mask) {
2123 			ata_dev_err(dev,
2124 				    "failed to enable AA (error_mask=0x%x)\n",
2125 				    err_mask);
2126 			if (err_mask != AC_ERR_DEV) {
2127 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2128 				return -EIO;
2129 			}
2130 		} else
2131 			aa_desc = ", AA";
2132 	}
2133 
2134 	if (hdepth >= ddepth)
2135 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2136 	else
2137 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2138 			ddepth, aa_desc);
2139 	return 0;
2140 }
2141 
2142 /**
2143  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2144  *	@dev: Target device to configure
2145  *
2146  *	Configure @dev according to @dev->id.  Generic and low-level
2147  *	driver specific fixups are also applied.
2148  *
2149  *	LOCKING:
2150  *	Kernel thread context (may sleep)
2151  *
2152  *	RETURNS:
2153  *	0 on success, -errno otherwise
2154  */
2155 int ata_dev_configure(struct ata_device *dev)
2156 {
2157 	struct ata_port *ap = dev->link->ap;
2158 	struct ata_eh_context *ehc = &dev->link->eh_context;
2159 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2160 	const u16 *id = dev->id;
2161 	unsigned long xfer_mask;
2162 	char revbuf[7];		/* XYZ-99\0 */
2163 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2164 	char modelbuf[ATA_ID_PROD_LEN+1];
2165 	int rc;
2166 
2167 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2168 		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2169 		return 0;
2170 	}
2171 
2172 	if (ata_msg_probe(ap))
2173 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2174 
2175 	/* set horkage */
2176 	dev->horkage |= ata_dev_blacklisted(dev);
2177 	ata_force_horkage(dev);
2178 
2179 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2180 		ata_dev_info(dev, "unsupported device, disabling\n");
2181 		ata_dev_disable(dev);
2182 		return 0;
2183 	}
2184 
2185 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2186 	    dev->class == ATA_DEV_ATAPI) {
2187 		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2188 			     atapi_enabled ? "not supported with this driver"
2189 			     : "disabled");
2190 		ata_dev_disable(dev);
2191 		return 0;
2192 	}
2193 
2194 	rc = ata_do_link_spd_horkage(dev);
2195 	if (rc)
2196 		return rc;
2197 
2198 	/* let ACPI work its magic */
2199 	rc = ata_acpi_on_devcfg(dev);
2200 	if (rc)
2201 		return rc;
2202 
2203 	/* massage HPA, do it early as it might change IDENTIFY data */
2204 	rc = ata_hpa_resize(dev);
2205 	if (rc)
2206 		return rc;
2207 
2208 	/* print device capabilities */
2209 	if (ata_msg_probe(ap))
2210 		ata_dev_dbg(dev,
2211 			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2212 			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2213 			    __func__,
2214 			    id[49], id[82], id[83], id[84],
2215 			    id[85], id[86], id[87], id[88]);
2216 
2217 	/* initialize to-be-configured parameters */
2218 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2219 	dev->max_sectors = 0;
2220 	dev->cdb_len = 0;
2221 	dev->n_sectors = 0;
2222 	dev->cylinders = 0;
2223 	dev->heads = 0;
2224 	dev->sectors = 0;
2225 	dev->multi_count = 0;
2226 
2227 	/*
2228 	 * common ATA, ATAPI feature tests
2229 	 */
2230 
2231 	/* find max transfer mode; for printk only */
2232 	xfer_mask = ata_id_xfermask(id);
2233 
2234 	if (ata_msg_probe(ap))
2235 		ata_dump_id(id);
2236 
2237 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2238 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2239 			sizeof(fwrevbuf));
2240 
2241 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2242 			sizeof(modelbuf));
2243 
2244 	/* ATA-specific feature tests */
2245 	if (dev->class == ATA_DEV_ATA) {
2246 		if (ata_id_is_cfa(id)) {
2247 			/* CPRM may make this media unusable */
2248 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2249 				ata_dev_warn(dev,
2250 	"supports DRM functions and may not be fully accessible\n");
2251 			snprintf(revbuf, 7, "CFA");
2252 		} else {
2253 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2254 			/* Warn the user if the device has TPM extensions */
2255 			if (ata_id_has_tpm(id))
2256 				ata_dev_warn(dev,
2257 	"supports DRM functions and may not be fully accessible\n");
2258 		}
2259 
2260 		dev->n_sectors = ata_id_n_sectors(id);
2261 
2262 		/* get current R/W Multiple count setting */
2263 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2264 			unsigned int max = dev->id[47] & 0xff;
2265 			unsigned int cnt = dev->id[59] & 0xff;
2266 			/* only recognize/allow powers of two here */
2267 			if (is_power_of_2(max) && is_power_of_2(cnt))
2268 				if (cnt <= max)
2269 					dev->multi_count = cnt;
2270 		}
2271 
2272 		if (ata_id_has_lba(id)) {
2273 			const char *lba_desc;
2274 			char ncq_desc[24];
2275 
2276 			lba_desc = "LBA";
2277 			dev->flags |= ATA_DFLAG_LBA;
2278 			if (ata_id_has_lba48(id)) {
2279 				dev->flags |= ATA_DFLAG_LBA48;
2280 				lba_desc = "LBA48";
2281 
2282 				if (dev->n_sectors >= (1UL << 28) &&
2283 				    ata_id_has_flush_ext(id))
2284 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2285 			}
2286 
2287 			/* config NCQ */
2288 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2289 			if (rc)
2290 				return rc;
2291 
2292 			/* print device info to dmesg */
2293 			if (ata_msg_drv(ap) && print_info) {
2294 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2295 					     revbuf, modelbuf, fwrevbuf,
2296 					     ata_mode_string(xfer_mask));
2297 				ata_dev_info(dev,
2298 					     "%llu sectors, multi %u: %s %s\n",
2299 					(unsigned long long)dev->n_sectors,
2300 					dev->multi_count, lba_desc, ncq_desc);
2301 			}
2302 		} else {
2303 			/* CHS */
2304 
2305 			/* Default translation */
2306 			dev->cylinders	= id[1];
2307 			dev->heads	= id[3];
2308 			dev->sectors	= id[6];
2309 
2310 			if (ata_id_current_chs_valid(id)) {
2311 				/* Current CHS translation is valid. */
2312 				dev->cylinders = id[54];
2313 				dev->heads     = id[55];
2314 				dev->sectors   = id[56];
2315 			}
2316 
2317 			/* print device info to dmesg */
2318 			if (ata_msg_drv(ap) && print_info) {
2319 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2320 					     revbuf,	modelbuf, fwrevbuf,
2321 					     ata_mode_string(xfer_mask));
2322 				ata_dev_info(dev,
2323 					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2324 					     (unsigned long long)dev->n_sectors,
2325 					     dev->multi_count, dev->cylinders,
2326 					     dev->heads, dev->sectors);
2327 			}
2328 		}
2329 
2330 		dev->cdb_len = 16;
2331 	}
2332 
2333 	/* ATAPI-specific feature tests */
2334 	else if (dev->class == ATA_DEV_ATAPI) {
2335 		const char *cdb_intr_string = "";
2336 		const char *atapi_an_string = "";
2337 		const char *dma_dir_string = "";
2338 		u32 sntf;
2339 
2340 		rc = atapi_cdb_len(id);
2341 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2342 			if (ata_msg_warn(ap))
2343 				ata_dev_warn(dev, "unsupported CDB len\n");
2344 			rc = -EINVAL;
2345 			goto err_out_nosup;
2346 		}
2347 		dev->cdb_len = (unsigned int) rc;
2348 
2349 		/* Enable ATAPI AN if both the host and device have
2350 		 * the support.  If PMP is attached, SNTF is required
2351 		 * to enable ATAPI AN to discern between PHY status
2352 		 * changed notifications and ATAPI ANs.
2353 		 */
2354 		if (atapi_an &&
2355 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2356 		    (!sata_pmp_attached(ap) ||
2357 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2358 			unsigned int err_mask;
2359 
2360 			/* issue SET feature command to turn this on */
2361 			err_mask = ata_dev_set_feature(dev,
2362 					SETFEATURES_SATA_ENABLE, SATA_AN);
2363 			if (err_mask)
2364 				ata_dev_err(dev,
2365 					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2366 					    err_mask);
2367 			else {
2368 				dev->flags |= ATA_DFLAG_AN;
2369 				atapi_an_string = ", ATAPI AN";
2370 			}
2371 		}
2372 
2373 		if (ata_id_cdb_intr(dev->id)) {
2374 			dev->flags |= ATA_DFLAG_CDB_INTR;
2375 			cdb_intr_string = ", CDB intr";
2376 		}
2377 
2378 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2379 			dev->flags |= ATA_DFLAG_DMADIR;
2380 			dma_dir_string = ", DMADIR";
2381 		}
2382 
2383 		/* print device info to dmesg */
2384 		if (ata_msg_drv(ap) && print_info)
2385 			ata_dev_info(dev,
2386 				     "ATAPI: %s, %s, max %s%s%s%s\n",
2387 				     modelbuf, fwrevbuf,
2388 				     ata_mode_string(xfer_mask),
2389 				     cdb_intr_string, atapi_an_string,
2390 				     dma_dir_string);
2391 	}
2392 
2393 	/* determine max_sectors */
2394 	dev->max_sectors = ATA_MAX_SECTORS;
2395 	if (dev->flags & ATA_DFLAG_LBA48)
2396 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2397 
2398 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2399 	   200 sectors */
2400 	if (ata_dev_knobble(dev)) {
2401 		if (ata_msg_drv(ap) && print_info)
2402 			ata_dev_info(dev, "applying bridge limits\n");
2403 		dev->udma_mask &= ATA_UDMA5;
2404 		dev->max_sectors = ATA_MAX_SECTORS;
2405 	}
2406 
2407 	if ((dev->class == ATA_DEV_ATAPI) &&
2408 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2409 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2410 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2411 	}
2412 
2413 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2414 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2415 					 dev->max_sectors);
2416 
2417 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2418 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2419 
2420 	if (ap->ops->dev_config)
2421 		ap->ops->dev_config(dev);
2422 
2423 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2424 		/* Let the user know. We don't want to disallow opens for
2425 		   rescue purposes, or in case the vendor is just a blithering
2426 		   idiot. Do this after the dev_config call as some controllers
2427 		   with buggy firmware may want to avoid reporting false device
2428 		   bugs */
2429 
2430 		if (print_info) {
2431 			ata_dev_warn(dev,
2432 "Drive reports diagnostics failure. This may indicate a drive\n");
2433 			ata_dev_warn(dev,
2434 "fault or invalid emulation. Contact drive vendor for information.\n");
2435 		}
2436 	}
2437 
2438 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2439 		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2440 		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2441 	}
2442 
2443 	return 0;
2444 
2445 err_out_nosup:
2446 	if (ata_msg_probe(ap))
2447 		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2448 	return rc;
2449 }
2450 
2451 /**
2452  *	ata_cable_40wire	-	return 40 wire cable type
2453  *	@ap: port
2454  *
2455  *	Helper method for drivers which want to hardwire 40 wire cable
2456  *	detection.
2457  */
2458 
2459 int ata_cable_40wire(struct ata_port *ap)
2460 {
2461 	return ATA_CBL_PATA40;
2462 }
2463 
2464 /**
2465  *	ata_cable_80wire	-	return 80 wire cable type
2466  *	@ap: port
2467  *
2468  *	Helper method for drivers which want to hardwire 80 wire cable
2469  *	detection.
2470  */
2471 
2472 int ata_cable_80wire(struct ata_port *ap)
2473 {
2474 	return ATA_CBL_PATA80;
2475 }
2476 
2477 /**
2478  *	ata_cable_unknown	-	return unknown PATA cable.
2479  *	@ap: port
2480  *
2481  *	Helper method for drivers which have no PATA cable detection.
2482  */
2483 
2484 int ata_cable_unknown(struct ata_port *ap)
2485 {
2486 	return ATA_CBL_PATA_UNK;
2487 }
2488 
2489 /**
2490  *	ata_cable_ignore	-	return ignored PATA cable.
2491  *	@ap: port
2492  *
2493  *	Helper method for drivers which don't use cable type to limit
2494  *	transfer mode.
2495  */
2496 int ata_cable_ignore(struct ata_port *ap)
2497 {
2498 	return ATA_CBL_PATA_IGN;
2499 }
2500 
2501 /**
2502  *	ata_cable_sata	-	return SATA cable type
2503  *	@ap: port
2504  *
2505  *	Helper method for drivers which have SATA cables
2506  */
2507 
2508 int ata_cable_sata(struct ata_port *ap)
2509 {
2510 	return ATA_CBL_SATA;
2511 }
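
/*
 * Illustrative sketch (not from this file): drivers that know their
 * cabling up front simply plug one of the helpers above into their
 * port operations instead of implementing real detection:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */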
2512 
2513 /**
2514  *	ata_bus_probe - Reset and probe ATA bus
2515  *	@ap: Bus to probe
2516  *
2517  *	Master ATA bus probing function.  Initiates a hardware-dependent
2518  *	bus reset, then attempts to identify any devices found on
2519  *	the bus.
2520  *
2521  *	LOCKING:
2522  *	PCI/etc. bus probe sem.
2523  *
2524  *	RETURNS:
2525  *	Zero on success, negative errno otherwise.
2526  */
2527 
2528 int ata_bus_probe(struct ata_port *ap)
2529 {
2530 	unsigned int classes[ATA_MAX_DEVICES];
2531 	int tries[ATA_MAX_DEVICES];
2532 	int rc;
2533 	struct ata_device *dev;
2534 
2535 	ata_for_each_dev(dev, &ap->link, ALL)
2536 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2537 
2538  retry:
2539 	ata_for_each_dev(dev, &ap->link, ALL) {
2540 		/* If we issue an SRST then an ATA drive (not ATAPI)
2541 		 * may change configuration and be in PIO0 timing. If
2542 		 * we do a hard reset (or are coming from power on)
2543 		 * this is true for ATA or ATAPI. Until we've set a
2544 		 * suitable controller mode we should not touch the
2545 		 * bus as we may be talking too fast.
2546 		 */
2547 		dev->pio_mode = XFER_PIO_0;
2548 		dev->dma_mode = 0xff;
2549 
2550 		/* If the controller has a pio mode setup function
2551 		 * then use it to set the chipset to rights. Don't
2552 		 * touch the DMA setup as that will be dealt with when
2553 		 * configuring devices.
2554 		 */
2555 		if (ap->ops->set_piomode)
2556 			ap->ops->set_piomode(ap, dev);
2557 	}
2558 
2559 	/* reset and determine device classes */
2560 	ap->ops->phy_reset(ap);
2561 
2562 	ata_for_each_dev(dev, &ap->link, ALL) {
2563 		if (dev->class != ATA_DEV_UNKNOWN)
2564 			classes[dev->devno] = dev->class;
2565 		else
2566 			classes[dev->devno] = ATA_DEV_NONE;
2567 
2568 		dev->class = ATA_DEV_UNKNOWN;
2569 	}
2570 
2571 	/* read IDENTIFY page and configure devices. We have to do the identify
2572 	   specific sequence bass-ackwards so that PDIAG- is released by
2573 	   the slave device */
2574 
2575 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2576 		if (tries[dev->devno])
2577 			dev->class = classes[dev->devno];
2578 
2579 		if (!ata_dev_enabled(dev))
2580 			continue;
2581 
2582 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2583 				     dev->id);
2584 		if (rc)
2585 			goto fail;
2586 	}
2587 
2588 	/* Now ask for the cable type as PDIAG- should have been released */
2589 	if (ap->ops->cable_detect)
2590 		ap->cbl = ap->ops->cable_detect(ap);
2591 
2592 	/* We may have SATA bridge glue hiding here irrespective of
2593 	 * the reported cable types and sensed types.  When SATA
2594 	 * drives indicate we have a bridge, we don't know which end
2595 	 * of the link the bridge is which is a problem.
2596 	 */
2597 	ata_for_each_dev(dev, &ap->link, ENABLED)
2598 		if (ata_id_is_sata(dev->id))
2599 			ap->cbl = ATA_CBL_SATA;
2600 
2601 	/* After the identify sequence we can now set up the devices. We do
2602 	   this in the normal order so that the user doesn't get confused */
2603 
2604 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2605 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2606 		rc = ata_dev_configure(dev);
2607 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2608 		if (rc)
2609 			goto fail;
2610 	}
2611 
2612 	/* configure transfer mode */
2613 	rc = ata_set_mode(&ap->link, &dev);
2614 	if (rc)
2615 		goto fail;
2616 
2617 	ata_for_each_dev(dev, &ap->link, ENABLED)
2618 		return 0;
2619 
2620 	return -ENODEV;
2621 
2622  fail:
2623 	tries[dev->devno]--;
2624 
2625 	switch (rc) {
2626 	case -EINVAL:
2627 		/* eeek, something went very wrong, give up */
2628 		tries[dev->devno] = 0;
2629 		break;
2630 
2631 	case -ENODEV:
2632 		/* give it just one more chance, then fall through to -EIO handling */
2633 		tries[dev->devno] = min(tries[dev->devno], 1);
2634 	case -EIO:
2635 		if (tries[dev->devno] == 1) {
2636 			/* This is the last chance, better to slow
2637 			 * down than lose it.
2638 			 */
2639 			sata_down_spd_limit(&ap->link, 0);
2640 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2641 		}
2642 	}
2643 
2644 	if (!tries[dev->devno])
2645 		ata_dev_disable(dev);
2646 
2647 	goto retry;
2648 }
2649 
2650 /**
2651  *	sata_print_link_status - Print SATA link status
2652  *	@link: SATA link to printk link status about
2653  *
2654  *	This function prints link speed and status of a SATA link.
2655  *
2656  *	LOCKING:
2657  *	None.
2658  */
2659 static void sata_print_link_status(struct ata_link *link)
2660 {
2661 	u32 sstatus, scontrol, tmp;
2662 
2663 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2664 		return;
2665 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2666 
2667 	if (ata_phys_link_online(link)) {
2668 		tmp = (sstatus >> 4) & 0xf;
2669 		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2670 			      sata_spd_string(tmp), sstatus, scontrol);
2671 	} else {
2672 		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2673 			      sstatus, scontrol);
2674 	}
2675 }
2676 
2677 /**
2678  *	ata_dev_pair		-	return other device on cable
2679  *	@adev: device
2680  *
2681  *	Obtain the other device on the same cable; if none is
2682  *	present, NULL is returned.
2683  */
2684 
2685 struct ata_device *ata_dev_pair(struct ata_device *adev)
2686 {
2687 	struct ata_link *link = adev->link;
2688 	struct ata_device *pair = &link->device[1 - adev->devno];
2689 	if (!ata_dev_enabled(pair))
2690 		return NULL;
2691 	return pair;
2692 }
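
/*
 * Illustrative sketch (not from this file): PATA drivers whose timing
 * registers are shared per channel often use ata_dev_pair() to clamp
 * the programmed mode to what both devices on the cable can handle:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *	u8 pio = adev->pio_mode;
 *
 *	if (pair && pair->pio_mode < pio)
 *		pio = pair->pio_mode;
 */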
2693 
2694 /**
2695  *	sata_down_spd_limit - adjust SATA spd limit downward
2696  *	@link: Link to adjust SATA spd limit for
2697  *	@spd_limit: Additional limit
2698  *
2699  *	Adjust SATA spd limit of @link downward.  Note that this
2700  *	function only adjusts the limit.  The change must be applied
2701  *	using sata_set_spd().
2702  *
2703  *	If @spd_limit is non-zero, the speed is limited to equal to or
2704  *	lower than @spd_limit if such speed is supported.  If
2705  *	@spd_limit is slower than any supported speed, only the lowest
2706  *	supported speed is allowed.
2707  *
2708  *	LOCKING:
2709  *	Inherited from caller.
2710  *
2711  *	RETURNS:
2712  *	0 on success, negative errno on failure
2713  */
2714 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2715 {
2716 	u32 sstatus, spd, mask;
2717 	int rc, bit;
2718 
2719 	if (!sata_scr_valid(link))
2720 		return -EOPNOTSUPP;
2721 
2722 	/* If SCR can be read, use it to determine the current SPD.
2723 	 * If not, use cached value in link->sata_spd.
2724 	 */
2725 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2726 	if (rc == 0 && ata_sstatus_online(sstatus))
2727 		spd = (sstatus >> 4) & 0xf;
2728 	else
2729 		spd = link->sata_spd;
2730 
2731 	mask = link->sata_spd_limit;
2732 	if (mask <= 1)
2733 		return -EINVAL;
2734 
2735 	/* unconditionally mask off the highest bit */
2736 	bit = fls(mask) - 1;
2737 	mask &= ~(1 << bit);
2738 
2739 	/* Mask off all speeds higher than or equal to the current
2740 	 * one.  Force 1.5Gbps if current SPD is not available.
2741 	 */
2742 	if (spd > 1)
2743 		mask &= (1 << (spd - 1)) - 1;
2744 	else
2745 		mask &= 1;
2746 
2747 	/* were we already at the bottom? */
2748 	if (!mask)
2749 		return -EINVAL;
2750 
2751 	if (spd_limit) {
2752 		if (mask & ((1 << spd_limit) - 1))
2753 			mask &= (1 << spd_limit) - 1;
2754 		else {
2755 			bit = ffs(mask) - 1;
2756 			mask = 1 << bit;
2757 		}
2758 	}
2759 
2760 	link->sata_spd_limit = mask;
2761 
2762 	ata_link_warn(link, "limiting SATA link speed to %s\n",
2763 		      sata_spd_string(fls(mask)));
2764 
2765 	return 0;
2766 }
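
/*
 * Worked example (illustrative): with link->sata_spd_limit == 0x7
 * (1.5/3.0/6.0 Gbps allowed) and the link currently running at
 * spd == 2 (3.0 Gbps), the top bit is dropped first (mask == 0x3),
 * then every speed at or above the current one is masked off
 * ((1 << (2 - 1)) - 1 == 0x1), leaving 1.5 Gbps as the new limit.
 * sata_set_spd() must still be called to apply it to the phy.
 */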
2767 
2768 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2769 {
2770 	struct ata_link *host_link = &link->ap->link;
2771 	u32 limit, target, spd;
2772 
2773 	limit = link->sata_spd_limit;
2774 
2775 	/* Don't configure downstream link faster than upstream link.
2776 	 * It doesn't speed up anything and some PMPs choke on such
2777 	 * configuration.
2778 	 */
2779 	if (!ata_is_host_link(link) && host_link->sata_spd)
2780 		limit &= (1 << host_link->sata_spd) - 1;
2781 
2782 	if (limit == UINT_MAX)
2783 		target = 0;
2784 	else
2785 		target = fls(limit);
2786 
2787 	spd = (*scontrol >> 4) & 0xf;
2788 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2789 
2790 	return spd != target;
2791 }
2792 
2793 /**
2794  *	sata_set_spd_needed - is SATA spd configuration needed
2795  *	@link: Link in question
2796  *
2797  *	Test whether the spd limit in SControl matches
2798  *	@link->sata_spd_limit.  This function is used to determine
2799  *	whether hardreset is necessary to apply SATA spd
2800  *	configuration.
2801  *
2802  *	LOCKING:
2803  *	Inherited from caller.
2804  *
2805  *	RETURNS:
2806  *	1 if SATA spd configuration is needed, 0 otherwise.
2807  */
2808 static int sata_set_spd_needed(struct ata_link *link)
2809 {
2810 	u32 scontrol;
2811 
2812 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2813 		return 1;
2814 
2815 	return __sata_set_spd_needed(link, &scontrol);
2816 }
2817 
2818 /**
2819  *	sata_set_spd - set SATA spd according to spd limit
2820  *	@link: Link to set SATA spd for
2821  *
2822  *	Set SATA spd of @link according to sata_spd_limit.
2823  *
2824  *	LOCKING:
2825  *	Inherited from caller.
2826  *
2827  *	RETURNS:
2828  *	0 if spd doesn't need to be changed, 1 if spd has been
2829  *	changed.  Negative errno if SCR registers are inaccessible.
2830  */
2831 int sata_set_spd(struct ata_link *link)
2832 {
2833 	u32 scontrol;
2834 	int rc;
2835 
2836 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2837 		return rc;
2838 
2839 	if (!__sata_set_spd_needed(link, &scontrol))
2840 		return 0;
2841 
2842 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2843 		return rc;
2844 
2845 	return 1;
2846 }
2847 
2848 /*
2849  * This mode timing computation functionality is ported over from
2850  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2851  */
2852 /*
2853  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2854  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2855  * for UDMA6, which is currently supported only by Maxtor drives.
2856  *
2857  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2858  */
2859 
2860 static const struct ata_timing ata_timing[] = {
2861 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
2862 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
2863 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
2864 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
2865 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
2866 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
2867 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
2868 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
2869 
2870 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
2871 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
2872 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
2873 
2874 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
2875 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
2876 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
2877 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
2878 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
2879 
2880 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
2881 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
2882 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
2883 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
2884 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
2885 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
2886 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
2887 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
2888 
2889 	{ 0xFF }
2890 };
2891 
2892 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2893 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2894 
2895 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2896 {
2897 	q->setup	= EZ(t->setup      * 1000,  T);
2898 	q->act8b	= EZ(t->act8b      * 1000,  T);
2899 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
2900 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
2901 	q->active	= EZ(t->active     * 1000,  T);
2902 	q->recover	= EZ(t->recover    * 1000,  T);
2903 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
2904 	q->cycle	= EZ(t->cycle      * 1000,  T);
2905 	q->udma		= EZ(t->udma       * 1000, UT);
2906 }
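
/*
 * Worked example (illustrative): callers typically pass T and UT in
 * picoseconds per bus clock, e.g. T = 1000000000 / 33333 (~30000 ps)
 * for a 33 MHz clock.  A PIO4 active time of 70 ns then quantizes to
 * ENOUGH(70 * 1000, 30000) == 3 clocks; rounding up ensures the
 * programmed timing is never faster than the table value.
 */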
2907 
2908 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2909 		      struct ata_timing *m, unsigned int what)
2910 {
2911 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2912 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2913 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2914 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2915 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2916 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2917 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2918 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2919 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2920 }
2921 
2922 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2923 {
2924 	const struct ata_timing *t = ata_timing;
2925 
2926 	while (xfer_mode > t->mode)
2927 		t++;
2928 
2929 	if (xfer_mode == t->mode)
2930 		return t;
2931 	return NULL;
2932 }
2933 
2934 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2935 		       struct ata_timing *t, int T, int UT)
2936 {
2937 	const u16 *id = adev->id;
2938 	const struct ata_timing *s;
2939 	struct ata_timing p;
2940 
2941 	/*
2942 	 * Find the mode.
2943 	 */
2944 
2945 	if (!(s = ata_timing_find_mode(speed)))
2946 		return -EINVAL;
2947 
2948 	memcpy(t, s, sizeof(*s));
2949 
2950 	/*
2951 	 * If the drive is an EIDE drive, it can tell us it needs extended
2952 	 * PIO/MW_DMA cycle timing.
2953 	 */
2954 
2955 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2956 		memset(&p, 0, sizeof(p));
2957 
2958 		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
2959 			if (speed <= XFER_PIO_2)
2960 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
2961 			else if ((speed <= XFER_PIO_4) ||
2962 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
2963 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
2964 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
2965 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
2966 
2967 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2968 	}
2969 
2970 	/*
2971 	 * Convert the timing to bus clock counts.
2972 	 */
2973 
2974 	ata_timing_quantize(t, t, T, UT);
2975 
2976 	/*
2977 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2978 	 * S.M.A.R.T. and some other commands.  We have to ensure that the
2979 	 * DMA cycle timing is no faster than the fastest PIO timing.
2980 	 */
2981 
2982 	if (speed > XFER_PIO_6) {
2983 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2984 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2985 	}
2986 
2987 	/*
2988 	 * Lengthen active & recovery time so that cycle time is correct.
2989 	 */
2990 
2991 	if (t->act8b + t->rec8b < t->cyc8b) {
2992 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2993 		t->rec8b = t->cyc8b - t->act8b;
2994 	}
2995 
2996 	if (t->active + t->recover < t->cycle) {
2997 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2998 		t->recover = t->cycle - t->active;
2999 	}
3000 
3001 	/* In a few cases quantisation may produce enough error to
3002 	   leave t->cycle too low for the sum of active and recovery;
3003 	   if so, we must correct this. */
3004 	if (t->active + t->recover > t->cycle)
3005 		t->cycle = t->active + t->recover;
3006 
3007 	return 0;
3008 }
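
/*
 * Illustrative sketch (not from this file): a PATA driver's
 * ->set_piomode() typically derives its register values from
 * ata_timing_compute(), with T/UT in picoseconds per clock of its
 * timing engine (here ~30000 ps for 33 MHz); foo_program_timing() is
 * hypothetical:
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, T))
 *		foo_program_timing(ap, adev, t.active, t.recover);
 */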
3009 
3010 /**
3011  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3012  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3013  *	@cycle: cycle duration in ns
3014  *
3015  *	Return matching xfer mode for @cycle.  The returned mode is of
3016  *	the transfer type specified by @xfer_shift.  If @cycle is too
3017  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3018  *	than the fastest known mode, the fastest mode is returned.
3019  *
3020  *	LOCKING:
3021  *	None.
3022  *
3023  *	RETURNS:
3024  *	Matching xfer_mode, 0xff if no match found.
3025  */
3026 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3027 {
3028 	u8 base_mode = 0xff, last_mode = 0xff;
3029 	const struct ata_xfer_ent *ent;
3030 	const struct ata_timing *t;
3031 
3032 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3033 		if (ent->shift == xfer_shift)
3034 			base_mode = ent->base;
3035 
3036 	for (t = ata_timing_find_mode(base_mode);
3037 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3038 		unsigned short this_cycle;
3039 
3040 		switch (xfer_shift) {
3041 		case ATA_SHIFT_PIO:
3042 		case ATA_SHIFT_MWDMA:
3043 			this_cycle = t->cycle;
3044 			break;
3045 		case ATA_SHIFT_UDMA:
3046 			this_cycle = t->udma;
3047 			break;
3048 		default:
3049 			return 0xff;
3050 		}
3051 
3052 		if (cycle > this_cycle)
3053 			break;
3054 
3055 		last_mode = t->mode;
3056 	}
3057 
3058 	return last_mode;
3059 }
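
/*
 * Worked example (illustrative): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 30)
 * walks the UDMA entries above and returns XFER_UDMA_4 (30 ns cycle).
 * A 25 ns request also yields XFER_UDMA_4, since UDMA5 (20 ns) would
 * already be faster than the requested cycle allows.
 */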
3060 
3061 /**
3062  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3063  *	@dev: Device to adjust xfer masks
3064  *	@sel: ATA_DNXFER_* selector
3065  *
3066  *	Adjust xfer masks of @dev downward.  Note that this function
3067  *	does not apply the change.  Invoking ata_set_mode() afterwards
3068  *	will apply the limit.
3069  *
3070  *	LOCKING:
3071  *	Inherited from caller.
3072  *
3073  *	RETURNS:
3074  *	0 on success, negative errno on failure
3075  */
3076 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3077 {
3078 	char buf[32];
3079 	unsigned long orig_mask, xfer_mask;
3080 	unsigned long pio_mask, mwdma_mask, udma_mask;
3081 	int quiet, highbit;
3082 
3083 	quiet = !!(sel & ATA_DNXFER_QUIET);
3084 	sel &= ~ATA_DNXFER_QUIET;
3085 
3086 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3087 						  dev->mwdma_mask,
3088 						  dev->udma_mask);
3089 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3090 
3091 	switch (sel) {
3092 	case ATA_DNXFER_PIO:
3093 		highbit = fls(pio_mask) - 1;
3094 		pio_mask &= ~(1 << highbit);
3095 		break;
3096 
3097 	case ATA_DNXFER_DMA:
3098 		if (udma_mask) {
3099 			highbit = fls(udma_mask) - 1;
3100 			udma_mask &= ~(1 << highbit);
3101 			if (!udma_mask)
3102 				return -ENOENT;
3103 		} else if (mwdma_mask) {
3104 			highbit = fls(mwdma_mask) - 1;
3105 			mwdma_mask &= ~(1 << highbit);
3106 			if (!mwdma_mask)
3107 				return -ENOENT;
3108 		}
3109 		break;
3110 
3111 	case ATA_DNXFER_40C:
3112 		udma_mask &= ATA_UDMA_MASK_40C;
3113 		break;
3114 
3115 	case ATA_DNXFER_FORCE_PIO0:
3116 		pio_mask &= 1;		/* fall through */
3117 	case ATA_DNXFER_FORCE_PIO:
3118 		mwdma_mask = 0;
3119 		udma_mask = 0;
3120 		break;
3121 
3122 	default:
3123 		BUG();
3124 	}
3125 
3126 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3127 
3128 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3129 		return -ENOENT;
3130 
3131 	if (!quiet) {
3132 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3133 			snprintf(buf, sizeof(buf), "%s:%s",
3134 				 ata_mode_string(xfer_mask),
3135 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3136 		else
3137 			snprintf(buf, sizeof(buf), "%s",
3138 				 ata_mode_string(xfer_mask));
3139 
3140 		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3141 	}
3142 
3143 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3144 			    &dev->udma_mask);
3145 
3146 	return 0;
3147 }
3148 
3149 static int ata_dev_set_mode(struct ata_device *dev)
3150 {
3151 	struct ata_port *ap = dev->link->ap;
3152 	struct ata_eh_context *ehc = &dev->link->eh_context;
3153 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3154 	const char *dev_err_whine = "";
3155 	int ign_dev_err = 0;
3156 	unsigned int err_mask = 0;
3157 	int rc;
3158 
3159 	dev->flags &= ~ATA_DFLAG_PIO;
3160 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3161 		dev->flags |= ATA_DFLAG_PIO;
3162 
3163 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3164 		dev_err_whine = " (SET_XFERMODE skipped)";
3165 	else {
3166 		if (nosetxfer)
3167 			ata_dev_warn(dev,
3168 				     "NOSETXFER but PATA detected - can't "
3169 				     "skip SETXFER, might malfunction\n");
3170 		err_mask = ata_dev_set_xfermode(dev);
3171 	}
3172 
3173 	if (err_mask & ~AC_ERR_DEV)
3174 		goto fail;
3175 
3176 	/* revalidate */
3177 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3178 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3179 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3180 	if (rc)
3181 		return rc;
3182 
3183 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3184 		/* Old CFA may refuse this command, which is just fine */
3185 		if (ata_id_is_cfa(dev->id))
3186 			ign_dev_err = 1;
3187 		/* Catch several broken garbage emulations plus some pre
3188 		   ATA devices */
3189 		if (ata_id_major_version(dev->id) == 0 &&
3190 					dev->pio_mode <= XFER_PIO_2)
3191 			ign_dev_err = 1;
3192 		/* Some very old devices and some bad newer ones fail
3193 		   any kind of SET_XFERMODE request but support PIO0-2
3194 		   timings and no IORDY */
3195 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3196 			ign_dev_err = 1;
3197 	}
3198 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3199 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3200 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3201 	    dev->dma_mode == XFER_MW_DMA_0 &&
3202 	    (dev->id[63] >> 8) & 1)
3203 		ign_dev_err = 1;
3204 
3205 	/* if the device is actually configured correctly, ignore dev err */
3206 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3207 		ign_dev_err = 1;
3208 
3209 	if (err_mask & AC_ERR_DEV) {
3210 		if (!ign_dev_err)
3211 			goto fail;
3212 		else
3213 			dev_err_whine = " (device error ignored)";
3214 	}
3215 
3216 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3217 		dev->xfer_shift, (int)dev->xfer_mode);
3218 
3219 	ata_dev_info(dev, "configured for %s%s\n",
3220 		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3221 		     dev_err_whine);
3222 
3223 	return 0;
3224 
3225  fail:
3226 	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3227 	return -EIO;
3228 }
3229 
3230 /**
3231  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3232  *	@link: link on which timings will be programmed
3233  *	@r_failed_dev: out parameter for failed device
3234  *
3235  *	Standard implementation of the function used to tune and set
3236  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3237  *	ata_dev_set_mode() fails, pointer to the failing device is
3238  *	returned in @r_failed_dev.
3239  *
3240  *	LOCKING:
3241  *	PCI/etc. bus probe sem.
3242  *
3243  *	RETURNS:
3244  *	0 on success, negative errno otherwise
3245  */
3246 
3247 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3248 {
3249 	struct ata_port *ap = link->ap;
3250 	struct ata_device *dev;
3251 	int rc = 0, used_dma = 0, found = 0;
3252 
3253 	/* step 1: calculate xfer_mask */
3254 	ata_for_each_dev(dev, link, ENABLED) {
3255 		unsigned long pio_mask, dma_mask;
3256 		unsigned int mode_mask;
3257 
3258 		mode_mask = ATA_DMA_MASK_ATA;
3259 		if (dev->class == ATA_DEV_ATAPI)
3260 			mode_mask = ATA_DMA_MASK_ATAPI;
3261 		else if (ata_id_is_cfa(dev->id))
3262 			mode_mask = ATA_DMA_MASK_CFA;
3263 
3264 		ata_dev_xfermask(dev);
3265 		ata_force_xfermask(dev);
3266 
3267 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3268 
3269 		if (libata_dma_mask & mode_mask)
3270 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3271 						     dev->udma_mask);
3272 		else
3273 			dma_mask = 0;
3274 
3275 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3276 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3277 
3278 		found = 1;
3279 		if (ata_dma_enabled(dev))
3280 			used_dma = 1;
3281 	}
3282 	if (!found)
3283 		goto out;
3284 
3285 	/* step 2: always set host PIO timings */
3286 	ata_for_each_dev(dev, link, ENABLED) {
3287 		if (dev->pio_mode == 0xff) {
3288 			ata_dev_warn(dev, "no PIO support\n");
3289 			rc = -EINVAL;
3290 			goto out;
3291 		}
3292 
3293 		dev->xfer_mode = dev->pio_mode;
3294 		dev->xfer_shift = ATA_SHIFT_PIO;
3295 		if (ap->ops->set_piomode)
3296 			ap->ops->set_piomode(ap, dev);
3297 	}
3298 
3299 	/* step 3: set host DMA timings */
3300 	ata_for_each_dev(dev, link, ENABLED) {
3301 		if (!ata_dma_enabled(dev))
3302 			continue;
3303 
3304 		dev->xfer_mode = dev->dma_mode;
3305 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3306 		if (ap->ops->set_dmamode)
3307 			ap->ops->set_dmamode(ap, dev);
3308 	}
3309 
3310 	/* step 4: update devices' xfer mode */
3311 	ata_for_each_dev(dev, link, ENABLED) {
3312 		rc = ata_dev_set_mode(dev);
3313 		if (rc)
3314 			goto out;
3315 	}
3316 
3317 	/* Record simplex status. If we selected DMA then the other
3318 	 * host channels are not permitted to do so.
3319 	 */
3320 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3321 		ap->host->simplex_claimed = ap;
3322 
3323  out:
3324 	if (rc)
3325 		*r_failed_dev = dev;
3326 	return rc;
3327 }
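
/*
 * Illustrative sketch (not from this file): controllers with extra mode
 * constraints can provide their own ->set_mode() that adjusts the
 * devices (here: a hypothetical chip without MWDMA support) and then
 * falls back to this standard implementation:
 *
 *	static int foo_set_mode(struct ata_link *link,
 *				struct ata_device **r_failed_dev)
 *	{
 *		struct ata_device *dev;
 *
 *		ata_for_each_dev(dev, link, ENABLED)
 *			dev->mwdma_mask = 0;
 *		return ata_do_set_mode(link, r_failed_dev);
 *	}
 */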
3328 
3329 /**
3330  *	ata_wait_ready - wait for link to become ready
3331  *	@link: link to be waited on
3332  *	@deadline: deadline jiffies for the operation
3333  *	@check_ready: callback to check link readiness
3334  *
3335  *	Wait for @link to become ready.  @check_ready should return
3336  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3337  *	link doesn't seem to be occupied, other errno for other error
3338  *	conditions.
3339  *
3340  *	Transient -ENODEV conditions are allowed for
3341  *	ATA_TMOUT_FF_WAIT.
3342  *
3343  *	LOCKING:
3344  *	EH context.
3345  *
3346  *	RETURNS:
3347  *	0 if @link is ready before @deadline; otherwise, -errno.
3348  */
3349 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3350 		   int (*check_ready)(struct ata_link *link))
3351 {
3352 	unsigned long start = jiffies;
3353 	unsigned long nodev_deadline;
3354 	int warned = 0;
3355 
3356 	/* choose which 0xff timeout to use, read comment in libata.h */
3357 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3358 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3359 	else
3360 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3361 
3362 	/* Slave readiness can't be tested separately from master.  On
3363 	 * M/S emulation configuration, this function should be called
3364 	 * only on the master and it will handle both master and slave.
3365 	 */
3366 	WARN_ON(link == link->ap->slave_link);
3367 
3368 	if (time_after(nodev_deadline, deadline))
3369 		nodev_deadline = deadline;
3370 
3371 	while (1) {
3372 		unsigned long now = jiffies;
3373 		int ready, tmp;
3374 
3375 		ready = tmp = check_ready(link);
3376 		if (ready > 0)
3377 			return 0;
3378 
3379 		/*
3380 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3381 		 * is online.  Also, some SATA devices take a long
3382 		 * time to clear 0xff after reset.  Wait for
3383 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3384 		 * offline.
3385 		 *
3386 		 * Note that some PATA controllers (pata_ali) explode
3387 		 * if status register is read more than once when
3388 		 * there's no device attached.
3389 		 */
3390 		if (ready == -ENODEV) {
3391 			if (ata_link_online(link))
3392 				ready = 0;
3393 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3394 				 !ata_link_offline(link) &&
3395 				 time_before(now, nodev_deadline))
3396 				ready = 0;
3397 		}
3398 
3399 		if (ready)
3400 			return ready;
3401 		if (time_after(now, deadline))
3402 			return -EBUSY;
3403 
3404 		if (!warned && time_after(now, start + 5 * HZ) &&
3405 		    (deadline - now > 3 * HZ)) {
3406 			ata_link_warn(link,
3407 				"link is slow to respond, please be patient "
3408 				"(ready=%d)\n", tmp);
3409 			warned = 1;
3410 		}
3411 
3412 		ata_msleep(link->ap, 50);
3413 	}
3414 }
3415 
3416 /**
3417  *	ata_wait_after_reset - wait for link to become ready after reset
3418  *	@link: link to be waited on
3419  *	@deadline: deadline jiffies for the operation
3420  *	@check_ready: callback to check link readiness
3421  *
3422  *	Wait for @link to become ready after reset.
3423  *
3424  *	LOCKING:
3425  *	EH context.
3426  *
3427  *	RETURNS:
3428  *	0 if @link is ready before @deadline; otherwise, -errno.
3429  */
3430 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3431 				int (*check_ready)(struct ata_link *link))
3432 {
3433 	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3434 
3435 	return ata_wait_ready(link, deadline, check_ready);
3436 }
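
/*
 * Illustrative sketch (not from this file): a custom reset method would
 * toggle its reset mechanism and then wait for readiness, e.g. with the
 * SFF status-based callback (-ENODEV here commonly just means an empty
 * port rather than a hard failure):
 *
 *	rc = ata_wait_after_reset(link, deadline, ata_sff_check_ready);
 *	if (rc && rc != -ENODEV)
 *		return rc;
 */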
3437 
3438 /**
3439  *	sata_link_debounce - debounce SATA phy status
3440  *	@link: ATA link to debounce SATA phy status for
3441  *	@params: timing parameters { interval, duration, timeout } in msec
3442  *	@deadline: deadline jiffies for the operation
3443  *
3444  *	Make sure SStatus of @link reaches stable state, determined by
3445  *	holding the same value where DET is not 1 for @duration polled
3446  *	every @interval, before @timeout.  @timeout constrains the
3447  *	beginning of the stable state.  Because DET gets stuck at 1 on
3448  *	some controllers after hot unplugging, this function waits
3449  *	until timeout, then returns 0 if DET is stable at 1.
3450  *
3451  *	@timeout is further limited by @deadline.  The sooner of the
3452  *	two is used.
3453  *
3454  *	LOCKING:
3455  *	Kernel thread context (may sleep)
3456  *
3457  *	RETURNS:
3458  *	0 on success, -errno on failure.
3459  */
3460 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3461 		       unsigned long deadline)
3462 {
3463 	unsigned long interval = params[0];
3464 	unsigned long duration = params[1];
3465 	unsigned long last_jiffies, t;
3466 	u32 last, cur;
3467 	int rc;
3468 
3469 	t = ata_deadline(jiffies, params[2]);
3470 	if (time_before(t, deadline))
3471 		deadline = t;
3472 
3473 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3474 		return rc;
3475 	cur &= 0xf;
3476 
3477 	last = cur;
3478 	last_jiffies = jiffies;
3479 
3480 	while (1) {
3481 		ata_msleep(link->ap, interval);
3482 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3483 			return rc;
3484 		cur &= 0xf;
3485 
3486 		/* DET stable? */
3487 		if (cur == last) {
3488 			if (cur == 1 && time_before(jiffies, deadline))
3489 				continue;
3490 			if (time_after(jiffies,
3491 				       ata_deadline(last_jiffies, duration)))
3492 				return 0;
3493 			continue;
3494 		}
3495 
3496 		/* unstable, start over */
3497 		last = cur;
3498 		last_jiffies = jiffies;
3499 
3500 		/* Check deadline.  If debouncing failed, return
3501 		 * -EPIPE to tell upper layer to lower link speed.
3502 		 */
3503 		if (time_after(jiffies, deadline))
3504 			return -EPIPE;
3505 	}
3506 }
3507 
3508 /**
3509  *	sata_link_resume - resume SATA link
3510  *	@link: ATA link to resume SATA
3511  *	@params: timing parameters { interval, duration, timeout } in msec
3512  *	@deadline: deadline jiffies for the operation
3513  *
3514  *	Resume SATA phy @link and debounce it.
3515  *
3516  *	LOCKING:
3517  *	Kernel thread context (may sleep)
3518  *
3519  *	RETURNS:
3520  *	0 on success, -errno on failure.
3521  */
3522 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3523 		     unsigned long deadline)
3524 {
3525 	int tries = ATA_LINK_RESUME_TRIES;
3526 	u32 scontrol, serror;
3527 	int rc;
3528 
3529 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3530 		return rc;
3531 
3532 	/*
3533 	 * Writes to SControl sometimes get ignored under certain
3534 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3535 	 * cleared.
3536 	 */
3537 	do {
3538 		scontrol = (scontrol & 0x0f0) | 0x300;
3539 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3540 			return rc;
3541 		/*
3542 		 * Some PHYs react badly if SStatus is pounded
3543 		 * immediately after resuming.  Delay 200ms before
3544 		 * debouncing.
3545 		 */
3546 		ata_msleep(link->ap, 200);
3547 
3548 		/* is SControl restored correctly? */
3549 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3550 			return rc;
3551 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3552 
3553 	if ((scontrol & 0xf0f) != 0x300) {
3554 		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3555 			     scontrol);
3556 		return 0;
3557 	}
3558 
3559 	if (tries < ATA_LINK_RESUME_TRIES)
3560 		ata_link_warn(link, "link resume succeeded after %d retries\n",
3561 			      ATA_LINK_RESUME_TRIES - tries);
3562 
3563 	if ((rc = sata_link_debounce(link, params, deadline)))
3564 		return rc;
3565 
3566 	/* clear SError, some PHYs require this even for SRST to work */
3567 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3568 		rc = sata_scr_write(link, SCR_ERROR, serror);
3569 
3570 	return rc != -EINVAL ? rc : 0;
3571 }
3572 
3573 /**
3574  *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3575  *	@link: ATA link to manipulate SControl for
3576  *	@policy: LPM policy to configure
3577  *	@spm_wakeup: initiate LPM transition to active state
3578  *
3579  *	Manipulate the IPM field of the SControl register of @link
3580  *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3581  *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3582  *	the link.  This function also clears PHYRDY_CHG before
3583  *	returning.
3584  *
3585  *	LOCKING:
3586  *	EH context.
3587  *
3588  *	RETURNS:
3589  *	0 on success, -errno otherwise.
3590  */
3591 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3592 		      bool spm_wakeup)
3593 {
3594 	struct ata_eh_context *ehc = &link->eh_context;
3595 	bool woken_up = false;
3596 	u32 scontrol;
3597 	int rc;
3598 
3599 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3600 	if (rc)
3601 		return rc;
3602 
3603 	switch (policy) {
3604 	case ATA_LPM_MAX_POWER:
3605 		/* disable all LPM transitions */
3606 		scontrol |= (0x3 << 8);
3607 		/* initiate transition to active state */
3608 		if (spm_wakeup) {
3609 			scontrol |= (0x4 << 12);
3610 			woken_up = true;
3611 		}
3612 		break;
3613 	case ATA_LPM_MED_POWER:
3614 		/* allow LPM to PARTIAL */
3615 		scontrol &= ~(0x1 << 8);
3616 		scontrol |= (0x2 << 8);
3617 		break;
3618 	case ATA_LPM_MIN_POWER:
3619 		if (ata_link_nr_enabled(link) > 0)
3620 			/* no restrictions on LPM transitions */
3621 			scontrol &= ~(0x3 << 8);
3622 		else {
3623 			/* empty port, power off */
3624 			scontrol &= ~0xf;
3625 			scontrol |= (0x1 << 2);
3626 		}
3627 		break;
3628 	default:
3629 		WARN_ON(1);
3630 	}
3631 
3632 	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3633 	if (rc)
3634 		return rc;
3635 
3636 	/* give the link time to transition out of LPM state */
3637 	if (woken_up)
3638 		msleep(10);
3639 
3640 	/* clear PHYRDY_CHG from SError */
3641 	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3642 	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3643 }
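
/*
 * A minimal sketch of how an LLD ->set_lpm callback might apply the
 * requested policy through sata_link_scr_lpm().  The my_set_lpm name
 * and the choice to request SPM wakeup only for ATA_LPM_MAX_POWER are
 * illustrative assumptions, not part of libata.
 */
static int my_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      unsigned hints)
{
	/* wake the link back up when returning to full power */
	return sata_link_scr_lpm(link, policy, policy == ATA_LPM_MAX_POWER);
}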
3644 
3645 /**
3646  *	ata_std_prereset - prepare for reset
3647  *	@link: ATA link to be reset
3648  *	@deadline: deadline jiffies for the operation
3649  *
3650  *	@link is about to be reset.  Initialize it.  Failure from
3651  *	prereset makes libata abort whole reset sequence and give up
3652  *	that port, so prereset should be best-effort.  It does its
3653  *	best to prepare for reset sequence but if things go wrong, it
3654  *	should just whine, not fail.
3655  *
3656  *	LOCKING:
3657  *	Kernel thread context (may sleep)
3658  *
3659  *	RETURNS:
3660  *	0 on success, -errno otherwise.
3661  */
3662 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3663 {
3664 	struct ata_port *ap = link->ap;
3665 	struct ata_eh_context *ehc = &link->eh_context;
3666 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3667 	int rc;
3668 
3669 	/* if we're about to do hardreset, nothing more to do */
3670 	if (ehc->i.action & ATA_EH_HARDRESET)
3671 		return 0;
3672 
3673 	/* if SATA, resume link */
3674 	if (ap->flags & ATA_FLAG_SATA) {
3675 		rc = sata_link_resume(link, timing, deadline);
3676 		/* whine about phy resume failure but proceed */
3677 		if (rc && rc != -EOPNOTSUPP)
3678 			ata_link_warn(link,
3679 				      "failed to resume link for reset (errno=%d)\n",
3680 				      rc);
3681 	}
3682 
3683 	/* no point in trying softreset on offline link */
3684 	if (ata_phys_link_offline(link))
3685 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3686 
3687 	return 0;
3688 }
3689 
3690 /**
3691  *	sata_link_hardreset - reset link via SATA phy reset
3692  *	@link: link to reset
3693  *	@timing: timing parameters { interval, duration, timeout } in msec
3694  *	@deadline: deadline jiffies for the operation
3695  *	@online: optional out parameter indicating link onlineness
3696  *	@check_ready: optional callback to check link readiness
3697  *
3698  *	SATA phy-reset @link using DET bits of SControl register.
3699  *	After hardreset, link readiness is waited upon using
3700  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3701  *	allowed to omit @check_ready and perform the wait themselves
3702  *	after this function returns.  Device classification is the
3703  *	LLD's responsibility.
3704  *
3705  *	*@online is set to %true iff reset succeeded and @link is online
3706  *	after reset.
3707  *
3708  *	LOCKING:
3709  *	Kernel thread context (may sleep)
3710  *
3711  *	RETURNS:
3712  *	0 on success, -errno otherwise.
3713  */
3714 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3715 			unsigned long deadline,
3716 			bool *online, int (*check_ready)(struct ata_link *))
3717 {
3718 	u32 scontrol;
3719 	int rc;
3720 
3721 	DPRINTK("ENTER\n");
3722 
3723 	if (online)
3724 		*online = false;
3725 
3726 	if (sata_set_spd_needed(link)) {
3727 		/* SATA spec says nothing about how to reconfigure
3728 		 * spd.  To be on the safe side, turn off phy during
3729 		 * reconfiguration.  This works for at least ICH7 AHCI
3730 		 * and Sil3124.
3731 		 */
3732 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3733 			goto out;
3734 
3735 		scontrol = (scontrol & 0x0f0) | 0x304;
3736 
3737 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3738 			goto out;
3739 
3740 		sata_set_spd(link);
3741 	}
3742 
3743 	/* issue phy wake/reset */
3744 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3745 		goto out;
3746 
3747 	scontrol = (scontrol & 0x0f0) | 0x301;
3748 
3749 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3750 		goto out;
3751 
3752 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3753 	 * 10.4.2 says at least 1 ms.
3754 	 */
3755 	ata_msleep(link->ap, 1);
3756 
3757 	/* bring link back */
3758 	rc = sata_link_resume(link, timing, deadline);
3759 	if (rc)
3760 		goto out;
3761 	/* if link is offline nothing more to do */
3762 	if (ata_phys_link_offline(link))
3763 		goto out;
3764 
3765 	/* Link is online.  From this point, -ENODEV too is an error. */
3766 	if (online)
3767 		*online = true;
3768 
3769 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3770 		/* If PMP is supported, we have to do follow-up SRST.
3771 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3772 		 * the first port is empty.  Wait only for
3773 		 * ATA_TMOUT_PMP_SRST_WAIT.
3774 		 */
3775 		if (check_ready) {
3776 			unsigned long pmp_deadline;
3777 
3778 			pmp_deadline = ata_deadline(jiffies,
3779 						    ATA_TMOUT_PMP_SRST_WAIT);
3780 			if (time_after(pmp_deadline, deadline))
3781 				pmp_deadline = deadline;
3782 			ata_wait_ready(link, pmp_deadline, check_ready);
3783 		}
3784 		rc = -EAGAIN;
3785 		goto out;
3786 	}
3787 
3788 	rc = 0;
3789 	if (check_ready)
3790 		rc = ata_wait_ready(link, deadline, check_ready);
3791  out:
3792 	if (rc && rc != -EAGAIN) {
3793 		/* online is set iff link is online && reset succeeded */
3794 		if (online)
3795 			*online = false;
3796 		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3797 	}
3798 	DPRINTK("EXIT, rc=%d\n", rc);
3799 	return rc;
3800 }
3801 
3802 /**
3803  *	sata_std_hardreset - COMRESET w/o waiting or classification
3804  *	@link: link to reset
3805  *	@class: resulting class of attached device
3806  *	@deadline: deadline jiffies for the operation
3807  *
3808  *	Standard SATA COMRESET w/o waiting or classification.
3809  *
3810  *	LOCKING:
3811  *	Kernel thread context (may sleep)
3812  *
3813  *	RETURNS:
3814  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3815  */
3816 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3817 		       unsigned long deadline)
3818 {
3819 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3820 	bool online;
3821 	int rc;
3822 
3823 	/* do hardreset */
3824 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3825 	return online ? -EAGAIN : rc;
3826 }
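
/*
 * A sketch of a hardreset variant that, unlike sata_std_hardreset()
 * above, passes a @check_ready callback so readiness is waited on here
 * rather than in the LLD afterwards.  The my_* names are hypothetical.
 */
static int my_check_ready(struct ata_link *link);	/* hypothetical LLD helper */

static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;

	return sata_link_hardreset(link, timing, deadline, &online,
				   my_check_ready);
}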
3827 
3828 /**
3829  *	ata_std_postreset - standard postreset callback
3830  *	@link: the target ata_link
3831  *	@classes: classes of attached devices
3832  *
3833  *	This function is invoked after a successful reset.  Note that
3834  *	the device might have been reset more than once using
3835  *	different reset methods before postreset is invoked.
3836  *
3837  *	LOCKING:
3838  *	Kernel thread context (may sleep)
3839  */
3840 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3841 {
3842 	u32 serror;
3843 
3844 	DPRINTK("ENTER\n");
3845 
3846 	/* reset complete, clear SError */
3847 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3848 		sata_scr_write(link, SCR_ERROR, serror);
3849 
3850 	/* print link status */
3851 	sata_print_link_status(link);
3852 
3853 	DPRINTK("EXIT\n");
3854 }
3855 
3856 /**
3857  *	ata_dev_same_device - Determine whether new ID matches configured device
3858  *	@dev: device to compare against
3859  *	@new_class: class of the new device
3860  *	@new_id: IDENTIFY page of the new device
3861  *
3862  *	Compare @new_class and @new_id against @dev and determine
3863  *	whether @dev is the device indicated by @new_class and
3864  *	@new_id.
3865  *
3866  *	LOCKING:
3867  *	None.
3868  *
3869  *	RETURNS:
3870  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3871  */
3872 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3873 			       const u16 *new_id)
3874 {
3875 	const u16 *old_id = dev->id;
3876 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3877 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3878 
3879 	if (dev->class != new_class) {
3880 		ata_dev_info(dev, "class mismatch %d != %d\n",
3881 			     dev->class, new_class);
3882 		return 0;
3883 	}
3884 
3885 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3886 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3887 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3888 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3889 
3890 	if (strcmp(model[0], model[1])) {
3891 		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3892 			     model[0], model[1]);
3893 		return 0;
3894 	}
3895 
3896 	if (strcmp(serial[0], serial[1])) {
3897 		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3898 			     serial[0], serial[1]);
3899 		return 0;
3900 	}
3901 
3902 	return 1;
3903 }
3904 
3905 /**
3906  *	ata_dev_reread_id - Re-read IDENTIFY data
3907  *	@dev: target ATA device
3908  *	@readid_flags: read ID flags
3909  *
3910  *	Re-read IDENTIFY page and make sure @dev is still attached to
3911  *	the port.
3912  *
3913  *	LOCKING:
3914  *	Kernel thread context (may sleep)
3915  *
3916  *	RETURNS:
3917  *	0 on success, negative errno otherwise
3918  */
3919 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3920 {
3921 	unsigned int class = dev->class;
3922 	u16 *id = (void *)dev->link->ap->sector_buf;
3923 	int rc;
3924 
3925 	/* read ID data */
3926 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3927 	if (rc)
3928 		return rc;
3929 
3930 	/* is the device still there? */
3931 	if (!ata_dev_same_device(dev, class, id))
3932 		return -ENODEV;
3933 
3934 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3935 	return 0;
3936 }
3937 
3938 /**
3939  *	ata_dev_revalidate - Revalidate ATA device
3940  *	@dev: device to revalidate
3941  *	@new_class: new class code
3942  *	@readid_flags: read ID flags
3943  *
3944  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3945  *	port and reconfigure it according to the new IDENTIFY page.
3946  *
3947  *	LOCKING:
3948  *	Kernel thread context (may sleep)
3949  *
3950  *	RETURNS:
3951  *	0 on success, negative errno otherwise
3952  */
3953 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3954 		       unsigned int readid_flags)
3955 {
3956 	u64 n_sectors = dev->n_sectors;
3957 	u64 n_native_sectors = dev->n_native_sectors;
3958 	int rc;
3959 
3960 	if (!ata_dev_enabled(dev))
3961 		return -ENODEV;
3962 
3963 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3964 	if (ata_class_enabled(new_class) &&
3965 	    new_class != ATA_DEV_ATA &&
3966 	    new_class != ATA_DEV_ATAPI &&
3967 	    new_class != ATA_DEV_SEMB) {
3968 		ata_dev_info(dev, "class mismatch %u != %u\n",
3969 			     dev->class, new_class);
3970 		rc = -ENODEV;
3971 		goto fail;
3972 	}
3973 
3974 	/* re-read ID */
3975 	rc = ata_dev_reread_id(dev, readid_flags);
3976 	if (rc)
3977 		goto fail;
3978 
3979 	/* configure device according to the new ID */
3980 	rc = ata_dev_configure(dev);
3981 	if (rc)
3982 		goto fail;
3983 
3984 	/* verify n_sectors hasn't changed */
3985 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
3986 	    dev->n_sectors == n_sectors)
3987 		return 0;
3988 
3989 	/* n_sectors has changed */
3990 	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3991 		     (unsigned long long)n_sectors,
3992 		     (unsigned long long)dev->n_sectors);
3993 
3994 	/*
3995 	 * Something could have caused HPA to be unlocked
3996 	 * involuntarily.  If n_native_sectors hasn't changed and the
3997 	 * new size matches it, keep the device.
3998 	 */
3999 	if (dev->n_native_sectors == n_native_sectors &&
4000 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4001 		ata_dev_warn(dev,
4002 			     "new n_sectors matches native, probably "
4003 			     "late HPA unlock, n_sectors updated\n");
4004 		/* use the larger n_sectors */
4005 		return 0;
4006 	}
4007 
4008 	/*
4009 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4010 	 * unlocking HPA in those cases.
4011 	 *
4012 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4013 	 */
4014 	if (dev->n_native_sectors == n_native_sectors &&
4015 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4016 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4017 		ata_dev_warn(dev,
4018 			     "old n_sectors matches native, probably "
4019 			     "late HPA lock, will try to unlock HPA\n");
4020 		/* try unlocking HPA */
4021 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4022 		rc = -EIO;
4023 	} else
4024 		rc = -ENODEV;
4025 
4026 	/* restore original n_[native_]sectors and fail */
4027 	dev->n_native_sectors = n_native_sectors;
4028 	dev->n_sectors = n_sectors;
4029  fail:
4030 	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4031 	return rc;
4032 }
4033 
4034 struct ata_blacklist_entry {
4035 	const char *model_num;
4036 	const char *model_rev;
4037 	unsigned long horkage;
4038 };
4039 
4040 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4041 	/* Devices with DMA related problems under Linux */
4042 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4043 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4044 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4045 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4046 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4047 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4048 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4049 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4050 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4051 	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4052 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4053 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4054 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4055 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4056 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4057 	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4058 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4059 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4060 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4061 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4062 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4063 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4064 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4065 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4066 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4067 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4068 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4069 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4070 	/* Odd clown on sil3726/4726 PMPs */
4071 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4072 
4073 	/* Weird ATAPI devices */
4074 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4075 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4076 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4077 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4078 
4079 	/* Devices we expect to fail diagnostics */
4080 
4081 	/* Devices where NCQ should be avoided */
4082 	/* NCQ is slow */
4083 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4084 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4085 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4086 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4087 	/* NCQ is broken */
4088 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4089 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4090 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4091 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4092 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4093 
4094 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4095 	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4096 						ATA_HORKAGE_FIRMWARE_WARN },
4097 
4098 	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4099 						ATA_HORKAGE_FIRMWARE_WARN },
4100 
4101 	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4102 						ATA_HORKAGE_FIRMWARE_WARN },
4103 
4104 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4105 						ATA_HORKAGE_FIRMWARE_WARN },
4106 
4107 	/* Seagate Momentus SpinPoint M8 seem to have FPDMA_AA issues */
4108 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4109 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4110 
4111 	/* Blacklist entries taken from Silicon Image 3124/3132
4112 	   Windows driver .inf file - also several Linux problem reports */
4113 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4114 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4115 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4116 
4117 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4118 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4119 
4120 	/* devices which puke on READ_NATIVE_MAX */
4121 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4122 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4123 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4124 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4125 
4126 	/* this one allows HPA unlocking but fails IOs on the area */
4127 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4128 
4129 	/* Devices which report 1 sector over size HPA */
4130 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4131 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4132 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4133 
4134 	/* Devices which get the IVB wrong */
4135 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4136 	/* Maybe we should just blacklist TSSTcorp... */
4137 	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4138 
4139 	/* Devices that do not need bridging limits applied */
4140 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4141 
4142 	/* Devices which aren't very happy with higher link speeds */
4143 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4144 	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4145 
4146 	/*
4147 	 * Devices which choke on SETXFER.  Applies only if both the
4148 	 * device and controller are SATA.
4149 	 */
4150 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4151 	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4152 	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4153 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4154 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4155 
4156 	/* End Marker */
4157 	{ }
4158 };
4159 
4160 /**
4161  *	glob_match - match a text string against a glob-style pattern
4162  *	@text: the string to be examined
4163  *	@pattern: the glob-style pattern to be matched against
4164  *
4165  *	Either/both of text and pattern can be empty strings.
4166  *
4167  *	Match text against a glob-style pattern, with wildcards and simple sets:
4168  *
4169  *		?	matches any single character.
4170  *		*	matches any run of characters.
4171  *		[xyz]	matches a single character from the set: x, y, or z.
4172  *		[a-d]	matches a single character from the range: a, b, c, or d.
4173  *		[a-d0-9] matches a single character from either range.
4174  *
4175  *	The special characters ?, [, -, or * can be matched using a set, e.g. [*]
4176  *	Behaviour with malformed patterns is undefined, though generally reasonable.
4177  *
4178  *	Sample patterns:  "SD1?",  "SD1[0-5]",  "*R0",  "SD*1?[012]*xx"
4179  *
4180  *	This function uses one level of recursion per '*' in pattern.
4181  *	Since it calls _nothing_ else, and has _no_ explicit local variables,
4182  *	this will not cause stack problems for any reasonable use here.
4183  *
4184  *	RETURNS:
4185  *	0 on match, 1 otherwise.
4186  */
4187 static int glob_match (const char *text, const char *pattern)
4188 {
4189 	do {
4190 		/* Match single character or a '?' wildcard */
4191 		if (*text == *pattern || *pattern == '?') {
4192 			if (!*pattern++)
4193 				return 0;  /* End of both strings: match */
4194 		} else {
4195 			/* Match single char against a '[' bracketed ']' pattern set */
4196 			if (!*text || *pattern != '[')
4197 				break;  /* Not a pattern set */
4198 			while (*++pattern && *pattern != ']' && *text != *pattern) {
4199 				if (*pattern == '-' && *(pattern - 1) != '[')
4200 					if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4201 						++pattern;
4202 						break;
4203 					}
4204 			}
4205 			if (!*pattern || *pattern == ']')
4206 				return 1;  /* No match */
4207 			while (*pattern && *pattern++ != ']');
4208 		}
4209 	} while (*++text && *pattern);
4210 
4211 	/* Match any run of chars against a '*' wildcard */
4212 	if (*pattern == '*') {
4213 		if (!*++pattern)
4214 			return 0;  /* Match: avoid recursion at end of pattern */
4215 		/* Loop to handle additional pattern chars after the wildcard */
4216 		while (*text) {
4217 			if (glob_match(text, pattern) == 0)
4218 				return 0;  /* Remainder matched */
4219 			++text;  /* Absorb (match) this char and try again */
4220 		}
4221 	}
4222 	if (!*text && !*pattern)
4223 		return 0;  /* End of both strings: match */
4224 	return 1;  /* No match */
4225 }
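
/*
 * A few worked examples against entries from the blacklist above
 * (0 means match):
 *
 *	glob_match("ST3320813AS", "ST3320[68]13AS")	-> 0 (match)
 *	glob_match("Maxtor 7B250S0", "Maxtor *")	-> 0 (match)
 *	glob_match("ST3320813AS", "Maxtor *")		-> 1 (no match)
 */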
4226 
4227 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4228 {
4229 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4230 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4231 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4232 
4233 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4234 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4235 
4236 	while (ad->model_num) {
4237 		if (!glob_match(model_num, ad->model_num)) {
4238 			if (ad->model_rev == NULL)
4239 				return ad->horkage;
4240 			if (!glob_match(model_rev, ad->model_rev))
4241 				return ad->horkage;
4242 		}
4243 		ad++;
4244 	}
4245 	return 0;
4246 }
4247 
4248 static int ata_dma_blacklisted(const struct ata_device *dev)
4249 {
4250 	/* We don't support polling DMA.
4251 	 * DMA-blacklist those ATAPI devices with CDB-intr (and use PIO)
4252 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4253 	 */
4254 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4255 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4256 		return 1;
4257 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4258 }
4259 
4260 /**
4261  *	ata_is_40wire		-	check drive side detection
4262  *	@dev: device
4263  *
4264  *	Perform drive side detection decoding, allowing for device vendors
4265  *	who can't follow the documentation.
4266  */
4267 
4268 static int ata_is_40wire(struct ata_device *dev)
4269 {
4270 	if (dev->horkage & ATA_HORKAGE_IVB)
4271 		return ata_drive_40wire_relaxed(dev->id);
4272 	return ata_drive_40wire(dev->id);
4273 }
4274 
4275 /**
4276  *	cable_is_40wire		-	40/80/SATA decider
4277  *	@ap: port to consider
4278  *
4279  *	This function encapsulates the policy for speed management
4280  *	in one place. At the moment we don't cache the result but
4281  *	there is a good case for setting ap->cbl to the result when
4282  *	we are called with unknown cables (and figuring out if it
4283  *	impacts hotplug at all).
4284  *
4285  *	Return 1 if the cable appears to be 40 wire.
4286  */
4287 
4288 static int cable_is_40wire(struct ata_port *ap)
4289 {
4290 	struct ata_link *link;
4291 	struct ata_device *dev;
4292 
4293 	/* If the controller thinks we are 40 wire, we are. */
4294 	if (ap->cbl == ATA_CBL_PATA40)
4295 		return 1;
4296 
4297 	/* If the controller thinks we are 80 wire, we are. */
4298 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4299 		return 0;
4300 
4301 	/* If the system is known to be 40 wire short cable (eg
4302 	 * laptop), then we allow 80 wire modes even if the drive
4303 	 * isn't sure.
4304 	 */
4305 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4306 		return 0;
4307 
4308 	/* If the controller doesn't know, we scan.
4309 	 *
4310 	 * Note: We look for all 40 wire detects at this point.  Any
4311 	 *       80 wire detect is taken to be 80 wire cable because
4312 	 * - in many setups only the one drive (slave if present) will
4313 	 *   give a valid detect
4314 	 * - if you have a non detect capable drive you don't want it
4315 	 *   to colour the choice
4316 	 */
4317 	ata_for_each_link(link, ap, EDGE) {
4318 		ata_for_each_dev(dev, link, ENABLED) {
4319 			if (!ata_is_40wire(dev))
4320 				return 0;
4321 		}
4322 	}
4323 	return 1;
4324 }
4325 
4326 /**
4327  *	ata_dev_xfermask - Compute supported xfermask of the given device
4328  *	@dev: Device to compute xfermask for
4329  *
4330  *	Compute supported xfermask of @dev and store it in
4331  *	dev->*_mask.  This function is responsible for applying all
4332  *	known limits including host controller limits, device
4333  *	blacklist, etc...
4334  *
4335  *	LOCKING:
4336  *	None.
4337  */
4338 static void ata_dev_xfermask(struct ata_device *dev)
4339 {
4340 	struct ata_link *link = dev->link;
4341 	struct ata_port *ap = link->ap;
4342 	struct ata_host *host = ap->host;
4343 	unsigned long xfer_mask;
4344 
4345 	/* controller modes available */
4346 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4347 				      ap->mwdma_mask, ap->udma_mask);
4348 
4349 	/* drive modes available */
4350 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4351 				       dev->mwdma_mask, dev->udma_mask);
4352 	xfer_mask &= ata_id_xfermask(dev->id);
4353 
4354 	/*
4355 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4356 	 *	cable
4357 	 */
4358 	if (ata_dev_pair(dev)) {
4359 		/* No PIO5 or PIO6 */
4360 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4361 		/* No MWDMA3 or MWDMA 4 */
4362 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4363 	}
4364 
4365 	if (ata_dma_blacklisted(dev)) {
4366 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4367 		ata_dev_warn(dev,
4368 			     "device is on DMA blacklist, disabling DMA\n");
4369 	}
4370 
4371 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4372 	    host->simplex_claimed && host->simplex_claimed != ap) {
4373 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4374 		ata_dev_warn(dev,
4375 			     "simplex DMA is claimed by other device, disabling DMA\n");
4376 	}
4377 
4378 	if (ap->flags & ATA_FLAG_NO_IORDY)
4379 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4380 
4381 	if (ap->ops->mode_filter)
4382 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4383 
4384 	/* Apply cable rule here.  Don't apply it early because when
4385 	 * we handle hot plug the cable type can itself change.
4386 	 * Check this last so that we know if the transfer rate was
4387 	 * solely limited by the cable.
4388 	 * Unknown or 80 wire cables reported host side are checked
4389 	 * drive side as well. Cases where we know a 40wire cable
4390 	 * is used safely for 80 are not checked here.
4391 	 */
4392 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4393 		/* UDMA/44 or higher would be available */
4394 		if (cable_is_40wire(ap)) {
4395 			ata_dev_warn(dev,
4396 				     "limited to UDMA/33 due to 40-wire cable\n");
4397 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4398 		}
4399 
4400 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4401 			    &dev->mwdma_mask, &dev->udma_mask);
4402 }
4403 
4404 /**
4405  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4406  *	@dev: Device to which command will be sent
4407  *
4408  *	Issue SET FEATURES - XFER MODE command to device @dev
4409  *	on port @ap.
4410  *
4411  *	LOCKING:
4412  *	PCI/etc. bus probe sem.
4413  *
4414  *	RETURNS:
4415  *	0 on success, AC_ERR_* mask otherwise.
4416  */
4417 
4418 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4419 {
4420 	struct ata_taskfile tf;
4421 	unsigned int err_mask;
4422 
4423 	/* set up set-features taskfile */
4424 	DPRINTK("set features - xfer mode\n");
4425 
4426 	/* Some controllers and ATAPI devices show flaky interrupt
4427 	 * behavior after setting xfer mode.  Use polling instead.
4428 	 */
4429 	ata_tf_init(dev, &tf);
4430 	tf.command = ATA_CMD_SET_FEATURES;
4431 	tf.feature = SETFEATURES_XFER;
4432 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4433 	tf.protocol = ATA_PROT_NODATA;
4434 	/* If we are using IORDY we must send the mode setting command */
4435 	if (ata_pio_need_iordy(dev))
4436 		tf.nsect = dev->xfer_mode;
4437 	/* If the device has IORDY and the controller does not - turn it off */
4438 	else if (ata_id_has_iordy(dev->id))
4439 		tf.nsect = 0x01;
4440 	else /* In the ancient relic department - skip all of this */
4441 		return 0;
4442 
4443 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4444 
4445 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4446 	return err_mask;
4447 }
4448 
4449 /**
4450  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4451  *	@dev: Device to which command will be sent
4452  *	@enable: Whether to enable or disable the feature
4453  *	@feature: The feature to set (passed in the sector count field)
4454  *
4455  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4456  *	on port @ap with the sector count set to @feature.
4457  *
4458  *	LOCKING:
4459  *	PCI/etc. bus probe sem.
4460  *
4461  *	RETURNS:
4462  *	0 on success, AC_ERR_* mask otherwise.
4463  */
4464 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4465 {
4466 	struct ata_taskfile tf;
4467 	unsigned int err_mask;
4468 
4469 	/* set up set-features taskfile */
4470 	DPRINTK("set features - SATA features\n");
4471 
4472 	ata_tf_init(dev, &tf);
4473 	tf.command = ATA_CMD_SET_FEATURES;
4474 	tf.feature = enable;
4475 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4476 	tf.protocol = ATA_PROT_NODATA;
4477 	tf.nsect = feature;
4478 
4479 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4480 
4481 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4482 	return err_mask;
4483 }
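
/*
 * A minimal sketch of a caller: enabling device-initiated interface
 * power management.  It assumes the SETFEATURES_SATA_ENABLE and
 * SATA_DIPM values from <linux/ata.h>; handling of the returned
 * err_mask is left to the caller.
 */
static unsigned int my_enable_dipm(struct ata_device *dev)
{
	return ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM);
}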
4484 
4485 /**
4486  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4487  *	@dev: Device to which command will be sent
4488  *	@heads: Number of heads (taskfile parameter)
4489  *	@sectors: Number of sectors (taskfile parameter)
4490  *
4491  *	LOCKING:
4492  *	Kernel thread context (may sleep)
4493  *
4494  *	RETURNS:
4495  *	0 on success, AC_ERR_* mask otherwise.
4496  */
4497 static unsigned int ata_dev_init_params(struct ata_device *dev,
4498 					u16 heads, u16 sectors)
4499 {
4500 	struct ata_taskfile tf;
4501 	unsigned int err_mask;
4502 
4503 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4504 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4505 		return AC_ERR_INVALID;
4506 
4507 	/* set up init dev params taskfile */
4508 	DPRINTK("init dev params \n");
4509 
4510 	ata_tf_init(dev, &tf);
4511 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4512 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4513 	tf.protocol = ATA_PROT_NODATA;
4514 	tf.nsect = sectors;
4515 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4516 
4517 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4518 	/* A clean abort indicates an original or just out-of-spec drive,
4519 	   and we should continue as we issue the setup based on the
4520 	   drive-reported working geometry */
4521 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4522 		err_mask = 0;
4523 
4524 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4525 	return err_mask;
4526 }
4527 
4528 /**
4529  *	ata_sg_clean - Unmap DMA memory associated with command
4530  *	@qc: Command containing DMA memory to be released
4531  *
4532  *	Unmap all mapped DMA memory associated with this command.
4533  *
4534  *	LOCKING:
4535  *	spin_lock_irqsave(host lock)
4536  */
4537 void ata_sg_clean(struct ata_queued_cmd *qc)
4538 {
4539 	struct ata_port *ap = qc->ap;
4540 	struct scatterlist *sg = qc->sg;
4541 	int dir = qc->dma_dir;
4542 
4543 	WARN_ON_ONCE(sg == NULL);
4544 
4545 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4546 
4547 	if (qc->n_elem)
4548 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4549 
4550 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4551 	qc->sg = NULL;
4552 }
4553 
4554 /**
4555  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4556  *	@qc: Metadata associated with taskfile to check
4557  *
4558  *	Allow low-level driver to filter ATA PACKET commands, returning
4559  *	a status indicating whether or not it is OK to use DMA for the
4560  *	supplied PACKET command.
4561  *
4562  *	LOCKING:
4563  *	spin_lock_irqsave(host lock)
4564  *
4565  *	RETURNS: 0 when ATAPI DMA can be used
4566  *               nonzero otherwise
4567  */
4568 int atapi_check_dma(struct ata_queued_cmd *qc)
4569 {
4570 	struct ata_port *ap = qc->ap;
4571 
4572 	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4573 	 * few ATAPI devices choke on such DMA requests.
4574 	 */
4575 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4576 	    unlikely(qc->nbytes & 15))
4577 		return 1;
4578 
4579 	if (ap->ops->check_atapi_dma)
4580 		return ap->ops->check_atapi_dma(qc);
4581 
4582 	return 0;
4583 }
4584 
4585 /**
4586  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4587  *	@qc: ATA command in question
4588  *
4589  *	Non-NCQ commands cannot run with any other command, NCQ or
4590  *	not.  As upper layer only knows the queue depth, we are
4591  *	responsible for maintaining exclusion.  This function checks
4592  *	whether a new command @qc can be issued.
4593  *
4594  *	LOCKING:
4595  *	spin_lock_irqsave(host lock)
4596  *
4597  *	RETURNS:
4598  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4599  */
4600 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4601 {
4602 	struct ata_link *link = qc->dev->link;
4603 
4604 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4605 		if (!ata_tag_valid(link->active_tag))
4606 			return 0;
4607 	} else {
4608 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4609 			return 0;
4610 	}
4611 
4612 	return ATA_DEFER_LINK;
4613 }
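
/*
 * A sketch of how an LLD with additional issue restrictions might
 * layer its own check on top of the standard helper.  my_qc_defer and
 * my_controller_busy are hypothetical.
 */
static bool my_controller_busy(struct ata_port *ap);	/* hypothetical */

static int my_qc_defer(struct ata_queued_cmd *qc)
{
	/* defer the whole port while the controller is busy */
	if (my_controller_busy(qc->ap))
		return ATA_DEFER_PORT;

	return ata_std_qc_defer(qc);
}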
4614 
4615 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4616 
4617 /**
4618  *	ata_sg_init - Associate command with scatter-gather table.
4619  *	@qc: Command to be associated
4620  *	@sg: Scatter-gather table.
4621  *	@n_elem: Number of elements in s/g table.
4622  *
4623  *	Initialize the data-related elements of queued_cmd @qc
4624  *	to point to a scatter-gather table @sg, containing @n_elem
4625  *	elements.
4626  *
4627  *	LOCKING:
4628  *	spin_lock_irqsave(host lock)
4629  */
4630 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4631 		 unsigned int n_elem)
4632 {
4633 	qc->sg = sg;
4634 	qc->n_elem = n_elem;
4635 	qc->cursg = qc->sg;
4636 }
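
/*
 * A sketch of typical use for a single-buffer command, assuming the
 * caller owns sgl and buf for the lifetime of the qc:
 *
 *	struct scatterlist sgl;
 *
 *	sg_init_one(&sgl, buf, buflen);
 *	ata_sg_init(qc, &sgl, 1);
 *	qc->nbytes = buflen;
 */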
4637 
4638 /**
4639  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4640  *	@qc: Command with scatter-gather table to be mapped.
4641  *
4642  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4643  *
4644  *	LOCKING:
4645  *	spin_lock_irqsave(host lock)
4646  *
4647  *	RETURNS:
4648  *	Zero on success, negative on error.
4649  *
4650  */
4651 static int ata_sg_setup(struct ata_queued_cmd *qc)
4652 {
4653 	struct ata_port *ap = qc->ap;
4654 	unsigned int n_elem;
4655 
4656 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4657 
4658 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4659 	if (n_elem < 1)
4660 		return -1;
4661 
4662 	DPRINTK("%d sg elements mapped\n", n_elem);
4663 	qc->orig_n_elem = qc->n_elem;
4664 	qc->n_elem = n_elem;
4665 	qc->flags |= ATA_QCFLAG_DMAMAP;
4666 
4667 	return 0;
4668 }
4669 
4670 /**
4671  *	swap_buf_le16 - swap halves of 16-bit words in place
4672  *	@buf:  Buffer to swap
4673  *	@buf_words:  Number of 16-bit words in buffer.
4674  *
4675  *	Swap halves of 16-bit words if needed to convert from
4676  *	little-endian byte order to native cpu byte order, or
4677  *	vice-versa.
4678  *
4679  *	LOCKING:
4680  *	Inherited from caller.
4681  */
4682 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4683 {
4684 #ifdef __BIG_ENDIAN
4685 	unsigned int i;
4686 
4687 	for (i = 0; i < buf_words; i++)
4688 		buf[i] = le16_to_cpu(buf[i]);
4689 #endif /* __BIG_ENDIAN */
4690 }
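
/*
 * Typical use: IDENTIFY data arrives as little-endian 16-bit words, so
 * PIO paths convert the buffer in place before parsing it, e.g.
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */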
4691 
4692 /**
4693  *	ata_qc_new - Request an available ATA command, for queueing
4694  *	@ap: target port
4695  *
4696  *	LOCKING:
4697  *	None.
4698  */
4699 
4700 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4701 {
4702 	struct ata_queued_cmd *qc = NULL;
4703 	unsigned int i, tag;
4704 
4705 	/* no command while frozen */
4706 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4707 		return NULL;
4708 
4709 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4710 		tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
4711 
4712 		/* the last tag is reserved for internal command. */
4713 		if (tag == ATA_TAG_INTERNAL)
4714 			continue;
4715 
4716 		if (!test_and_set_bit(tag, &ap->qc_allocated)) {
4717 			qc = __ata_qc_from_tag(ap, tag);
4718 			qc->tag = tag;
4719 			ap->last_tag = tag;
4720 			break;
4721 		}
4722 	}
4723 
4724 	return qc;
4725 }
4726 
4727 /**
4728  *	ata_qc_new_init - Request an available ATA command, and initialize it
4729  *	@dev: Device from whom we request an available command structure
4730  *
4731  *	LOCKING:
4732  *	None.
4733  */
4734 
4735 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4736 {
4737 	struct ata_port *ap = dev->link->ap;
4738 	struct ata_queued_cmd *qc;
4739 
4740 	qc = ata_qc_new(ap);
4741 	if (qc) {
4742 		qc->scsicmd = NULL;
4743 		qc->ap = ap;
4744 		qc->dev = dev;
4745 
4746 		ata_qc_reinit(qc);
4747 	}
4748 
4749 	return qc;
4750 }
4751 
4752 /**
4753  *	ata_qc_free - free unused ata_queued_cmd
4754  *	@qc: Command to complete
4755  *
4756  *	Designed to free unused ata_queued_cmd object
4757  *	in case something prevents using it.
4758  *
4759  *	LOCKING:
4760  *	spin_lock_irqsave(host lock)
4761  */
4762 void ata_qc_free(struct ata_queued_cmd *qc)
4763 {
4764 	struct ata_port *ap;
4765 	unsigned int tag;
4766 
4767 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4768 	ap = qc->ap;
4769 
4770 	qc->flags = 0;
4771 	tag = qc->tag;
4772 	if (likely(ata_tag_valid(tag))) {
4773 		qc->tag = ATA_TAG_POISON;
4774 		clear_bit(tag, &ap->qc_allocated);
4775 	}
4776 }
4777 
4778 void __ata_qc_complete(struct ata_queued_cmd *qc)
4779 {
4780 	struct ata_port *ap;
4781 	struct ata_link *link;
4782 
4783 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4784 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4785 	ap = qc->ap;
4786 	link = qc->dev->link;
4787 
4788 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4789 		ata_sg_clean(qc);
4790 
4791 	/* command should be marked inactive atomically with qc completion */
4792 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4793 		link->sactive &= ~(1 << qc->tag);
4794 		if (!link->sactive)
4795 			ap->nr_active_links--;
4796 	} else {
4797 		link->active_tag = ATA_TAG_POISON;
4798 		ap->nr_active_links--;
4799 	}
4800 
4801 	/* clear exclusive status */
4802 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4803 		     ap->excl_link == link))
4804 		ap->excl_link = NULL;
4805 
4806 	/* atapi: mark qc as inactive to prevent the interrupt handler
4807 	 * from completing the command twice later, before the error handler
4808 	 * is called. (when rc != 0 and atapi request sense is needed)
4809 	 */
4810 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4811 	ap->qc_active &= ~(1 << qc->tag);
4812 
4813 	/* call completion callback */
4814 	qc->complete_fn(qc);
4815 }
4816 
4817 static void fill_result_tf(struct ata_queued_cmd *qc)
4818 {
4819 	struct ata_port *ap = qc->ap;
4820 
4821 	qc->result_tf.flags = qc->tf.flags;
4822 	ap->ops->qc_fill_rtf(qc);
4823 }
4824 
4825 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4826 {
4827 	struct ata_device *dev = qc->dev;
4828 
4829 	if (ata_is_nodata(qc->tf.protocol))
4830 		return;
4831 
4832 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4833 		return;
4834 
4835 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4836 }
4837 
4838 /**
4839  *	ata_qc_complete - Complete an active ATA command
4840  *	@qc: Command to complete
4841  *
4842  *	Indicate to the mid and upper layers that an ATA command has
4843  *	completed, with either an ok or not-ok status.
4844  *
4845  *	Refrain from calling this function multiple times when
4846  *	successfully completing multiple NCQ commands.
4847  *	ata_qc_complete_multiple() should be used instead, which will
4848  *	properly update IRQ expect state.
4849  *
4850  *	LOCKING:
4851  *	spin_lock_irqsave(host lock)
4852  */
4853 void ata_qc_complete(struct ata_queued_cmd *qc)
4854 {
4855 	struct ata_port *ap = qc->ap;
4856 
4857 	/* XXX: New EH and old EH use different mechanisms to
4858 	 * synchronize EH with regular execution path.
4859 	 *
4860 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4861 	 * Normal execution path is responsible for not accessing a
4862 	 * failed qc.  libata core enforces the rule by returning NULL
4863 	 * from ata_qc_from_tag() for failed qcs.
4864 	 *
4865 	 * Old EH depends on ata_qc_complete() nullifying completion
4866 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4867 	 * not synchronize with interrupt handler.  Only PIO task is
4868 	 * taken care of.
4869 	 */
4870 	if (ap->ops->error_handler) {
4871 		struct ata_device *dev = qc->dev;
4872 		struct ata_eh_info *ehi = &dev->link->eh_info;
4873 
4874 		if (unlikely(qc->err_mask))
4875 			qc->flags |= ATA_QCFLAG_FAILED;
4876 
4877 		/*
4878 		 * Finish internal commands without any further processing
4879 		 * and always with the result TF filled.
4880 		 */
4881 		if (unlikely(ata_tag_internal(qc->tag))) {
4882 			fill_result_tf(qc);
4883 			__ata_qc_complete(qc);
4884 			return;
4885 		}
4886 
4887 		/*
4888 		 * Non-internal qc has failed.  Fill the result TF and
4889 		 * summon EH.
4890 		 */
4891 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4892 			fill_result_tf(qc);
4893 			ata_qc_schedule_eh(qc);
4894 			return;
4895 		}
4896 
4897 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4898 
4899 		/* read result TF if requested */
4900 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4901 			fill_result_tf(qc);
4902 
4903 		/* Some commands need post-processing after successful
4904 		 * completion.
4905 		 */
4906 		switch (qc->tf.command) {
4907 		case ATA_CMD_SET_FEATURES:
4908 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4909 			    qc->tf.feature != SETFEATURES_WC_OFF)
4910 				break;
4911 			/* fall through */
4912 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4913 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4914 			/* revalidate device */
4915 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4916 			ata_port_schedule_eh(ap);
4917 			break;
4918 
4919 		case ATA_CMD_SLEEP:
4920 			dev->flags |= ATA_DFLAG_SLEEPING;
4921 			break;
4922 		}
4923 
4924 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4925 			ata_verify_xfer(qc);
4926 
4927 		__ata_qc_complete(qc);
4928 	} else {
4929 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4930 			return;
4931 
4932 		/* read result TF if failed or requested */
4933 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4934 			fill_result_tf(qc);
4935 
4936 		__ata_qc_complete(qc);
4937 	}
4938 }
4939 
4940 /**
4941  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4942  *	@ap: port in question
4943  *	@qc_active: new qc_active mask
4944  *
4945  *	Complete in-flight commands.  This function is meant to be
4946  *	called from the low-level driver's interrupt routine to complete
4947  *	requests normally.  ap->qc_active and @qc_active are compared
4948  *	and commands are completed accordingly.
4949  *
4950  *	Always use this function when completing multiple NCQ commands
4951  *	from IRQ handlers instead of calling ata_qc_complete()
4952  *	multiple times to keep IRQ expect status properly in sync.
4953  *
4954  *	LOCKING:
4955  *	spin_lock_irqsave(host lock)
4956  *
4957  *	RETURNS:
4958  *	Number of completed commands on success, -errno otherwise.
4959  */
4960 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4961 {
4962 	int nr_done = 0;
4963 	u32 done_mask;
4964 
4965 	done_mask = ap->qc_active ^ qc_active;
4966 
4967 	if (unlikely(done_mask & qc_active)) {
4968 		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4969 			     ap->qc_active, qc_active);
4970 		return -EINVAL;
4971 	}
4972 
4973 	while (done_mask) {
4974 		struct ata_queued_cmd *qc;
4975 		unsigned int tag = __ffs(done_mask);
4976 
4977 		qc = ata_qc_from_tag(ap, tag);
4978 		if (qc) {
4979 			ata_qc_complete(qc);
4980 			nr_done++;
4981 		}
4982 		done_mask &= ~(1 << tag);
4983 	}
4984 
4985 	return nr_done;
4986 }
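
/*
 * A sketch of the intended call site, an NCQ-aware interrupt handler.
 * my_read_active_tags() is a hypothetical helper returning the
 * hardware's current mask of outstanding tags (ahci, for instance,
 * derives this from PxSACT/PxCI).
 */
static u32 my_read_active_tags(struct ata_port *ap);	/* hypothetical */

static void my_host_intr(struct ata_port *ap)
{
	u32 qc_active = my_read_active_tags(ap);

	/* completes every command whose tag the hardware has retired */
	ata_qc_complete_multiple(ap, qc_active);
}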
4987 
4988 /**
4989  *	ata_qc_issue - issue taskfile to device
4990  *	@qc: command to issue to device
4991  *
4992  *	Prepare an ATA command for submission to the device.
4993  *	This includes mapping the data into a DMA-able
4994  *	area, filling in the S/G table, and finally
4995  *	writing the taskfile to hardware, starting the command.
4996  *
4997  *	LOCKING:
4998  *	spin_lock_irqsave(host lock)
4999  */
5000 void ata_qc_issue(struct ata_queued_cmd *qc)
5001 {
5002 	struct ata_port *ap = qc->ap;
5003 	struct ata_link *link = qc->dev->link;
5004 	u8 prot = qc->tf.protocol;
5005 
5006 	/* Make sure only one non-NCQ command is outstanding.  The
5007 	 * check is skipped for old EH because it reuses active qc to
5008 	 * request ATAPI sense.
5009 	 */
5010 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5011 
5012 	if (ata_is_ncq(prot)) {
5013 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5014 
5015 		if (!link->sactive)
5016 			ap->nr_active_links++;
5017 		link->sactive |= 1 << qc->tag;
5018 	} else {
5019 		WARN_ON_ONCE(link->sactive);
5020 
5021 		ap->nr_active_links++;
5022 		link->active_tag = qc->tag;
5023 	}
5024 
5025 	qc->flags |= ATA_QCFLAG_ACTIVE;
5026 	ap->qc_active |= 1 << qc->tag;
5027 
5028 	/*
5029 	 * We guarantee to LLDs that they will have at least one
5030 	 * non-zero sg if the command is a data command.
5031 	 */
5032 	if (WARN_ON_ONCE(ata_is_data(prot) &&
5033 			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5034 		goto sys_err;
5035 
5036 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5037 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5038 		if (ata_sg_setup(qc))
5039 			goto sys_err;
5040 
5041 	/* if device is sleeping, schedule reset and abort the link */
5042 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5043 		link->eh_info.action |= ATA_EH_RESET;
5044 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5045 		ata_link_abort(link);
5046 		return;
5047 	}
5048 
5049 	ap->ops->qc_prep(qc);
5050 
5051 	qc->err_mask |= ap->ops->qc_issue(qc);
5052 	if (unlikely(qc->err_mask))
5053 		goto err;
5054 	return;
5055 
5056 sys_err:
5057 	qc->err_mask |= AC_ERR_SYSTEM;
5058 err:
5059 	ata_qc_complete(qc);
5060 }
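
/*
 * A sketch of the qc life cycle around this function, as seen from a
 * caller holding the host lock.  my_issue_nodata, my_done and the
 * prepared taskfile tf are hypothetical.
 */
static void my_issue_nodata(struct ata_device *dev,
			    const struct ata_taskfile *tf,
			    ata_qc_cb_t my_done)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);
	if (!qc)
		return;		/* port frozen or no free tag */

	qc->tf = *tf;
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->complete_fn = my_done;

	ata_qc_issue(qc);
}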
5061 
5062 /**
5063  *	sata_scr_valid - test whether SCRs are accessible
5064  *	@link: ATA link to test SCR accessibility for
5065  *
5066  *	Test whether SCRs are accessible for @link.
5067  *
5068  *	LOCKING:
5069  *	None.
5070  *
5071  *	RETURNS:
5072  *	1 if SCRs are accessible, 0 otherwise.
5073  */
5074 int sata_scr_valid(struct ata_link *link)
5075 {
5076 	struct ata_port *ap = link->ap;
5077 
5078 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5079 }
5080 
5081 /**
5082  *	sata_scr_read - read SCR register of the specified port
5083  *	@link: ATA link to read SCR for
5084  *	@reg: SCR to read
5085  *	@val: Place to store read value
5086  *
5087  *	Read SCR register @reg of @link into *@val.  This function is
5088  *	guaranteed to succeed if @link is ap->link, the cable type of
5089  *	the port is SATA and the port implements ->scr_read.
5090  *
5091  *	LOCKING:
5092  *	None if @link is ap->link.  Kernel thread context otherwise.
5093  *
5094  *	RETURNS:
5095  *	0 on success, negative errno on failure.
5096  */
5097 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5098 {
5099 	if (ata_is_host_link(link)) {
5100 		if (sata_scr_valid(link))
5101 			return link->ap->ops->scr_read(link, reg, val);
5102 		return -EOPNOTSUPP;
5103 	}
5104 
5105 	return sata_pmp_scr_read(link, reg, val);
5106 }
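
/*
 * A minimal sketch of a caller decoding the DET field of SStatus
 * (bits 3:0; 0x3 means device presence detected and phy communication
 * established).  This mirrors the check done by ata_sstatus_online().
 */
static bool my_link_established(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return false;

	return (sstatus & 0xf) == 0x3;
}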
5107 
5108 /**
5109  *	sata_scr_write - write SCR register of the specified port
5110  *	@link: ATA link to write SCR for
5111  *	@reg: SCR to write
5112  *	@val: value to write
5113  *
5114  *	Write @val to SCR register @reg of @link.  This function is
5115  *	guaranteed to succeed if @link is ap->link, the cable type of
5116  *	the port is SATA and the port implements ->scr_read.
5117  *
5118  *	LOCKING:
5119  *	None if @link is ap->link.  Kernel thread context otherwise.
5120  *
5121  *	RETURNS:
5122  *	0 on success, negative errno on failure.
5123  */
5124 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5125 {
5126 	if (ata_is_host_link(link)) {
5127 		if (sata_scr_valid(link))
5128 			return link->ap->ops->scr_write(link, reg, val);
5129 		return -EOPNOTSUPP;
5130 	}
5131 
5132 	return sata_pmp_scr_write(link, reg, val);
5133 }
5134 
5135 /**
5136  *	sata_scr_write_flush - write SCR register of the specified port and flush
5137  *	@link: ATA link to write SCR for
5138  *	@reg: SCR to write
5139  *	@val: value to write
5140  *
5141  *	This function is identical to sata_scr_write() except that this
5142  *	function performs flush after writing to the register.
5143  *
5144  *	LOCKING:
5145  *	None if @link is ap->link.  Kernel thread context otherwise.
5146  *
5147  *	RETURNS:
5148  *	0 on success, negative errno on failure.
5149  */
5150 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5151 {
5152 	if (ata_is_host_link(link)) {
5153 		int rc;
5154 
5155 		if (sata_scr_valid(link)) {
5156 			rc = link->ap->ops->scr_write(link, reg, val);
5157 			if (rc == 0)
5158 				rc = link->ap->ops->scr_read(link, reg, &val);
5159 			return rc;
5160 		}
5161 		return -EOPNOTSUPP;
5162 	}
5163 
5164 	return sata_pmp_scr_write(link, reg, val);
5165 }
5166 
5167 /**
5168  *	ata_phys_link_online - test whether the given link is online
5169  *	@link: ATA link to test
5170  *
5171  *	Test whether @link is online.  Note that this function returns
5172  *	0 if online status of @link cannot be obtained, so
5173  *	ata_link_online(link) != !ata_link_offline(link).
5174  *
5175  *	LOCKING:
5176  *	None.
5177  *
5178  *	RETURNS:
5179  *	True if the port online status is available and online.
5180  */
5181 bool ata_phys_link_online(struct ata_link *link)
5182 {
5183 	u32 sstatus;
5184 
5185 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5186 	    ata_sstatus_online(sstatus))
5187 		return true;
5188 	return false;
5189 }
5190 
5191 /**
5192  *	ata_phys_link_offline - test whether the given link is offline
5193  *	@link: ATA link to test
5194  *
5195  *	Test whether @link is offline.  Note that this function
5196  *	returns 0 if offline status of @link cannot be obtained, so
5197  *	ata_link_online(link) != !ata_link_offline(link).
5198  *
5199  *	LOCKING:
5200  *	None.
5201  *
5202  *	RETURNS:
5203  *	True if the port offline status is available and offline.
5204  */
5205 bool ata_phys_link_offline(struct ata_link *link)
5206 {
5207 	u32 sstatus;
5208 
5209 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5210 	    !ata_sstatus_online(sstatus))
5211 		return true;
5212 	return false;
5213 }
5214 
5215 /**
5216  *	ata_link_online - test whether the given link is online
5217  *	@link: ATA link to test
5218  *
5219  *	Test whether @link is online.  This is identical to
5220  *	ata_phys_link_online() when there's no slave link.  When
5221  *	there's a slave link, this function should only be called on
5222  *	the master link and will return true if any of M/S links is
5223  *	online.
5224  *
5225  *	LOCKING:
5226  *	None.
5227  *
5228  *	RETURNS:
5229  *	True if the port online status is available and online.
5230  */
5231 bool ata_link_online(struct ata_link *link)
5232 {
5233 	struct ata_link *slave = link->ap->slave_link;
5234 
5235 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5236 
5237 	return ata_phys_link_online(link) ||
5238 		(slave && ata_phys_link_online(slave));
5239 }
5240 
5241 /**
5242  *	ata_link_offline - test whether the given link is offline
5243  *	@link: ATA link to test
5244  *
5245  *	Test whether @link is offline.  This is identical to
5246  *	ata_phys_link_offline() when there's no slave link.  When
5247  *	there's a slave link, this function should only be called on
5248  *	the master link and will return true if both M/S links are
5249  *	offline.
5250  *
5251  *	LOCKING:
5252  *	None.
5253  *
5254  *	RETURNS:
5255  *	True if the port offline status is available and offline.
5256  */
5257 bool ata_link_offline(struct ata_link *link)
5258 {
5259 	struct ata_link *slave = link->ap->slave_link;
5260 
5261 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5262 
5263 	return ata_phys_link_offline(link) &&
5264 		(!slave || ata_phys_link_offline(slave));
5265 }
5266 
5267 #ifdef CONFIG_PM
5268 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5269 			       unsigned int action, unsigned int ehi_flags,
5270 			       int wait)
5271 {
5272 	struct ata_link *link;
5273 	unsigned long flags;
5274 	int rc;
5275 
5276 	/* Previous resume operation might still be in
5277 	 * progress.  Wait for PM_PENDING to clear.
5278 	 */
5279 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5280 		ata_port_wait_eh(ap);
5281 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5282 	}
5283 
5284 	/* request PM ops to EH */
5285 	spin_lock_irqsave(ap->lock, flags);
5286 
5287 	ap->pm_mesg = mesg;
5288 	if (wait) {
5289 		rc = 0;
5290 		ap->pm_result = &rc;
5291 	}
5292 
5293 	ap->pflags |= ATA_PFLAG_PM_PENDING;
5294 	ata_for_each_link(link, ap, HOST_FIRST) {
5295 		link->eh_info.action |= action;
5296 		link->eh_info.flags |= ehi_flags;
5297 	}
5298 
5299 	ata_port_schedule_eh(ap);
5300 
5301 	spin_unlock_irqrestore(ap->lock, flags);
5302 
5303 	/* wait and check result */
5304 	if (wait) {
5305 		ata_port_wait_eh(ap);
5306 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5307 	}
5308 
5309 	return rc;
5310 }
5311 
5312 #define to_ata_port(d) container_of(d, struct ata_port, tdev)
5313 
5314 static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5315 {
5316 	struct ata_port *ap = to_ata_port(dev);
5317 	unsigned int ehi_flags = ATA_EHI_QUIET;
5318 	int rc;
5319 
5320 	/*
5321 	 * On some hardware, a device fails to respond after being spun
5322 	 * down for suspend.  As the device won't be used before being
5323 	 * resumed, we don't need to touch the device.  Ask EH to skip
5324 	 * the usual stuff and proceed directly to suspend.
5325 	 *
5326 	 * http://thread.gmane.org/gmane.linux.ide/46764
5327 	 */
5328 	if (mesg.event == PM_EVENT_SUSPEND)
5329 		ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5330 
5331 	rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
5332 	return rc;
5333 }
5334 
5335 static int ata_port_suspend(struct device *dev)
5336 {
5337 	if (pm_runtime_suspended(dev))
5338 		return 0;
5339 
5340 	return ata_port_suspend_common(dev, PMSG_SUSPEND);
5341 }
5342 
5343 static int ata_port_do_freeze(struct device *dev)
5344 {
5345 	if (pm_runtime_suspended(dev))
5346 		pm_runtime_resume(dev);
5347 
5348 	return ata_port_suspend_common(dev, PMSG_FREEZE);
5349 }
5350 
5351 static int ata_port_poweroff(struct device *dev)
5352 {
5353 	if (pm_runtime_suspended(dev))
5354 		return 0;
5355 
5356 	return ata_port_suspend_common(dev, PMSG_HIBERNATE);
5357 }
5358 
5359 static int ata_port_resume_common(struct device *dev)
5360 {
5361 	struct ata_port *ap = to_ata_port(dev);
5362 	int rc;
5363 
5364 	rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
5365 		ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
5366 	return rc;
5367 }
5368 
5369 static int ata_port_resume(struct device *dev)
5370 {
5371 	int rc;
5372 
5373 	rc = ata_port_resume_common(dev);
5374 	if (!rc) {
5375 		pm_runtime_disable(dev);
5376 		pm_runtime_set_active(dev);
5377 		pm_runtime_enable(dev);
5378 	}
5379 
5380 	return rc;
5381 }
5382 
5383 static int ata_port_runtime_idle(struct device *dev)
5384 {
5385 	return pm_runtime_suspend(dev);
5386 }
5387 
5388 static const struct dev_pm_ops ata_port_pm_ops = {
5389 	.suspend = ata_port_suspend,
5390 	.resume = ata_port_resume,
5391 	.freeze = ata_port_do_freeze,
5392 	.thaw = ata_port_resume,
5393 	.poweroff = ata_port_poweroff,
5394 	.restore = ata_port_resume,
5395 
5396 	.runtime_suspend = ata_port_suspend,
5397 	.runtime_resume = ata_port_resume_common,
5398 	.runtime_idle = ata_port_runtime_idle,
5399 };
5400 
5401 /**
5402  *	ata_host_suspend - suspend host
5403  *	@host: host to suspend
5404  *	@mesg: PM message
5405  *
5406  *	Suspend @host.  Actual operation is performed by port suspend.
5407  */
5408 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5409 {
5410 	host->dev->power.power_state = mesg;
5411 	return 0;
5412 }
5413 
5414 /**
5415  *	ata_host_resume - resume host
5416  *	@host: host to resume
5417  *
5418  *	Resume @host.  Actual operation is performed by port resume.
5419  */
5420 void ata_host_resume(struct ata_host *host)
5421 {
5422 	host->dev->power.power_state = PMSG_ON;
5423 }
5424 #endif
5425 
5426 struct device_type ata_port_type = {
5427 	.name = "ata_port",
5428 #ifdef CONFIG_PM
5429 	.pm = &ata_port_pm_ops,
5430 #endif
5431 };
5432 
5433 /**
5434  *	ata_dev_init - Initialize an ata_device structure
5435  *	@dev: Device structure to initialize
5436  *
5437  *	Initialize @dev in preparation for probing.
5438  *
5439  *	LOCKING:
5440  *	Inherited from caller.
5441  */
5442 void ata_dev_init(struct ata_device *dev)
5443 {
5444 	struct ata_link *link = ata_dev_phys_link(dev);
5445 	struct ata_port *ap = link->ap;
5446 	unsigned long flags;
5447 
5448 	/* SATA spd limit is bound to the attached device, reset together */
5449 	link->sata_spd_limit = link->hw_sata_spd_limit;
5450 	link->sata_spd = 0;
5451 
5452 	/* High bits of dev->flags are used to record warm plug
5453 	 * requests which occur asynchronously.  Synchronize using
5454 	 * host lock.
5455 	 */
5456 	spin_lock_irqsave(ap->lock, flags);
5457 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5458 	dev->horkage = 0;
5459 	spin_unlock_irqrestore(ap->lock, flags);
5460 
5461 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5462 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5463 	dev->pio_mask = UINT_MAX;
5464 	dev->mwdma_mask = UINT_MAX;
5465 	dev->udma_mask = UINT_MAX;
5466 }
5467 
5468 /**
5469  *	ata_link_init - Initialize an ata_link structure
5470  *	@ap: ATA port link is attached to
5471  *	@link: Link structure to initialize
5472  *	@pmp: Port multiplier port number
5473  *
5474  *	Initialize @link.
5475  *
5476  *	LOCKING:
5477  *	Kernel thread context (may sleep)
5478  */
5479 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5480 {
5481 	int i;
5482 
5483 	/* clear everything except for devices */
5484 	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5485 	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5486 
5487 	link->ap = ap;
5488 	link->pmp = pmp;
5489 	link->active_tag = ATA_TAG_POISON;
5490 	link->hw_sata_spd_limit = UINT_MAX;
5491 
5492 	/* can't use iterator, ap isn't initialized yet */
5493 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5494 		struct ata_device *dev = &link->device[i];
5495 
5496 		dev->link = link;
5497 		dev->devno = dev - link->device;
5498 #ifdef CONFIG_ATA_ACPI
5499 		dev->gtf_filter = ata_acpi_gtf_filter;
5500 #endif
5501 		ata_dev_init(dev);
5502 	}
5503 }
5504 
5505 /**
5506  *	sata_link_init_spd - Initialize link->sata_spd_limit
5507  *	@link: Link to configure sata_spd_limit for
5508  *
5509  *	Initialize @link->[hw_]sata_spd_limit to the currently
5510  *	configured value.
5511  *
5512  *	LOCKING:
5513  *	Kernel thread context (may sleep).
5514  *
5515  *	RETURNS:
5516  *	0 on success, -errno on failure.
5517  */
5518 int sata_link_init_spd(struct ata_link *link)
5519 {
5520 	u8 spd;
5521 	int rc;
5522 
5523 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5524 	if (rc)
5525 		return rc;
5526 
5527 	spd = (link->saved_scontrol >> 4) & 0xf;
5528 	if (spd)
5529 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5530 
5531 	ata_force_link_limits(link);
5532 
5533 	link->sata_spd_limit = link->hw_sata_spd_limit;
5534 
5535 	return 0;
5536 }
5537 
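/*
 * Worked example (editorial): the SPD value read above is bits 7:4 of
 * SControl.  If the firmware programmed SPD = 2 (limit to 3.0Gbps),
 * the mask is (1 << 2) - 1 = 0x3, so hw_sata_spd_limit keeps only
 * bit 0 (1.5Gbps) and bit 1 (3.0Gbps).  SPD = 1 leaves only 0x1
 * (1.5Gbps), and SPD = 0 means "no restriction", in which case the
 * UINT_MAX set in ata_link_init() is left as is.
 */
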
5538 /**
5539  *	ata_port_alloc - allocate and initialize basic ATA port resources
5540  *	@host: ATA host this allocated port belongs to
5541  *
5542  *	Allocate and initialize basic ATA port resources.
5543  *
5544  *	RETURNS:
5545  *	Allocated ATA port on success, NULL on failure.
5546  *
5547  *	LOCKING:
5548  *	Inherited from calling layer (may sleep).
5549  */
5550 struct ata_port *ata_port_alloc(struct ata_host *host)
5551 {
5552 	struct ata_port *ap;
5553 
5554 	DPRINTK("ENTER\n");
5555 
5556 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5557 	if (!ap)
5558 		return NULL;
5559 
5560 	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5561 	ap->lock = &host->lock;
5562 	ap->print_id = -1;
5563 	ap->host = host;
5564 	ap->dev = host->dev;
5565 
5566 #if defined(ATA_VERBOSE_DEBUG)
5567 	/* turn on all debugging levels */
5568 	ap->msg_enable = 0x00FF;
5569 #elif defined(ATA_DEBUG)
5570 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5571 #else
5572 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5573 #endif
5574 
5575 	mutex_init(&ap->scsi_scan_mutex);
5576 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5577 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5578 	INIT_LIST_HEAD(&ap->eh_done_q);
5579 	init_waitqueue_head(&ap->eh_wait_q);
5580 	init_completion(&ap->park_req_pending);
5581 	init_timer_deferrable(&ap->fastdrain_timer);
5582 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5583 	ap->fastdrain_timer.data = (unsigned long)ap;
5584 
5585 	ap->cbl = ATA_CBL_NONE;
5586 
5587 	ata_link_init(ap, &ap->link, 0);
5588 
5589 #ifdef ATA_IRQ_TRAP
5590 	ap->stats.unhandled_irq = 1;
5591 	ap->stats.idle_irq = 1;
5592 #endif
5593 	ata_sff_port_init(ap);
5594 
5595 	return ap;
5596 }
5597 
5598 static void ata_host_release(struct device *gendev, void *res)
5599 {
5600 	struct ata_host *host = dev_get_drvdata(gendev);
5601 	int i;
5602 
5603 	for (i = 0; i < host->n_ports; i++) {
5604 		struct ata_port *ap = host->ports[i];
5605 
5606 		if (!ap)
5607 			continue;
5608 
5609 		if (ap->scsi_host)
5610 			scsi_host_put(ap->scsi_host);
5611 
5612 		kfree(ap->pmp_link);
5613 		kfree(ap->slave_link);
5614 		kfree(ap);
5615 		host->ports[i] = NULL;
5616 	}
5617 
5618 	dev_set_drvdata(gendev, NULL);
5619 }
5620 
5621 /**
5622  *	ata_host_alloc - allocate and init basic ATA host resources
5623  *	@dev: generic device this host is associated with
5624  *	@max_ports: maximum number of ATA ports associated with this host
5625  *
5626  *	Allocate and initialize basic ATA host resources.  LLD calls
5627  *	this function to allocate a host, initializes it fully and
5628  *	attaches it using ata_host_register().
5629  *
5630  *	@max_ports ports are allocated and host->n_ports is
5631  *	initialized to @max_ports.  The caller is allowed to decrease
5632  *	host->n_ports before calling ata_host_register().  The unused
5633  *	ports will be automatically freed on registration.
5634  *
5635  *	RETURNS:
5636  *	Allocated ATA host on success, NULL on failure.
5637  *
5638  *	LOCKING:
5639  *	Inherited from calling layer (may sleep).
5640  */
5641 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5642 {
5643 	struct ata_host *host;
5644 	size_t sz;
5645 	int i;
5646 
5647 	DPRINTK("ENTER\n");
5648 
5649 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5650 		return NULL;
5651 
5652 	/* alloc a container for our list of ATA ports (buses) */
5653 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5655 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5656 	if (!host)
5657 		goto err_out;
5658 
5659 	devres_add(dev, host);
5660 	dev_set_drvdata(dev, host);
5661 
5662 	spin_lock_init(&host->lock);
5663 	mutex_init(&host->eh_mutex);
5664 	host->dev = dev;
5665 	host->n_ports = max_ports;
5666 
5667 	/* allocate ports bound to this host */
5668 	for (i = 0; i < max_ports; i++) {
5669 		struct ata_port *ap;
5670 
5671 		ap = ata_port_alloc(host);
5672 		if (!ap)
5673 			goto err_out;
5674 
5675 		ap->port_no = i;
5676 		host->ports[i] = ap;
5677 	}
5678 
5679 	devres_remove_group(dev, NULL);
5680 	return host;
5681 
5682  err_out:
5683 	devres_release_group(dev, NULL);
5684 	return NULL;
5685 }
5686 
5687 /**
5688  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5689  *	@dev: generic device this host is associated with
5690  *	@ppi: array of ATA port_info to initialize host with
5691  *	@n_ports: number of ATA ports attached to this host
5692  *
5693  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5694  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5695  *	last entry will be used for the remaining ports.
5696  *
5697  *	RETURNS:
5698  *	Allocated ATA host on success, NULL on failure.
5699  *
5700  *	LOCKING:
5701  *	Inherited from calling layer (may sleep).
5702  */
5703 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5704 				      const struct ata_port_info * const * ppi,
5705 				      int n_ports)
5706 {
5707 	const struct ata_port_info *pi;
5708 	struct ata_host *host;
5709 	int i, j;
5710 
5711 	host = ata_host_alloc(dev, n_ports);
5712 	if (!host)
5713 		return NULL;
5714 
5715 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5716 		struct ata_port *ap = host->ports[i];
5717 
5718 		if (ppi[j])
5719 			pi = ppi[j++];
5720 
5721 		ap->pio_mask = pi->pio_mask;
5722 		ap->mwdma_mask = pi->mwdma_mask;
5723 		ap->udma_mask = pi->udma_mask;
5724 		ap->flags |= pi->flags;
5725 		ap->link.flags |= pi->link_flags;
5726 		ap->ops = pi->port_ops;
5727 
5728 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5729 			host->ops = pi->port_ops;
5730 	}
5731 
5732 	return host;
5733 }
5734 
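/*
 * Illustrative sketch (editorial, not part of this file): a typical
 * LLD builds a NULL terminated port_info array and lets the last
 * entry cover the remaining ports.  my_port_info and pdev below are
 * hypothetical; the helpers and flags are the real libata ones.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &sata_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *
 *	if (!host)
 *		return -ENOMEM;
 *
 * Both ports get my_port_info because ppi[1] is NULL.
 */
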
5735 /**
5736  *	ata_slave_link_init - initialize slave link
5737  *	@ap: port to initialize slave link for
5738  *
5739  *	Create and initialize slave link for @ap.  This enables slave
5740  *	link handling on the port.
5741  *
5742  *	In libata, a port contains links and a link contains devices.
5743  *	There is a single host link but if a PMP is attached to it,
5744  *	there can be multiple fan-out links.  On SATA, there's usually
5745  *	a single device connected to a link but PATA and SATA
5746  *	controllers emulating TF based interface can have two - master
5747  *	and slave.
5748  *
5749  *	However, there are a few controllers which don't fit into this
5750  *	abstraction too well - SATA controllers which emulate TF
5751  *	interface with both master and slave devices but also have
5752  *	separate SCR register sets for each device.  These controllers
5753  *	need separate links for physical link handling
5754  *	(e.g. onlineness, link speed) but should be treated like a
5755  *	traditional M/S controller for everything else (e.g. command
5756  *	issue, softreset).
5757  *
5758  *	slave_link is libata's way of handling this class of
5759  *	controllers without impacting core layer too much.  For
5760  *	anything other than physical link handling, the default host
5761  *	link is used for both master and slave.  For physical link
5762  *	handling, separate @ap->slave_link is used.  All dirty details
5763  *	are implemented inside libata core layer.  From LLD's POV, the
5764  *	only difference is that prereset, hardreset and postreset are
5765  *	called once more for the slave link, so the reset sequence
5766  *	looks like the following.
5767  *
5768  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5769  *	softreset(M) -> postreset(M) -> postreset(S)
5770  *
5771  *	Note that softreset is called only for the master.  Softreset
5772  *	resets both M/S by definition, so SRST on master should handle
5773  *	both (the standard method will work just fine).
5774  *
5775  *	LOCKING:
5776  *	Should be called before host is registered.
5777  *
5778  *	RETURNS:
5779  *	0 on success, -errno on failure.
5780  */
5781 int ata_slave_link_init(struct ata_port *ap)
5782 {
5783 	struct ata_link *link;
5784 
5785 	WARN_ON(ap->slave_link);
5786 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5787 
5788 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5789 	if (!link)
5790 		return -ENOMEM;
5791 
5792 	ata_link_init(ap, link, 1);
5793 	ap->slave_link = link;
5794 	return 0;
5795 }
5796 
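/*
 * Illustrative sketch (editorial): an LLD for such a controller
 * typically enables the slave links right after allocating the host,
 * before ata_host_register()/ata_host_activate(), e.g.:
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 *
 * From then on the core invokes prereset/hardreset/postreset once more
 * for ap->slave_link as described above; the LLD's scr_read/scr_write
 * methods just have to address the correct register set for each link.
 */
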
5797 static void ata_host_stop(struct device *gendev, void *res)
5798 {
5799 	struct ata_host *host = dev_get_drvdata(gendev);
5800 	int i;
5801 
5802 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5803 
5804 	for (i = 0; i < host->n_ports; i++) {
5805 		struct ata_port *ap = host->ports[i];
5806 
5807 		if (ap->ops->port_stop)
5808 			ap->ops->port_stop(ap);
5809 	}
5810 
5811 	if (host->ops->host_stop)
5812 		host->ops->host_stop(host);
5813 }
5814 
5815 /**
5816  *	ata_finalize_port_ops - finalize ata_port_operations
5817  *	@ops: ata_port_operations to finalize
5818  *
5819  *	An ata_port_operations can inherit from another ops and that
5820  *	ops can again inherit from another.  This can go on as many
5821  *	times as necessary as long as there is no loop in the
5822  *	inheritance chain.
5823  *
5824  *	Ops tables are finalized when the host is started.  NULL or
5825  *	unspecified entries are inherited from the closest ancestor
5826  *	which has the method and the entry is populated with it.
5827  *	After finalization, the ops table directly points to all the
5828  *	methods and ->inherits is no longer necessary and cleared.
5829  *
5830  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5831  *
5832  *	LOCKING:
5833  *	None.
5834  */
5835 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5836 {
5837 	static DEFINE_SPINLOCK(lock);
5838 	const struct ata_port_operations *cur;
5839 	void **begin = (void **)ops;
5840 	void **end = (void **)&ops->inherits;
5841 	void **pp;
5842 
5843 	if (!ops || !ops->inherits)
5844 		return;
5845 
5846 	spin_lock(&lock);
5847 
5848 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5849 		void **inherit = (void **)cur;
5850 
5851 		for (pp = begin; pp < end; pp++, inherit++)
5852 			if (!*pp)
5853 				*pp = *inherit;
5854 	}
5855 
5856 	for (pp = begin; pp < end; pp++)
5857 		if (IS_ERR(*pp))
5858 			*pp = NULL;
5859 
5860 	ops->inherits = NULL;
5861 
5862 	spin_unlock(&lock);
5863 }
5864 
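/*
 * Example (editorial sketch): an inheriting ops table only spells out
 * what it overrides.  Given something like the following, where
 * my_hardreset is hypothetical,
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.postreset	= ATA_OP_NULL,
 *	};
 *
 * finalization copies qc_defer, prereset, error_handler, etc. from
 * sata_port_ops (and transitively from ata_base_port_ops), keeps the
 * overriding my_hardreset, and converts the ATA_OP_NULL entry into a
 * real NULL so no postreset method is called.
 */
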
5865 /**
5866  *	ata_host_start - start and freeze ports of an ATA host
5867  *	@host: ATA host to start ports for
5868  *
5869  *	Start and then freeze ports of @host.  Started status is
5870  *	recorded in host->flags, so this function can be called
5871  *	multiple times.  Ports are guaranteed to get started only
5872  *	once.  If host->ops isn't initialized yet, it's set to the
5873  *	first non-dummy port ops.
5874  *
5875  *	LOCKING:
5876  *	Inherited from calling layer (may sleep).
5877  *
5878  *	RETURNS:
5879  *	0 if all ports are started successfully, -errno otherwise.
5880  */
5881 int ata_host_start(struct ata_host *host)
5882 {
5883 	int have_stop = 0;
5884 	void *start_dr = NULL;
5885 	int i, rc;
5886 
5887 	if (host->flags & ATA_HOST_STARTED)
5888 		return 0;
5889 
5890 	ata_finalize_port_ops(host->ops);
5891 
5892 	for (i = 0; i < host->n_ports; i++) {
5893 		struct ata_port *ap = host->ports[i];
5894 
5895 		ata_finalize_port_ops(ap->ops);
5896 
5897 		if (!host->ops && !ata_port_is_dummy(ap))
5898 			host->ops = ap->ops;
5899 
5900 		if (ap->ops->port_stop)
5901 			have_stop = 1;
5902 	}
5903 
5904 	if (host->ops->host_stop)
5905 		have_stop = 1;
5906 
5907 	if (have_stop) {
5908 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5909 		if (!start_dr)
5910 			return -ENOMEM;
5911 	}
5912 
5913 	for (i = 0; i < host->n_ports; i++) {
5914 		struct ata_port *ap = host->ports[i];
5915 
5916 		if (ap->ops->port_start) {
5917 			rc = ap->ops->port_start(ap);
5918 			if (rc) {
5919 				if (rc != -ENODEV)
5920 					dev_err(host->dev,
5921 						"failed to start port %d (errno=%d)\n",
5922 						i, rc);
5923 				goto err_out;
5924 			}
5925 		}
5926 		ata_eh_freeze_port(ap);
5927 	}
5928 
5929 	if (start_dr)
5930 		devres_add(host->dev, start_dr);
5931 	host->flags |= ATA_HOST_STARTED;
5932 	return 0;
5933 
5934  err_out:
5935 	while (--i >= 0) {
5936 		struct ata_port *ap = host->ports[i];
5937 
5938 		if (ap->ops->port_stop)
5939 			ap->ops->port_stop(ap);
5940 	}
5941 	devres_free(start_dr);
5942 	return rc;
5943 }
5944 
5945 /**
5946  *	ata_host_init - Initialize a host struct
5947  *	@host:	host to initialize
5948  *	@dev:	device host is attached to
5949  *	@flags:	host flags
5950  *	@ops:	port_ops
5951  *
5952  *	LOCKING:
5953  *	PCI/etc. bus probe sem.
5954  *
5955  */
5956 /* KILLME - the only user left is ipr */
5957 void ata_host_init(struct ata_host *host, struct device *dev,
5958 		   unsigned long flags, struct ata_port_operations *ops)
5959 {
5960 	spin_lock_init(&host->lock);
5961 	mutex_init(&host->eh_mutex);
5962 	host->dev = dev;
5963 	host->flags = flags;
5964 	host->ops = ops;
5965 }
5966 
5967 void __ata_port_probe(struct ata_port *ap)
5968 {
5969 	struct ata_eh_info *ehi = &ap->link.eh_info;
5970 	unsigned long flags;
5971 
5972 	/* kick EH for boot probing */
5973 	spin_lock_irqsave(ap->lock, flags);
5974 
5975 	ehi->probe_mask |= ATA_ALL_DEVICES;
5976 	ehi->action |= ATA_EH_RESET;
5977 	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5978 
5979 	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5980 	ap->pflags |= ATA_PFLAG_LOADING;
5981 	ata_port_schedule_eh(ap);
5982 
5983 	spin_unlock_irqrestore(ap->lock, flags);
5984 }
5985 
5986 int ata_port_probe(struct ata_port *ap)
5987 {
5988 	int rc = 0;
5989 
5990 	if (ap->ops->error_handler) {
5991 		__ata_port_probe(ap);
5992 		ata_port_wait_eh(ap);
5993 	} else {
5994 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5995 		rc = ata_bus_probe(ap);
5996 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
5997 	}
5998 	return rc;
5999 }
6000 
6001 
6002 static void async_port_probe(void *data, async_cookie_t cookie)
6003 {
6004 	struct ata_port *ap = data;
6005 
6006 	/*
6007 	 * If we're not allowed to scan this host in parallel,
6008 	 * we need to wait until all previous scans have completed
6009 	 * before going further.
6010 	 * Jeff Garzik says this is only within a controller, so we
6011 	 * don't need to wait for port 0, only for later ports.
6012 	 */
6013 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6014 		async_synchronize_cookie(cookie);
6015 
6016 	(void)ata_port_probe(ap);
6017 
6018 	/* in order to keep device order, we need to synchronize at this point */
6019 	async_synchronize_cookie(cookie);
6020 
6021 	ata_scsi_scan_host(ap, 1);
6022 }
6023 
6024 /**
6025  *	ata_host_register - register initialized ATA host
6026  *	@host: ATA host to register
6027  *	@sht: template for SCSI host
6028  *
6029  *	Register initialized ATA host.  @host is allocated using
6030  *	ata_host_alloc() and fully initialized by LLD.  This function
6031  *	starts ports, registers @host with ATA and SCSI layers and
6032  *	probes registered devices.
6033  *
6034  *	LOCKING:
6035  *	Inherited from calling layer (may sleep).
6036  *
6037  *	RETURNS:
6038  *	0 on success, -errno otherwise.
6039  */
6040 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6041 {
6042 	int i, rc;
6043 
6044 	/* host must have been started */
6045 	if (!(host->flags & ATA_HOST_STARTED)) {
6046 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6047 		WARN_ON(1);
6048 		return -EINVAL;
6049 	}
6050 
6051 	/* Blow away unused ports.  This happens when LLD can't
6052 	 * determine the exact number of ports to allocate at
6053 	 * allocation time.
6054 	 */
6055 	for (i = host->n_ports; host->ports[i]; i++)
6056 		kfree(host->ports[i]);
6057 
6058 	/* give ports names and add SCSI hosts */
6059 	for (i = 0; i < host->n_ports; i++)
6060 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6061 
6062 
6063 	/* Create associated sysfs transport objects  */
6064 	for (i = 0; i < host->n_ports; i++) {
6065 		rc = ata_tport_add(host->dev,host->ports[i]);
6066 		if (rc) {
6067 			goto err_tadd;
6068 		}
6069 	}
6070 
6071 	rc = ata_scsi_add_hosts(host, sht);
6072 	if (rc)
6073 		goto err_tadd;
6074 
6075 	/* associate with ACPI nodes */
6076 	ata_acpi_associate(host);
6077 
6078 	/* set cable, sata_spd_limit and report */
6079 	for (i = 0; i < host->n_ports; i++) {
6080 		struct ata_port *ap = host->ports[i];
6081 		unsigned long xfer_mask;
6082 
6083 		/* set SATA cable type if still unset */
6084 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6085 			ap->cbl = ATA_CBL_SATA;
6086 
6087 		/* init sata_spd_limit to the current value */
6088 		sata_link_init_spd(&ap->link);
6089 		if (ap->slave_link)
6090 			sata_link_init_spd(ap->slave_link);
6091 
6092 		/* print per-port info to dmesg */
6093 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6094 					      ap->udma_mask);
6095 
6096 		if (!ata_port_is_dummy(ap)) {
6097 			ata_port_info(ap, "%cATA max %s %s\n",
6098 				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6099 				      ata_mode_string(xfer_mask),
6100 				      ap->link.eh_info.desc);
6101 			ata_ehi_clear_desc(&ap->link.eh_info);
6102 		} else
6103 			ata_port_info(ap, "DUMMY\n");
6104 	}
6105 
6106 	/* perform each probe asynchronously */
6107 	for (i = 0; i < host->n_ports; i++) {
6108 		struct ata_port *ap = host->ports[i];
6109 		async_schedule(async_port_probe, ap);
6110 	}
6111 
6112 	return 0;
6113 
6114  err_tadd:
6115 	while (--i >= 0) {
6116 		ata_tport_delete(host->ports[i]);
6117 	}
6118 	return rc;
6119 
6120 }
6121 
6122 /**
6123  *	ata_host_activate - start host, request IRQ and register it
6124  *	@host: target ATA host
6125  *	@irq: IRQ to request
6126  *	@irq_handler: irq_handler used when requesting IRQ
6127  *	@irq_flags: irq_flags used when requesting IRQ
6128  *	@sht: scsi_host_template to use when registering the host
6129  *
6130  *	After allocating an ATA host and initializing it, most libata
6131  *	LLDs perform three steps to activate the host - start host,
6132  *	request IRQ and register it.  This helper takes the necessary
6133  *	arguments and performs the three steps in one go.
6134  *
6135  *	An invalid IRQ skips the IRQ registration and expects the host to
6136  *	have set polling mode on the port. In this case, @irq_handler
6137  *	should be NULL.
6138  *
6139  *	LOCKING:
6140  *	Inherited from calling layer (may sleep).
6141  *
6142  *	RETURNS:
6143  *	0 on success, -errno otherwise.
6144  */
6145 int ata_host_activate(struct ata_host *host, int irq,
6146 		      irq_handler_t irq_handler, unsigned long irq_flags,
6147 		      struct scsi_host_template *sht)
6148 {
6149 	int i, rc;
6150 
6151 	rc = ata_host_start(host);
6152 	if (rc)
6153 		return rc;
6154 
6155 	/* Special case for polling mode */
6156 	if (!irq) {
6157 		WARN_ON(irq_handler);
6158 		return ata_host_register(host, sht);
6159 	}
6160 
6161 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6162 			      dev_driver_string(host->dev), host);
6163 	if (rc)
6164 		return rc;
6165 
6166 	for (i = 0; i < host->n_ports; i++)
6167 		ata_port_desc(host->ports[i], "irq %d", irq);
6168 
6169 	rc = ata_host_register(host, sht);
6170 	/* if failed, just free the IRQ and leave ports alone */
6171 	if (rc)
6172 		devm_free_irq(host->dev, irq, host);
6173 
6174 	return rc;
6175 }
6176 
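/*
 * Illustrative sketch (editorial): the tail of a typical PCI LLD probe
 * routine, assuming @host was allocated and its iomaps set up earlier.
 * my_interrupt and my_sht are hypothetical driver symbols:
 *
 *	pci_set_master(pdev);
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 *
 * A polling-only host passes irq == 0 and a NULL handler, in which
 * case only ata_host_start() and ata_host_register() are performed.
 */
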
6177 /**
6178  *	ata_port_detach - Detach ATA port in preparation for device removal
6179  *	@ap: ATA port to be detached
6180  *
6181  *	Detach all ATA devices and the associated SCSI devices of @ap;
6182  *	then, remove the associated SCSI host.  @ap is guaranteed to
6183  *	be quiescent on return from this function.
6184  *
6185  *	LOCKING:
6186  *	Kernel thread context (may sleep).
6187  */
6188 static void ata_port_detach(struct ata_port *ap)
6189 {
6190 	unsigned long flags;
6191 
6192 	if (!ap->ops->error_handler)
6193 		goto skip_eh;
6194 
6195 	/* tell EH we're leaving & flush EH */
6196 	spin_lock_irqsave(ap->lock, flags);
6197 	ap->pflags |= ATA_PFLAG_UNLOADING;
6198 	ata_port_schedule_eh(ap);
6199 	spin_unlock_irqrestore(ap->lock, flags);
6200 
6201 	/* wait till EH commits suicide */
6202 	ata_port_wait_eh(ap);
6203 
6204 	/* it better be dead now */
6205 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6206 
6207 	cancel_delayed_work_sync(&ap->hotplug_task);
6208 
6209  skip_eh:
6210 	if (ap->pmp_link) {
6211 		int i;
6212 		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6213 			ata_tlink_delete(&ap->pmp_link[i]);
6214 	}
6215 	ata_tport_delete(ap);
6216 
6217 	/* remove the associated SCSI host */
6218 	scsi_remove_host(ap->scsi_host);
6219 }
6220 
6221 /**
6222  *	ata_host_detach - Detach all ports of an ATA host
6223  *	@host: Host to detach
6224  *
6225  *	Detach all ports of @host.
6226  *
6227  *	LOCKING:
6228  *	Kernel thread context (may sleep).
6229  */
6230 void ata_host_detach(struct ata_host *host)
6231 {
6232 	int i;
6233 
6234 	for (i = 0; i < host->n_ports; i++)
6235 		ata_port_detach(host->ports[i]);
6236 
6237 	/* the host is dead now, dissociate ACPI */
6238 	ata_acpi_dissociate(host);
6239 }
6240 
6241 #ifdef CONFIG_PCI
6242 
6243 /**
6244  *	ata_pci_remove_one - PCI layer callback for device removal
6245  *	@pdev: PCI device that was removed
6246  *
6247  *	PCI layer indicates to libata via this hook that hot-unplug or
6248  *	module unload event has occurred.  Detach all ports.  Resource
6249  *	release is handled via devres.
6250  *
6251  *	LOCKING:
6252  *	Inherited from PCI layer (may sleep).
6253  */
6254 void ata_pci_remove_one(struct pci_dev *pdev)
6255 {
6256 	struct device *dev = &pdev->dev;
6257 	struct ata_host *host = dev_get_drvdata(dev);
6258 
6259 	ata_host_detach(host);
6260 }
6261 
6262 /* move to PCI subsystem */
6263 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6264 {
6265 	unsigned long tmp = 0;
6266 
6267 	switch (bits->width) {
6268 	case 1: {
6269 		u8 tmp8 = 0;
6270 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6271 		tmp = tmp8;
6272 		break;
6273 	}
6274 	case 2: {
6275 		u16 tmp16 = 0;
6276 		pci_read_config_word(pdev, bits->reg, &tmp16);
6277 		tmp = tmp16;
6278 		break;
6279 	}
6280 	case 4: {
6281 		u32 tmp32 = 0;
6282 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6283 		tmp = tmp32;
6284 		break;
6285 	}
6286 
6287 	default:
6288 		return -EINVAL;
6289 	}
6290 
6291 	tmp &= bits->mask;
6292 
6293 	return (tmp == bits->val) ? 1 : 0;
6294 }
6295 
6296 #ifdef CONFIG_PM
6297 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6298 {
6299 	pci_save_state(pdev);
6300 	pci_disable_device(pdev);
6301 
6302 	if (mesg.event & PM_EVENT_SLEEP)
6303 		pci_set_power_state(pdev, PCI_D3hot);
6304 }
6305 
6306 int ata_pci_device_do_resume(struct pci_dev *pdev)
6307 {
6308 	int rc;
6309 
6310 	pci_set_power_state(pdev, PCI_D0);
6311 	pci_restore_state(pdev);
6312 
6313 	rc = pcim_enable_device(pdev);
6314 	if (rc) {
6315 		dev_err(&pdev->dev,
6316 			"failed to enable device after resume (%d)\n", rc);
6317 		return rc;
6318 	}
6319 
6320 	pci_set_master(pdev);
6321 	return 0;
6322 }
6323 
6324 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6325 {
6326 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6327 	int rc = 0;
6328 
6329 	rc = ata_host_suspend(host, mesg);
6330 	if (rc)
6331 		return rc;
6332 
6333 	ata_pci_device_do_suspend(pdev, mesg);
6334 
6335 	return 0;
6336 }
6337 
6338 int ata_pci_device_resume(struct pci_dev *pdev)
6339 {
6340 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6341 	int rc;
6342 
6343 	rc = ata_pci_device_do_resume(pdev);
6344 	if (rc == 0)
6345 		ata_host_resume(host);
6346 	return rc;
6347 }
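
/*
 * Illustrative sketch (editorial): PCI LLDs without controller
 * specific PM work usually point their pci_driver directly at the two
 * helpers above (my_pci_driver is hypothetical):
 *
 *	static struct pci_driver my_pci_driver = {
 *		...
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 *
 * Drivers that do need extra resume work typically call
 * ata_pci_device_do_resume(), re-initialize the controller, then call
 * ata_host_resume() themselves.
 */
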
6348 #endif /* CONFIG_PM */
6349 
6350 #endif /* CONFIG_PCI */
6351 
6352 static int __init ata_parse_force_one(char **cur,
6353 				      struct ata_force_ent *force_ent,
6354 				      const char **reason)
6355 {
6356 	/* FIXME: Currently, there's no way to tag init const data and
6357 	 * using __initdata causes build failure on some versions of
6358 	 * gcc.  Once __initdataconst is implemented, add const to the
6359 	 * following structure.
6360 	 */
6361 	static struct ata_force_param force_tbl[] __initdata = {
6362 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6363 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6364 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6365 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6366 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6367 		{ "sata",	.cbl		= ATA_CBL_SATA },
6368 		{ "1.5Gbps",	.spd_limit	= 1 },
6369 		{ "3.0Gbps",	.spd_limit	= 2 },
6370 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6371 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6372 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6373 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6374 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6375 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6376 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6377 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6378 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6379 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6380 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6381 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6382 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6383 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6384 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6385 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6386 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6387 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6388 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6389 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6390 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6391 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6392 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6393 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6394 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6395 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6396 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6397 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6398 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6399 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6400 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6401 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6402 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6403 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6404 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6405 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6406 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6407 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6408 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6409 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6410 	};
6411 	char *start = *cur, *p = *cur;
6412 	char *id, *val, *endp;
6413 	const struct ata_force_param *match_fp = NULL;
6414 	int nr_matches = 0, i;
6415 
6416 	/* find where this param ends and update *cur */
6417 	while (*p != '\0' && *p != ',')
6418 		p++;
6419 
6420 	if (*p == '\0')
6421 		*cur = p;
6422 	else
6423 		*cur = p + 1;
6424 
6425 	*p = '\0';
6426 
6427 	/* parse */
6428 	p = strchr(start, ':');
6429 	if (!p) {
6430 		val = strstrip(start);
6431 		goto parse_val;
6432 	}
6433 	*p = '\0';
6434 
6435 	id = strstrip(start);
6436 	val = strstrip(p + 1);
6437 
6438 	/* parse id */
6439 	p = strchr(id, '.');
6440 	if (p) {
6441 		*p++ = '\0';
6442 		force_ent->device = simple_strtoul(p, &endp, 10);
6443 		if (p == endp || *endp != '\0') {
6444 			*reason = "invalid device";
6445 			return -EINVAL;
6446 		}
6447 	}
6448 
6449 	force_ent->port = simple_strtoul(id, &endp, 10);
6450 	if (id == endp || *endp != '\0') {
6451 		*reason = "invalid port/link";
6452 		return -EINVAL;
6453 	}
6454 
6455  parse_val:
6456 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6457 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6458 		const struct ata_force_param *fp = &force_tbl[i];
6459 
6460 		if (strncasecmp(val, fp->name, strlen(val)))
6461 			continue;
6462 
6463 		nr_matches++;
6464 		match_fp = fp;
6465 
6466 		if (strcasecmp(val, fp->name) == 0) {
6467 			nr_matches = 1;
6468 			break;
6469 		}
6470 	}
6471 
6472 	if (!nr_matches) {
6473 		*reason = "unknown value";
6474 		return -EINVAL;
6475 	}
6476 	if (nr_matches > 1) {
6477 		*reason = "ambiguous value";
6478 		return -EINVAL;
6479 	}
6480 
6481 	force_ent->param = *match_fp;
6482 
6483 	return 0;
6484 }
6485 
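/*
 * Example strings accepted by the parser above (editorial; see
 * Documentation/kernel-parameters.txt for the full description of
 * libata.force):
 *
 *	libata.force=3.0Gbps		limit every link to 3.0Gbps
 *	libata.force=1:1.5Gbps		apply only to ata1
 *	libata.force=2.00:noncq		turn off NCQ for device ata2.00
 *	libata.force=1:nohrst,2:udma/33	several comma separated entries
 *
 * An entry without a leading "port[.device]:" ID reuses the ID of the
 * previous entry, or applies everywhere if it is the first one.
 */
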
6486 static void __init ata_parse_force_param(void)
6487 {
6488 	int idx = 0, size = 1;
6489 	int last_port = -1, last_device = -1;
6490 	char *p, *cur, *next;
6491 
6492 	/* calculate maximum number of params and allocate force_tbl */
6493 	for (p = ata_force_param_buf; *p; p++)
6494 		if (*p == ',')
6495 			size++;
6496 
6497 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6498 	if (!ata_force_tbl) {
6499 		printk(KERN_WARNING "ata: failed to extend force table, "
6500 		       "libata.force ignored\n");
6501 		return;
6502 	}
6503 
6504 	/* parse and populate the table */
6505 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6506 		const char *reason = "";
6507 		struct ata_force_ent te = { .port = -1, .device = -1 };
6508 
6509 		next = cur;
6510 		if (ata_parse_force_one(&next, &te, &reason)) {
6511 			printk(KERN_WARNING "ata: failed to parse force "
6512 			       "parameter \"%s\" (%s)\n",
6513 			       cur, reason);
6514 			continue;
6515 		}
6516 
6517 		if (te.port == -1) {
6518 			te.port = last_port;
6519 			te.device = last_device;
6520 		}
6521 
6522 		ata_force_tbl[idx++] = te;
6523 
6524 		last_port = te.port;
6525 		last_device = te.device;
6526 	}
6527 
6528 	ata_force_tbl_size = idx;
6529 }
6530 
6531 static int __init ata_init(void)
6532 {
6533 	int rc;
6534 
6535 	ata_parse_force_param();
6536 
6537 	rc = ata_sff_init();
6538 	if (rc) {
6539 		kfree(ata_force_tbl);
6540 		return rc;
6541 	}
6542 
6543 	libata_transport_init();
6544 	ata_scsi_transport_template = ata_attach_transport();
6545 	if (!ata_scsi_transport_template) {
6546 		ata_sff_exit();
6547 		rc = -ENOMEM;
6548 		goto err_out;
6549 	}
6550 
6551 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6552 	return 0;
6553 
6554 err_out:
6555 	return rc;
6556 }
6557 
6558 static void __exit ata_exit(void)
6559 {
6560 	ata_release_transport(ata_scsi_transport_template);
6561 	libata_transport_exit();
6562 	ata_sff_exit();
6563 	kfree(ata_force_tbl);
6564 }
6565 
6566 subsys_initcall(ata_init);
6567 module_exit(ata_exit);
6568 
6569 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6570 
6571 int ata_ratelimit(void)
6572 {
6573 	return __ratelimit(&ratelimit);
6574 }
6575 
6576 /**
6577  *	ata_msleep - ATA EH owner aware msleep
6578  *	@ap: ATA port to attribute the sleep to
6579  *	@msecs: duration to sleep in milliseconds
6580  *
6581  *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6582  *	ownership is released before going to sleep and reacquired
6583  *	after the sleep is complete.  IOW, other ports sharing the
6584  *	@ap->host will be allowed to own the EH while this task is
6585  *	sleeping.
6586  *
6587  *	LOCKING:
6588  *	Might sleep.
6589  */
6590 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6591 {
6592 	bool owns_eh = ap && ap->host->eh_owner == current;
6593 
6594 	if (owns_eh)
6595 		ata_eh_release(ap);
6596 
6597 	msleep(msecs);
6598 
6599 	if (owns_eh)
6600 		ata_eh_acquire(ap);
6601 }
6602 
6603 /**
6604  *	ata_wait_register - wait until register value changes
6605  *	@ap: ATA port to wait register for, can be NULL
6606  *	@reg: IO-mapped register
6607  *	@mask: Mask to apply to read register value
6608  *	@val: Wait condition
6609  *	@interval: polling interval in milliseconds
6610  *	@timeout: timeout in milliseconds
6611  *
6612  *	Waiting for some bits of register to change is a common
6613  *	operation for ATA controllers.  This function reads 32bit LE
6614  *	IO-mapped register @reg and tests for the following condition.
6615  *
6616  *	(*@reg & mask) != val
6617  *
6618  *	If the condition is met, it returns; otherwise, the process is
6619  *	repeated after @interval until timeout.
6620  *
6621  *	LOCKING:
6622  *	Kernel thread context (may sleep)
6623  *
6624  *	RETURNS:
6625  *	The final register value.
6626  */
6627 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6628 		      unsigned long interval, unsigned long timeout)
6629 {
6630 	unsigned long deadline;
6631 	u32 tmp;
6632 
6633 	tmp = ioread32(reg);
6634 
6635 	/* Calculate timeout _after_ the first read to make sure
6636 	 * preceding writes reach the controller before starting to
6637 	 * eat away the timeout.
6638 	 */
6639 	deadline = ata_deadline(jiffies, timeout);
6640 
6641 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6642 		ata_msleep(ap, interval);
6643 		tmp = ioread32(reg);
6644 	}
6645 
6646 	return tmp;
6647 }
6648 
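/*
 * Usage sketch (editorial): a common wait is "poll until BSY clears".
 * With mask == val == ATA_BUSY the loop above spins while the bit is
 * set and returns the first value with it clear, or the last value
 * read before the timeout.  MY_STATUS_REG is a made-up offset; 10 and
 * 500 are the polling interval and timeout in milliseconds:
 *
 *	status = ata_wait_register(ap, mmio + MY_STATUS_REG,
 *				   ATA_BUSY, ATA_BUSY, 10, 500);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;
 */
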
6649 /*
6650  * Dummy port_ops
6651  */
6652 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6653 {
6654 	return AC_ERR_SYSTEM;
6655 }
6656 
6657 static void ata_dummy_error_handler(struct ata_port *ap)
6658 {
6659 	/* truly dummy */
6660 }
6661 
6662 struct ata_port_operations ata_dummy_port_ops = {
6663 	.qc_prep		= ata_noop_qc_prep,
6664 	.qc_issue		= ata_dummy_qc_issue,
6665 	.error_handler		= ata_dummy_error_handler,
6666 };
6667 
6668 const struct ata_port_info ata_dummy_port_info = {
6669 	.port_ops		= &ata_dummy_port_ops,
6670 };
6671 
6672 /*
6673  * Utility print functions
6674  */
6675 int ata_port_printk(const struct ata_port *ap, const char *level,
6676 		    const char *fmt, ...)
6677 {
6678 	struct va_format vaf;
6679 	va_list args;
6680 	int r;
6681 
6682 	va_start(args, fmt);
6683 
6684 	vaf.fmt = fmt;
6685 	vaf.va = &args;
6686 
6687 	r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6688 
6689 	va_end(args);
6690 
6691 	return r;
6692 }
6693 EXPORT_SYMBOL(ata_port_printk);
6694 
6695 int ata_link_printk(const struct ata_link *link, const char *level,
6696 		    const char *fmt, ...)
6697 {
6698 	struct va_format vaf;
6699 	va_list args;
6700 	int r;
6701 
6702 	va_start(args, fmt);
6703 
6704 	vaf.fmt = fmt;
6705 	vaf.va = &args;
6706 
6707 	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6708 		r = printk("%sata%u.%02u: %pV",
6709 			   level, link->ap->print_id, link->pmp, &vaf);
6710 	else
6711 		r = printk("%sata%u: %pV",
6712 			   level, link->ap->print_id, &vaf);
6713 
6714 	va_end(args);
6715 
6716 	return r;
6717 }
6718 EXPORT_SYMBOL(ata_link_printk);
6719 
6720 int ata_dev_printk(const struct ata_device *dev, const char *level,
6721 		    const char *fmt, ...)
6722 {
6723 	struct va_format vaf;
6724 	va_list args;
6725 	int r;
6726 
6727 	va_start(args, fmt);
6728 
6729 	vaf.fmt = fmt;
6730 	vaf.va = &args;
6731 
6732 	r = printk("%sata%u.%02u: %pV",
6733 		   level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6734 		   &vaf);
6735 
6736 	va_end(args);
6737 
6738 	return r;
6739 }
6740 EXPORT_SYMBOL(ata_dev_printk);
6741 
6742 void ata_print_version(const struct device *dev, const char *version)
6743 {
6744 	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6745 }
6746 EXPORT_SYMBOL(ata_print_version);
6747 
6748 /*
6749  * libata is essentially a library of internal helper functions for
6750  * low-level ATA host controller drivers.  As such, the API/ABI is
6751  * likely to change as new drivers are added and updated.
6752  * Do not depend on ABI/API stability.
6753  */
6754 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6755 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6756 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6757 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6758 EXPORT_SYMBOL_GPL(sata_port_ops);
6759 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6760 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6761 EXPORT_SYMBOL_GPL(ata_link_next);
6762 EXPORT_SYMBOL_GPL(ata_dev_next);
6763 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6764 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6765 EXPORT_SYMBOL_GPL(ata_host_init);
6766 EXPORT_SYMBOL_GPL(ata_host_alloc);
6767 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6768 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6769 EXPORT_SYMBOL_GPL(ata_host_start);
6770 EXPORT_SYMBOL_GPL(ata_host_register);
6771 EXPORT_SYMBOL_GPL(ata_host_activate);
6772 EXPORT_SYMBOL_GPL(ata_host_detach);
6773 EXPORT_SYMBOL_GPL(ata_sg_init);
6774 EXPORT_SYMBOL_GPL(ata_qc_complete);
6775 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6776 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6777 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6778 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6779 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6780 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6781 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6782 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6783 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6784 EXPORT_SYMBOL_GPL(ata_mode_string);
6785 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6786 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6787 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6788 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6789 EXPORT_SYMBOL_GPL(ata_dev_disable);
6790 EXPORT_SYMBOL_GPL(sata_set_spd);
6791 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6792 EXPORT_SYMBOL_GPL(sata_link_debounce);
6793 EXPORT_SYMBOL_GPL(sata_link_resume);
6794 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6795 EXPORT_SYMBOL_GPL(ata_std_prereset);
6796 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6797 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6798 EXPORT_SYMBOL_GPL(ata_std_postreset);
6799 EXPORT_SYMBOL_GPL(ata_dev_classify);
6800 EXPORT_SYMBOL_GPL(ata_dev_pair);
6801 EXPORT_SYMBOL_GPL(ata_ratelimit);
6802 EXPORT_SYMBOL_GPL(ata_msleep);
6803 EXPORT_SYMBOL_GPL(ata_wait_register);
6804 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6805 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6806 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6807 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6808 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6809 EXPORT_SYMBOL_GPL(sata_scr_valid);
6810 EXPORT_SYMBOL_GPL(sata_scr_read);
6811 EXPORT_SYMBOL_GPL(sata_scr_write);
6812 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6813 EXPORT_SYMBOL_GPL(ata_link_online);
6814 EXPORT_SYMBOL_GPL(ata_link_offline);
6815 #ifdef CONFIG_PM
6816 EXPORT_SYMBOL_GPL(ata_host_suspend);
6817 EXPORT_SYMBOL_GPL(ata_host_resume);
6818 #endif /* CONFIG_PM */
6819 EXPORT_SYMBOL_GPL(ata_id_string);
6820 EXPORT_SYMBOL_GPL(ata_id_c_string);
6821 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6822 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6823 
6824 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6825 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6826 EXPORT_SYMBOL_GPL(ata_timing_compute);
6827 EXPORT_SYMBOL_GPL(ata_timing_merge);
6828 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6829 
6830 #ifdef CONFIG_PCI
6831 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6832 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6833 #ifdef CONFIG_PM
6834 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6835 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6836 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6837 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6838 #endif /* CONFIG_PM */
6839 #endif /* CONFIG_PCI */
6840 
6841 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6842 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6843 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6844 EXPORT_SYMBOL_GPL(ata_port_desc);
6845 #ifdef CONFIG_PCI
6846 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6847 #endif /* CONFIG_PCI */
6848 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6849 EXPORT_SYMBOL_GPL(ata_link_abort);
6850 EXPORT_SYMBOL_GPL(ata_port_abort);
6851 EXPORT_SYMBOL_GPL(ata_port_freeze);
6852 EXPORT_SYMBOL_GPL(sata_async_notification);
6853 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6854 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6855 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6856 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6857 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6858 EXPORT_SYMBOL_GPL(ata_do_eh);
6859 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6860 
6861 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6862 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6863 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6864 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6865 EXPORT_SYMBOL_GPL(ata_cable_sata);
6866