1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69
70 #include "libata.h"
71 #include "libata-transport.h"
72
73 /* debounce timing parameters in msecs { interval, duration, timeout } */
74 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
75 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
76 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
77
78 const struct ata_port_operations ata_base_port_ops = {
79 .prereset = ata_std_prereset,
80 .postreset = ata_std_postreset,
81 .error_handler = ata_std_error_handler,
82 };
83
84 const struct ata_port_operations sata_port_ops = {
85 .inherits = &ata_base_port_ops,
86
87 .qc_defer = ata_std_qc_defer,
88 .hardreset = sata_std_hardreset,
89 };
90
91 static unsigned int ata_dev_init_params(struct ata_device *dev,
92 u16 heads, u16 sectors);
93 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
94 static void ata_dev_xfermask(struct ata_device *dev);
95 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
96
97 unsigned int ata_print_id = 1;
98
99 struct ata_force_param {
100 const char *name;
101 unsigned int cbl;
102 int spd_limit;
103 unsigned long xfer_mask;
104 unsigned int horkage_on;
105 unsigned int horkage_off;
106 unsigned int lflags;
107 };
108
109 struct ata_force_ent {
110 int port;
111 int device;
112 struct ata_force_param param;
113 };
114
115 static struct ata_force_ent *ata_force_tbl;
116 static int ata_force_tbl_size;
117
118 static char ata_force_param_buf[PAGE_SIZE] __initdata;
119 /* param_buf is thrown away after initialization, disallow read */
120 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
121 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
122
123 static int atapi_enabled = 1;
124 module_param(atapi_enabled, int, 0444);
125 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
126
127 static int atapi_dmadir = 0;
128 module_param(atapi_dmadir, int, 0444);
129 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
130
131 int atapi_passthru16 = 1;
132 module_param(atapi_passthru16, int, 0444);
133 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
134
135 int libata_fua = 0;
136 module_param_named(fua, libata_fua, int, 0444);
137 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
138
139 static int ata_ignore_hpa;
140 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
141 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
142
143 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
144 module_param_named(dma, libata_dma_mask, int, 0444);
145 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
146
147 static int ata_probe_timeout;
148 module_param(ata_probe_timeout, int, 0444);
149 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
150
151 int libata_noacpi = 0;
152 module_param_named(noacpi, libata_noacpi, int, 0444);
153 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
154
155 int libata_allow_tpm = 0;
156 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
157 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
158
159 static int atapi_an;
160 module_param(atapi_an, int, 0444);
161 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
162
163 MODULE_AUTHOR("Jeff Garzik");
164 MODULE_DESCRIPTION("Library module for ATA devices");
165 MODULE_LICENSE("GPL");
166 MODULE_VERSION(DRV_VERSION);
167
168
169 static bool ata_sstatus_online(u32 sstatus)
170 {
171 return (sstatus & 0xf) == 0x3;
172 }
173
174 /**
175 * ata_link_next - link iteration helper
176 * @link: the previous link, NULL to start
177 * @ap: ATA port containing links to iterate
178 * @mode: iteration mode, one of ATA_LITER_*
179 *
180 * LOCKING:
181 * Host lock or EH context.
182 *
183 * RETURNS:
184 * Pointer to the next link.
185 */
186 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
187 enum ata_link_iter_mode mode)
188 {
189 BUG_ON(mode != ATA_LITER_EDGE &&
190 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
191
192 /* NULL link indicates start of iteration */
193 if (!link)
194 switch (mode) {
195 case ATA_LITER_EDGE:
196 case ATA_LITER_PMP_FIRST:
197 if (sata_pmp_attached(ap))
198 return ap->pmp_link;
199 /* fall through */
200 case ATA_LITER_HOST_FIRST:
201 return &ap->link;
202 }
203
204 /* we just iterated over the host link, what's next? */
205 if (link == &ap->link)
206 switch (mode) {
207 case ATA_LITER_HOST_FIRST:
208 if (sata_pmp_attached(ap))
209 return ap->pmp_link;
210 /* fall through */
211 case ATA_LITER_PMP_FIRST:
212 if (unlikely(ap->slave_link))
213 return ap->slave_link;
214 /* fall through */
215 case ATA_LITER_EDGE:
216 return NULL;
217 }
218
219 /* slave_link excludes PMP */
220 if (unlikely(link == ap->slave_link))
221 return NULL;
222
223 /* we were over a PMP link */
224 if (++link < ap->pmp_link + ap->nr_pmp_links)
225 return link;
226
227 if (mode == ATA_LITER_PMP_FIRST)
228 return &ap->link;
229
230 return NULL;
231 }
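
/*
 * Example (illustrative sketch, not part of the original file): callers
 * normally go through the ata_for_each_link() wrapper in <linux/libata.h>
 * instead of calling ata_link_next() directly, e.g.:
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_link_printk(link, KERN_INFO, "pmp %d\n", link->pmp);
 */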
232
233 /**
234 * ata_dev_next - device iteration helper
235 * @dev: the previous device, NULL to start
236 * @link: ATA link containing devices to iterate
237 * @mode: iteration mode, one of ATA_DITER_*
238 *
239 * LOCKING:
240 * Host lock or EH context.
241 *
242 * RETURNS:
243 * Pointer to the next device.
244 */
245 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
246 enum ata_dev_iter_mode mode)
247 {
248 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
249 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
250
251 /* NULL dev indicates start of iteration */
252 if (!dev)
253 switch (mode) {
254 case ATA_DITER_ENABLED:
255 case ATA_DITER_ALL:
256 dev = link->device;
257 goto check;
258 case ATA_DITER_ENABLED_REVERSE:
259 case ATA_DITER_ALL_REVERSE:
260 dev = link->device + ata_link_max_devices(link) - 1;
261 goto check;
262 }
263
264 next:
265 /* move to the next one */
266 switch (mode) {
267 case ATA_DITER_ENABLED:
268 case ATA_DITER_ALL:
269 if (++dev < link->device + ata_link_max_devices(link))
270 goto check;
271 return NULL;
272 case ATA_DITER_ENABLED_REVERSE:
273 case ATA_DITER_ALL_REVERSE:
274 if (--dev >= link->device)
275 goto check;
276 return NULL;
277 }
278
279 check:
280 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
281 !ata_dev_enabled(dev))
282 goto next;
283 return dev;
284 }
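
/*
 * Example (illustrative sketch): device walks usually use the
 * ata_for_each_dev() wrapper in <linux/libata.h> built on this iterator,
 * e.g. to visit only enabled devices on a link:
 *
 *	struct ata_device *dev;
 *
 *	ata_for_each_dev(dev, link, ENABLED)
 *		ata_dev_printk(dev, KERN_INFO, "class %u\n", dev->class);
 */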
285
286 /**
287 * ata_dev_phys_link - find physical link for a device
288 * @dev: ATA device to look up physical link for
289 *
290 * Look up physical link which @dev is attached to. Note that
291 * this is different from @dev->link only when @dev is on slave
292 * link. For all other cases, it's the same as @dev->link.
293 *
294 * LOCKING:
295 * Don't care.
296 *
297 * RETURNS:
298 * Pointer to the found physical link.
299 */
300 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
301 {
302 struct ata_port *ap = dev->link->ap;
303
304 if (!ap->slave_link)
305 return dev->link;
306 if (!dev->devno)
307 return &ap->link;
308 return ap->slave_link;
309 }
310
311 /**
312 * ata_force_cbl - force cable type according to libata.force
313 * @ap: ATA port of interest
314 *
315 * Force cable type according to libata.force and whine about it.
316 * The last entry which has matching port number is used, so it
317 * can be specified as part of device force parameters. For
318 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
319 * same effect.
320 *
321 * LOCKING:
322 * EH context.
323 */
324 void ata_force_cbl(struct ata_port *ap)
325 {
326 int i;
327
328 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
329 const struct ata_force_ent *fe = &ata_force_tbl[i];
330
331 if (fe->port != -1 && fe->port != ap->print_id)
332 continue;
333
334 if (fe->param.cbl == ATA_CBL_NONE)
335 continue;
336
337 ap->cbl = fe->param.cbl;
338 ata_port_printk(ap, KERN_NOTICE,
339 "FORCE: cable set to %s\n", fe->param.name);
340 return;
341 }
342 }
343
344 /**
345 * ata_force_link_limits - force link limits according to libata.force
346 * @link: ATA link of interest
347 *
348 * Force link flags and SATA spd limit according to libata.force
349 * and whine about it. When only the port part is specified
350 * (e.g. 1:), the limit applies to all links connected to both
351 * the host link and all fan-out ports connected via PMP. If the
352 * device part is specified as 0 (e.g. 1.00:), it specifies the
353 * first fan-out link, not the host link. Device number 15 always
354 * points to the host link whether PMP is attached or not. If the
355 * controller has a slave link, device number 16 points to it.
356 *
357 * LOCKING:
358 * EH context.
359 */
360 static void ata_force_link_limits(struct ata_link *link)
361 {
362 bool did_spd = false;
363 int linkno = link->pmp;
364 int i;
365
366 if (ata_is_host_link(link))
367 linkno += 15;
368
369 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
370 const struct ata_force_ent *fe = &ata_force_tbl[i];
371
372 if (fe->port != -1 && fe->port != link->ap->print_id)
373 continue;
374
375 if (fe->device != -1 && fe->device != linkno)
376 continue;
377
378 /* only honor the first spd limit */
379 if (!did_spd && fe->param.spd_limit) {
380 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
381 ata_link_printk(link, KERN_NOTICE,
382 "FORCE: PHY spd limit set to %s\n",
383 fe->param.name);
384 did_spd = true;
385 }
386
387 /* let lflags stack */
388 if (fe->param.lflags) {
389 link->flags |= fe->param.lflags;
390 ata_link_printk(link, KERN_NOTICE,
391 "FORCE: link flag 0x%x forced -> 0x%x\n",
392 fe->param.lflags, link->flags);
393 }
394 }
395 }
396
397 /**
398 * ata_force_xfermask - force xfermask according to libata.force
399 * @dev: ATA device of interest
400 *
401 * Force xfer_mask according to libata.force and whine about it.
402 * For consistency with link selection, device number 15 selects
403 * the first device connected to the host link.
404 *
405 * LOCKING:
406 * EH context.
407 */
408 static void ata_force_xfermask(struct ata_device *dev)
409 {
410 int devno = dev->link->pmp + dev->devno;
411 int alt_devno = devno;
412 int i;
413
414 /* allow n.15/16 for devices attached to host port */
415 if (ata_is_host_link(dev->link))
416 alt_devno += 15;
417
418 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
419 const struct ata_force_ent *fe = &ata_force_tbl[i];
420 unsigned long pio_mask, mwdma_mask, udma_mask;
421
422 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
423 continue;
424
425 if (fe->device != -1 && fe->device != devno &&
426 fe->device != alt_devno)
427 continue;
428
429 if (!fe->param.xfer_mask)
430 continue;
431
432 ata_unpack_xfermask(fe->param.xfer_mask,
433 &pio_mask, &mwdma_mask, &udma_mask);
434 if (udma_mask)
435 dev->udma_mask = udma_mask;
436 else if (mwdma_mask) {
437 dev->udma_mask = 0;
438 dev->mwdma_mask = mwdma_mask;
439 } else {
440 dev->udma_mask = 0;
441 dev->mwdma_mask = 0;
442 dev->pio_mask = pio_mask;
443 }
444
445 ata_dev_printk(dev, KERN_NOTICE,
446 "FORCE: xfer_mask set to %s\n", fe->param.name);
447 return;
448 }
449 }
450
451 /**
452 * ata_force_horkage - force horkage according to libata.force
453 * @dev: ATA device of interest
454 *
455 * Force horkage according to libata.force and whine about it.
456 * For consistency with link selection, device number 15 selects
457 * the first device connected to the host link.
458 *
459 * LOCKING:
460 * EH context.
461 */
462 static void ata_force_horkage(struct ata_device *dev)
463 {
464 int devno = dev->link->pmp + dev->devno;
465 int alt_devno = devno;
466 int i;
467
468 /* allow n.15/16 for devices attached to host port */
469 if (ata_is_host_link(dev->link))
470 alt_devno += 15;
471
472 for (i = 0; i < ata_force_tbl_size; i++) {
473 const struct ata_force_ent *fe = &ata_force_tbl[i];
474
475 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
476 continue;
477
478 if (fe->device != -1 && fe->device != devno &&
479 fe->device != alt_devno)
480 continue;
481
482 if (!(~dev->horkage & fe->param.horkage_on) &&
483 !(dev->horkage & fe->param.horkage_off))
484 continue;
485
486 dev->horkage |= fe->param.horkage_on;
487 dev->horkage &= ~fe->param.horkage_off;
488
489 ata_dev_printk(dev, KERN_NOTICE,
490 "FORCE: horkage modified (%s)\n", fe->param.name);
491 }
492 }
493
494 /**
495 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
496 * @opcode: SCSI opcode
497 *
498 * Determine ATAPI command type from @opcode.
499 *
500 * LOCKING:
501 * None.
502 *
503 * RETURNS:
504 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
505 */
506 int atapi_cmd_type(u8 opcode)
507 {
508 switch (opcode) {
509 case GPCMD_READ_10:
510 case GPCMD_READ_12:
511 return ATAPI_READ;
512
513 case GPCMD_WRITE_10:
514 case GPCMD_WRITE_12:
515 case GPCMD_WRITE_AND_VERIFY_10:
516 return ATAPI_WRITE;
517
518 case GPCMD_READ_CD:
519 case GPCMD_READ_CD_MSF:
520 return ATAPI_READ_CD;
521
522 case ATA_16:
523 case ATA_12:
524 if (atapi_passthru16)
525 return ATAPI_PASS_THRU;
526 /* fall thru */
527 default:
528 return ATAPI_MISC;
529 }
530 }
531
532 /**
533 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
534 * @tf: Taskfile to convert
535 * @pmp: Port multiplier port
536 * @is_cmd: This FIS is for command
537 * @fis: Buffer into which data will output
538 *
539 * Converts a standard ATA taskfile to a Serial ATA
540 * FIS structure (Register - Host to Device).
541 *
542 * LOCKING:
543 * Inherited from caller.
544 */
545 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
546 {
547 fis[0] = 0x27; /* Register - Host to Device FIS */
548 fis[1] = pmp & 0xf; /* Port multiplier number */
549 if (is_cmd)
550 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
551
552 fis[2] = tf->command;
553 fis[3] = tf->feature;
554
555 fis[4] = tf->lbal;
556 fis[5] = tf->lbam;
557 fis[6] = tf->lbah;
558 fis[7] = tf->device;
559
560 fis[8] = tf->hob_lbal;
561 fis[9] = tf->hob_lbam;
562 fis[10] = tf->hob_lbah;
563 fis[11] = tf->hob_feature;
564
565 fis[12] = tf->nsect;
566 fis[13] = tf->hob_nsect;
567 fis[14] = 0;
568 fis[15] = tf->ctl;
569
570 fis[16] = 0;
571 fis[17] = 0;
572 fis[18] = 0;
573 fis[19] = 0;
574 }
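
/*
 * Example (illustrative sketch only): a SATA low-level driver typically
 * builds the command FIS for a queued command into its command table;
 * with a plain 20-byte buffer it would look roughly like:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * is_cmd=1 sets bit 7 of byte 1 so the device treats it as a command FIS.
 */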
575
576 /**
577 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
578 * @fis: Buffer from which data will be input
579 * @tf: Taskfile to output
580 *
581 * Converts a serial ATA FIS structure to a standard ATA taskfile.
582 *
583 * LOCKING:
584 * Inherited from caller.
585 */
586
587 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
588 {
589 tf->command = fis[2]; /* status */
590 tf->feature = fis[3]; /* error */
591
592 tf->lbal = fis[4];
593 tf->lbam = fis[5];
594 tf->lbah = fis[6];
595 tf->device = fis[7];
596
597 tf->hob_lbal = fis[8];
598 tf->hob_lbam = fis[9];
599 tf->hob_lbah = fis[10];
600
601 tf->nsect = fis[12];
602 tf->hob_nsect = fis[13];
603 }
604
605 static const u8 ata_rw_cmds[] = {
606 /* pio multi */
607 ATA_CMD_READ_MULTI,
608 ATA_CMD_WRITE_MULTI,
609 ATA_CMD_READ_MULTI_EXT,
610 ATA_CMD_WRITE_MULTI_EXT,
611 0,
612 0,
613 0,
614 ATA_CMD_WRITE_MULTI_FUA_EXT,
615 /* pio */
616 ATA_CMD_PIO_READ,
617 ATA_CMD_PIO_WRITE,
618 ATA_CMD_PIO_READ_EXT,
619 ATA_CMD_PIO_WRITE_EXT,
620 0,
621 0,
622 0,
623 0,
624 /* dma */
625 ATA_CMD_READ,
626 ATA_CMD_WRITE,
627 ATA_CMD_READ_EXT,
628 ATA_CMD_WRITE_EXT,
629 0,
630 0,
631 0,
632 ATA_CMD_WRITE_FUA_EXT
633 };
634
635 /**
636 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
637 * @tf: command to examine and configure
638 * @dev: device tf belongs to
639 *
640 * Examine the device configuration and tf->flags to calculate
641 * the proper read/write commands and protocol to use.
642 *
643 * LOCKING:
644 * caller.
645 */
646 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
647 {
648 u8 cmd;
649
650 int index, fua, lba48, write;
651
652 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
653 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
654 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
655
656 if (dev->flags & ATA_DFLAG_PIO) {
657 tf->protocol = ATA_PROT_PIO;
658 index = dev->multi_count ? 0 : 8;
659 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
660 /* Unable to use DMA due to host limitation */
661 tf->protocol = ATA_PROT_PIO;
662 index = dev->multi_count ? 0 : 8;
663 } else {
664 tf->protocol = ATA_PROT_DMA;
665 index = 16;
666 }
667
668 cmd = ata_rw_cmds[index + fua + lba48 + write];
669 if (cmd) {
670 tf->command = cmd;
671 return 0;
672 }
673 return -1;
674 }
675
676 /**
677 * ata_tf_read_block - Read block address from ATA taskfile
678 * @tf: ATA taskfile of interest
679 * @dev: ATA device @tf belongs to
680 *
681 * LOCKING:
682 * None.
683 *
684 * Read block address from @tf. This function can handle all
685 * three address formats - LBA, LBA48 and CHS. tf->protocol and
686 * flags select the address format to use.
687 *
688 * RETURNS:
689 * Block address read from @tf.
690 */
691 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
692 {
693 u64 block = 0;
694
695 if (tf->flags & ATA_TFLAG_LBA) {
696 if (tf->flags & ATA_TFLAG_LBA48) {
697 block |= (u64)tf->hob_lbah << 40;
698 block |= (u64)tf->hob_lbam << 32;
699 block |= (u64)tf->hob_lbal << 24;
700 } else
701 block |= (tf->device & 0xf) << 24;
702
703 block |= tf->lbah << 16;
704 block |= tf->lbam << 8;
705 block |= tf->lbal;
706 } else {
707 u32 cyl, head, sect;
708
709 cyl = tf->lbam | (tf->lbah << 8);
710 head = tf->device & 0xf;
711 sect = tf->lbal;
712
713 if (!sect) {
714 ata_dev_printk(dev, KERN_WARNING, "device reported "
715 "invalid CHS sector 0\n");
716 sect = 1; /* oh well */
717 }
718
719 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
720 }
721
722 return block;
723 }
724
725 /**
726 * ata_build_rw_tf - Build ATA taskfile for given read/write request
727 * @tf: Target ATA taskfile
728 * @dev: ATA device @tf belongs to
729 * @block: Block address
730 * @n_block: Number of blocks
731 * @tf_flags: RW/FUA etc...
732 * @tag: tag
733 *
734 * LOCKING:
735 * None.
736 *
737 * Build ATA taskfile @tf for read/write request described by
738 * @block, @n_block, @tf_flags and @tag on @dev.
739 *
740 * RETURNS:
741 *
742 * 0 on success, -ERANGE if the request is too large for @dev,
743 * -EINVAL if the request is invalid.
744 */
745 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
746 u64 block, u32 n_block, unsigned int tf_flags,
747 unsigned int tag)
748 {
749 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
750 tf->flags |= tf_flags;
751
752 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
753 /* yay, NCQ */
754 if (!lba_48_ok(block, n_block))
755 return -ERANGE;
756
757 tf->protocol = ATA_PROT_NCQ;
758 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
759
760 if (tf->flags & ATA_TFLAG_WRITE)
761 tf->command = ATA_CMD_FPDMA_WRITE;
762 else
763 tf->command = ATA_CMD_FPDMA_READ;
764
765 tf->nsect = tag << 3;
766 tf->hob_feature = (n_block >> 8) & 0xff;
767 tf->feature = n_block & 0xff;
768
769 tf->hob_lbah = (block >> 40) & 0xff;
770 tf->hob_lbam = (block >> 32) & 0xff;
771 tf->hob_lbal = (block >> 24) & 0xff;
772 tf->lbah = (block >> 16) & 0xff;
773 tf->lbam = (block >> 8) & 0xff;
774 tf->lbal = block & 0xff;
775
776 tf->device = 1 << 6;
777 if (tf->flags & ATA_TFLAG_FUA)
778 tf->device |= 1 << 7;
779 } else if (dev->flags & ATA_DFLAG_LBA) {
780 tf->flags |= ATA_TFLAG_LBA;
781
782 if (lba_28_ok(block, n_block)) {
783 /* use LBA28 */
784 tf->device |= (block >> 24) & 0xf;
785 } else if (lba_48_ok(block, n_block)) {
786 if (!(dev->flags & ATA_DFLAG_LBA48))
787 return -ERANGE;
788
789 /* use LBA48 */
790 tf->flags |= ATA_TFLAG_LBA48;
791
792 tf->hob_nsect = (n_block >> 8) & 0xff;
793
794 tf->hob_lbah = (block >> 40) & 0xff;
795 tf->hob_lbam = (block >> 32) & 0xff;
796 tf->hob_lbal = (block >> 24) & 0xff;
797 } else
798 /* request too large even for LBA48 */
799 return -ERANGE;
800
801 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
802 return -EINVAL;
803
804 tf->nsect = n_block & 0xff;
805
806 tf->lbah = (block >> 16) & 0xff;
807 tf->lbam = (block >> 8) & 0xff;
808 tf->lbal = block & 0xff;
809
810 tf->device |= ATA_LBA;
811 } else {
812 /* CHS */
813 u32 sect, head, cyl, track;
814
815 /* The request -may- be too large for CHS addressing. */
816 if (!lba_28_ok(block, n_block))
817 return -ERANGE;
818
819 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
820 return -EINVAL;
821
822 /* Convert LBA to CHS */
823 track = (u32)block / dev->sectors;
824 cyl = track / dev->heads;
825 head = track % dev->heads;
826 sect = (u32)block % dev->sectors + 1;
827
828 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
829 (u32)block, track, cyl, head, sect);
830
831 /* Check whether the converted CHS can fit.
832 Cylinder: 0-65535
833 Head: 0-15
834 Sector: 1-255 */
835 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
836 return -ERANGE;
837
838 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
839 tf->lbal = sect;
840 tf->lbam = cyl;
841 tf->lbah = cyl >> 8;
842 tf->device |= head;
843 }
844
845 return 0;
846 }
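
/*
 * Example (illustrative sketch): the SCSI translation path builds the
 * taskfile for a READ/WRITE request roughly like:
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block,
 *			     tf_flags, qc->tag);
 *
 * where tf_flags carries ATA_TFLAG_WRITE and/or ATA_TFLAG_FUA as needed,
 * and -ERANGE/-EINVAL are handled by failing the request.
 */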
847
848 /**
849 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
850 * @pio_mask: pio_mask
851 * @mwdma_mask: mwdma_mask
852 * @udma_mask: udma_mask
853 *
854 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
855 * unsigned int xfer_mask.
856 *
857 * LOCKING:
858 * None.
859 *
860 * RETURNS:
861 * Packed xfer_mask.
862 */
863 unsigned long ata_pack_xfermask(unsigned long pio_mask,
864 unsigned long mwdma_mask,
865 unsigned long udma_mask)
866 {
867 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
868 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
869 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
870 }
871
872 /**
873 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
874 * @xfer_mask: xfer_mask to unpack
875 * @pio_mask: resulting pio_mask
876 * @mwdma_mask: resulting mwdma_mask
877 * @udma_mask: resulting udma_mask
878 *
879 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
880 * Any NULL destination masks will be ignored.
881 */
882 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
883 unsigned long *mwdma_mask, unsigned long *udma_mask)
884 {
885 if (pio_mask)
886 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
887 if (mwdma_mask)
888 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
889 if (udma_mask)
890 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
891 }
892
893 static const struct ata_xfer_ent {
894 int shift, bits;
895 u8 base;
896 } ata_xfer_tbl[] = {
897 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
898 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
899 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
900 { -1, },
901 };
902
903 /**
904 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
905 * @xfer_mask: xfer_mask of interest
906 *
907 * Return matching XFER_* value for @xfer_mask. Only the highest
908 * bit of @xfer_mask is considered.
909 *
910 * LOCKING:
911 * None.
912 *
913 * RETURNS:
914 * Matching XFER_* value, 0xff if no match found.
915 */
916 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
917 {
918 int highbit = fls(xfer_mask) - 1;
919 const struct ata_xfer_ent *ent;
920
921 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
922 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
923 return ent->base + highbit - ent->shift;
924 return 0xff;
925 }
926
927 /**
928 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
929 * @xfer_mode: XFER_* of interest
930 *
931 * Return matching xfer_mask for @xfer_mode.
932 *
933 * LOCKING:
934 * None.
935 *
936 * RETURNS:
937 * Matching xfer_mask, 0 if no match found.
938 */
939 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
940 {
941 const struct ata_xfer_ent *ent;
942
943 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
944 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
945 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
946 & ~((1 << ent->shift) - 1);
947 return 0;
948 }
949
950 /**
951 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
952 * @xfer_mode: XFER_* of interest
953 *
954 * Return matching xfer_shift for @xfer_mode.
955 *
956 * LOCKING:
957 * None.
958 *
959 * RETURNS:
960 * Matching xfer_shift, -1 if no match found.
961 */
962 int ata_xfer_mode2shift(unsigned long xfer_mode)
963 {
964 const struct ata_xfer_ent *ent;
965
966 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
967 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
968 return ent->shift;
969 return -1;
970 }
971
972 /**
973 * ata_mode_string - convert xfer_mask to string
974 * @xfer_mask: mask of bits supported; only highest bit counts.
975 *
976 * Determine string which represents the highest speed
977 * (highest bit in @xfer_mask).
978 *
979 * LOCKING:
980 * None.
981 *
982 * RETURNS:
983 * Constant C string representing highest speed listed in
984 * @xfer_mask, or the constant C string "<n/a>".
985 */
986 const char *ata_mode_string(unsigned long xfer_mask)
987 {
988 static const char * const xfer_mode_str[] = {
989 "PIO0",
990 "PIO1",
991 "PIO2",
992 "PIO3",
993 "PIO4",
994 "PIO5",
995 "PIO6",
996 "MWDMA0",
997 "MWDMA1",
998 "MWDMA2",
999 "MWDMA3",
1000 "MWDMA4",
1001 "UDMA/16",
1002 "UDMA/25",
1003 "UDMA/33",
1004 "UDMA/44",
1005 "UDMA/66",
1006 "UDMA/100",
1007 "UDMA/133",
1008 "UDMA7",
1009 };
1010 int highbit;
1011
1012 highbit = fls(xfer_mask) - 1;
1013 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1014 return xfer_mode_str[highbit];
1015 return "<n/a>";
1016 }
1017
1018 const char *sata_spd_string(unsigned int spd)
1019 {
1020 static const char * const spd_str[] = {
1021 "1.5 Gbps",
1022 "3.0 Gbps",
1023 "6.0 Gbps",
1024 };
1025
1026 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1027 return "<unknown>";
1028 return spd_str[spd - 1];
1029 }
1030
1031 /**
1032 * ata_dev_classify - determine device type based on ATA-spec signature
1033 * @tf: ATA taskfile register set for device to be identified
1034 *
1035 * Determine from taskfile register contents whether a device is
1036 * ATA or ATAPI, as per "Signature and persistence" section
1037 * of ATA/PI spec (volume 1, sect 5.14).
1038 *
1039 * LOCKING:
1040 * None.
1041 *
1042 * RETURNS:
1043 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1044 * %ATA_DEV_UNKNOWN in the event of failure.
1045 */
1046 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1047 {
1048 /* Apple's open source Darwin code hints that some devices only
1049 * put a proper signature into the LBA mid/high registers,
1050 * so we only check those. It's sufficient for uniqueness.
1051 *
1052 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1053 * signatures for ATA and ATAPI devices attached on SerialATA,
1054 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1055 * spec has never mentioned using different signatures
1056 * for ATA/ATAPI devices. Then, the Serial ATA II: Port
1057 * Multiplier specification began to use 0x69/0x96 to identify
1058 * port multipliers and 0x3c/0xc3 to identify a SEMB device.
1059 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
1060 * 0x69/0x96 shortly afterwards and described them as reserved for
1061 * SerialATA.
1062 *
1063 * We follow the current spec and consider that 0x69/0x96
1064 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1065 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1066 * SEMB signature. This is worked around in
1067 * ata_dev_read_id().
1068 */
1069 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1070 DPRINTK("found ATA device by sig\n");
1071 return ATA_DEV_ATA;
1072 }
1073
1074 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1075 DPRINTK("found ATAPI device by sig\n");
1076 return ATA_DEV_ATAPI;
1077 }
1078
1079 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1080 DPRINTK("found PMP device by sig\n");
1081 return ATA_DEV_PMP;
1082 }
1083
1084 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1085 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1086 return ATA_DEV_SEMB;
1087 }
1088
1089 DPRINTK("unknown device\n");
1090 return ATA_DEV_UNKNOWN;
1091 }
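
/*
 * Example (illustrative sketch, assumes an SFF-style controller): reset
 * paths read the signature taskfile after reset and classify the device:
 *
 *	struct ata_taskfile tf;
 *
 *	ap->ops->sff_tf_read(ap, &tf);
 *	classes[0] = ata_dev_classify(&tf);
 *
 * where classes[] is the class array handed to the reset method.
 */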
1092
1093 /**
1094 * ata_id_string - Convert IDENTIFY DEVICE page into string
1095 * @id: IDENTIFY DEVICE results we will examine
1096 * @s: string into which data is output
1097 * @ofs: offset into identify device page
1098 * @len: length of string to return. must be an even number.
1099 *
1100 * The strings in the IDENTIFY DEVICE page are broken up into
1101 * 16-bit chunks. Run through the string, and output each
1102 * 8-bit chunk linearly, regardless of platform.
1103 *
1104 * LOCKING:
1105 * caller.
1106 */
1107
1108 void ata_id_string(const u16 *id, unsigned char *s,
1109 unsigned int ofs, unsigned int len)
1110 {
1111 unsigned int c;
1112
1113 BUG_ON(len & 1);
1114
1115 while (len > 0) {
1116 c = id[ofs] >> 8;
1117 *s = c;
1118 s++;
1119
1120 c = id[ofs] & 0xff;
1121 *s = c;
1122 s++;
1123
1124 ofs++;
1125 len -= 2;
1126 }
1127 }
1128
1129 /**
1130 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1131 * @id: IDENTIFY DEVICE results we will examine
1132 * @s: string into which data is output
1133 * @ofs: offset into identify device page
1134 * @len: length of string to return. must be an odd number.
1135 *
1136 * This function is identical to ata_id_string except that it
1137 * trims trailing spaces and terminates the resulting string with
1138 * null. @len must be actual maximum length (even number) + 1.
1139 *
1140 * LOCKING:
1141 * caller.
1142 */
1143 void ata_id_c_string(const u16 *id, unsigned char *s,
1144 unsigned int ofs, unsigned int len)
1145 {
1146 unsigned char *p;
1147
1148 ata_id_string(id, s, ofs, len - 1);
1149
1150 p = s + strnlen(s, len - 1);
1151 while (p > s && p[-1] == ' ')
1152 p--;
1153 *p = '\0';
1154 }
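
/*
 * Example (illustrative): pulling the model string out of IDENTIFY data,
 * as done when printing device information:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */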
1155
1156 static u64 ata_id_n_sectors(const u16 *id)
1157 {
1158 if (ata_id_has_lba(id)) {
1159 if (ata_id_has_lba48(id))
1160 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1161 else
1162 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1163 } else {
1164 if (ata_id_current_chs_valid(id))
1165 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1166 id[ATA_ID_CUR_SECTORS];
1167 else
1168 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1169 id[ATA_ID_SECTORS];
1170 }
1171 }
1172
1173 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1174 {
1175 u64 sectors = 0;
1176
1177 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1178 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1179 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1180 sectors |= (tf->lbah & 0xff) << 16;
1181 sectors |= (tf->lbam & 0xff) << 8;
1182 sectors |= (tf->lbal & 0xff);
1183
1184 return sectors;
1185 }
1186
1187 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1188 {
1189 u64 sectors = 0;
1190
1191 sectors |= (tf->device & 0x0f) << 24;
1192 sectors |= (tf->lbah & 0xff) << 16;
1193 sectors |= (tf->lbam & 0xff) << 8;
1194 sectors |= (tf->lbal & 0xff);
1195
1196 return sectors;
1197 }
1198
1199 /**
1200 * ata_read_native_max_address - Read native max address
1201 * @dev: target device
1202 * @max_sectors: out parameter for the result native max address
1203 *
1204 * Perform an LBA48 or LBA28 native size query upon the device in
1205 * question.
1206 *
1207 * RETURNS:
1208 * 0 on success, -EACCES if command is aborted by the drive.
1209 * -EIO on other errors.
1210 */
1211 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1212 {
1213 unsigned int err_mask;
1214 struct ata_taskfile tf;
1215 int lba48 = ata_id_has_lba48(dev->id);
1216
1217 ata_tf_init(dev, &tf);
1218
1219 /* always clear all address registers */
1220 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1221
1222 if (lba48) {
1223 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1224 tf.flags |= ATA_TFLAG_LBA48;
1225 } else
1226 tf.command = ATA_CMD_READ_NATIVE_MAX;
1227
1228 tf.protocol |= ATA_PROT_NODATA;
1229 tf.device |= ATA_LBA;
1230
1231 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1232 if (err_mask) {
1233 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1234 "max address (err_mask=0x%x)\n", err_mask);
1235 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1236 return -EACCES;
1237 return -EIO;
1238 }
1239
1240 if (lba48)
1241 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1242 else
1243 *max_sectors = ata_tf_to_lba(&tf) + 1;
1244 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1245 (*max_sectors)--;
1246 return 0;
1247 }
1248
1249 /**
1250 * ata_set_max_sectors - Set max sectors
1251 * @dev: target device
1252 * @new_sectors: new max sectors value to set for the device
1253 *
1254 * Set max sectors of @dev to @new_sectors.
1255 *
1256 * RETURNS:
1257 * 0 on success, -EACCES if command is aborted or denied (due to
1258 * previous non-volatile SET_MAX) by the drive. -EIO on other
1259 * errors.
1260 */
1261 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1262 {
1263 unsigned int err_mask;
1264 struct ata_taskfile tf;
1265 int lba48 = ata_id_has_lba48(dev->id);
1266
1267 new_sectors--;
1268
1269 ata_tf_init(dev, &tf);
1270
1271 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1272
1273 if (lba48) {
1274 tf.command = ATA_CMD_SET_MAX_EXT;
1275 tf.flags |= ATA_TFLAG_LBA48;
1276
1277 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1278 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1279 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1280 } else {
1281 tf.command = ATA_CMD_SET_MAX;
1282
1283 tf.device |= (new_sectors >> 24) & 0xf;
1284 }
1285
1286 tf.protocol |= ATA_PROT_NODATA;
1287 tf.device |= ATA_LBA;
1288
1289 tf.lbal = (new_sectors >> 0) & 0xff;
1290 tf.lbam = (new_sectors >> 8) & 0xff;
1291 tf.lbah = (new_sectors >> 16) & 0xff;
1292
1293 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1294 if (err_mask) {
1295 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1296 "max address (err_mask=0x%x)\n", err_mask);
1297 if (err_mask == AC_ERR_DEV &&
1298 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1299 return -EACCES;
1300 return -EIO;
1301 }
1302
1303 return 0;
1304 }
1305
1306 /**
1307 * ata_hpa_resize - Resize a device with an HPA set
1308 * @dev: Device to resize
1309 *
1310 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1311 * it if required to the full size of the media. The caller must check
1312 * the drive has the HPA feature set enabled.
1313 *
1314 * RETURNS:
1315 * 0 on success, -errno on failure.
1316 */
1317 static int ata_hpa_resize(struct ata_device *dev)
1318 {
1319 struct ata_eh_context *ehc = &dev->link->eh_context;
1320 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1321 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1322 u64 sectors = ata_id_n_sectors(dev->id);
1323 u64 native_sectors;
1324 int rc;
1325
1326 /* do we need to do it? */
1327 if (dev->class != ATA_DEV_ATA ||
1328 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1329 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1330 return 0;
1331
1332 /* read native max address */
1333 rc = ata_read_native_max_address(dev, &native_sectors);
1334 if (rc) {
1335 /* If device aborted the command or HPA isn't going to
1336 * be unlocked, skip HPA resizing.
1337 */
1338 if (rc == -EACCES || !unlock_hpa) {
1339 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1340 "broken, skipping HPA handling\n");
1341 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1342
1343 /* we can continue if device aborted the command */
1344 if (rc == -EACCES)
1345 rc = 0;
1346 }
1347
1348 return rc;
1349 }
1350 dev->n_native_sectors = native_sectors;
1351
1352 /* nothing to do? */
1353 if (native_sectors <= sectors || !unlock_hpa) {
1354 if (!print_info || native_sectors == sectors)
1355 return 0;
1356
1357 if (native_sectors > sectors)
1358 ata_dev_printk(dev, KERN_INFO,
1359 "HPA detected: current %llu, native %llu\n",
1360 (unsigned long long)sectors,
1361 (unsigned long long)native_sectors);
1362 else if (native_sectors < sectors)
1363 ata_dev_printk(dev, KERN_WARNING,
1364 "native sectors (%llu) is smaller than "
1365 "sectors (%llu)\n",
1366 (unsigned long long)native_sectors,
1367 (unsigned long long)sectors);
1368 return 0;
1369 }
1370
1371 /* let's unlock HPA */
1372 rc = ata_set_max_sectors(dev, native_sectors);
1373 if (rc == -EACCES) {
1374 /* if device aborted the command, skip HPA resizing */
1375 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1376 "(%llu -> %llu), skipping HPA handling\n",
1377 (unsigned long long)sectors,
1378 (unsigned long long)native_sectors);
1379 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1380 return 0;
1381 } else if (rc)
1382 return rc;
1383
1384 /* re-read IDENTIFY data */
1385 rc = ata_dev_reread_id(dev, 0);
1386 if (rc) {
1387 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1388 "data after HPA resizing\n");
1389 return rc;
1390 }
1391
1392 if (print_info) {
1393 u64 new_sectors = ata_id_n_sectors(dev->id);
1394 ata_dev_printk(dev, KERN_INFO,
1395 "HPA unlocked: %llu -> %llu, native %llu\n",
1396 (unsigned long long)sectors,
1397 (unsigned long long)new_sectors,
1398 (unsigned long long)native_sectors);
1399 }
1400
1401 return 0;
1402 }
1403
1404 /**
1405 * ata_dump_id - IDENTIFY DEVICE info debugging output
1406 * @id: IDENTIFY DEVICE page to dump
1407 *
1408 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1409 * page.
1410 *
1411 * LOCKING:
1412 * caller.
1413 */
1414
1415 static inline void ata_dump_id(const u16 *id)
1416 {
1417 DPRINTK("49==0x%04x "
1418 "53==0x%04x "
1419 "63==0x%04x "
1420 "64==0x%04x "
1421 "75==0x%04x \n",
1422 id[49],
1423 id[53],
1424 id[63],
1425 id[64],
1426 id[75]);
1427 DPRINTK("80==0x%04x "
1428 "81==0x%04x "
1429 "82==0x%04x "
1430 "83==0x%04x "
1431 "84==0x%04x \n",
1432 id[80],
1433 id[81],
1434 id[82],
1435 id[83],
1436 id[84]);
1437 DPRINTK("88==0x%04x "
1438 "93==0x%04x\n",
1439 id[88],
1440 id[93]);
1441 }
1442
1443 /**
1444 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1445 * @id: IDENTIFY data to compute xfer mask from
1446 *
1447 * Compute the xfermask for this device. This is not as trivial
1448 * as it seems if we must consider early devices correctly.
1449 *
1450 * FIXME: pre IDE drive timing (do we care ?).
1451 *
1452 * LOCKING:
1453 * None.
1454 *
1455 * RETURNS:
1456 * Computed xfermask
1457 */
1458 unsigned long ata_id_xfermask(const u16 *id)
1459 {
1460 unsigned long pio_mask, mwdma_mask, udma_mask;
1461
1462 /* Usual case. Word 53 indicates word 64 is valid */
1463 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1464 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1465 pio_mask <<= 3;
1466 pio_mask |= 0x7;
1467 } else {
1468 /* If word 64 isn't valid then Word 51 high byte holds
1469 * the PIO timing number for the maximum. Turn it into
1470 * a mask.
1471 */
1472 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1473 if (mode < 5) /* Valid PIO range */
1474 pio_mask = (2 << mode) - 1;
1475 else
1476 pio_mask = 1;
1477
1478 /* But wait.. there's more. Design your standards by
1479 * committee and you too can get a free iordy field to
1480 * process. However, it's the speeds, not the modes, that
1481 * are supported... Note that drivers using the timing API
1482 * will get this right anyway.
1483 */
1484 }
1485
1486 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1487
1488 if (ata_id_is_cfa(id)) {
1489 /*
1490 * Process compact flash extended modes
1491 */
1492 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1493 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1494
1495 if (pio)
1496 pio_mask |= (1 << 5);
1497 if (pio > 1)
1498 pio_mask |= (1 << 6);
1499 if (dma)
1500 mwdma_mask |= (1 << 3);
1501 if (dma > 1)
1502 mwdma_mask |= (1 << 4);
1503 }
1504
1505 udma_mask = 0;
1506 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1507 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1508
1509 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1510 }
1511
1512 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1513 {
1514 struct completion *waiting = qc->private_data;
1515
1516 complete(waiting);
1517 }
1518
1519 /**
1520 * ata_exec_internal_sg - execute libata internal command
1521 * @dev: Device to which the command is sent
1522 * @tf: Taskfile registers for the command and the result
1523 * @cdb: CDB for packet command
1524 * @dma_dir: Data transfer direction of the command
1525 * @sgl: sg list for the data buffer of the command
1526 * @n_elem: Number of sg entries
1527 * @timeout: Timeout in msecs (0 for default)
1528 *
1529 * Executes libata internal command with timeout. @tf contains
1530 * command on entry and result on return. Timeout and error
1531 * conditions are reported via return value. No recovery action
1532 * is taken after a command times out. It's the caller's duty to
1533 * clean up after timeout.
1534 *
1535 * LOCKING:
1536 * None. Should be called with kernel context, might sleep.
1537 *
1538 * RETURNS:
1539 * Zero on success, AC_ERR_* mask on failure
1540 */
1541 unsigned ata_exec_internal_sg(struct ata_device *dev,
1542 struct ata_taskfile *tf, const u8 *cdb,
1543 int dma_dir, struct scatterlist *sgl,
1544 unsigned int n_elem, unsigned long timeout)
1545 {
1546 struct ata_link *link = dev->link;
1547 struct ata_port *ap = link->ap;
1548 u8 command = tf->command;
1549 int auto_timeout = 0;
1550 struct ata_queued_cmd *qc;
1551 unsigned int tag, preempted_tag;
1552 u32 preempted_sactive, preempted_qc_active;
1553 int preempted_nr_active_links;
1554 DECLARE_COMPLETION_ONSTACK(wait);
1555 unsigned long flags;
1556 unsigned int err_mask;
1557 int rc;
1558
1559 spin_lock_irqsave(ap->lock, flags);
1560
1561 /* no internal command while frozen */
1562 if (ap->pflags & ATA_PFLAG_FROZEN) {
1563 spin_unlock_irqrestore(ap->lock, flags);
1564 return AC_ERR_SYSTEM;
1565 }
1566
1567 /* initialize internal qc */
1568
1569 /* XXX: Tag 0 is used for drivers with legacy EH as some
1570 * drivers choke if any other tag is given. This breaks
1571 * ata_tag_internal() test for those drivers. Don't use new
1572 * EH stuff without converting to it.
1573 */
1574 if (ap->ops->error_handler)
1575 tag = ATA_TAG_INTERNAL;
1576 else
1577 tag = 0;
1578
1579 if (test_and_set_bit(tag, &ap->qc_allocated))
1580 BUG();
1581 qc = __ata_qc_from_tag(ap, tag);
1582
1583 qc->tag = tag;
1584 qc->scsicmd = NULL;
1585 qc->ap = ap;
1586 qc->dev = dev;
1587 ata_qc_reinit(qc);
1588
1589 preempted_tag = link->active_tag;
1590 preempted_sactive = link->sactive;
1591 preempted_qc_active = ap->qc_active;
1592 preempted_nr_active_links = ap->nr_active_links;
1593 link->active_tag = ATA_TAG_POISON;
1594 link->sactive = 0;
1595 ap->qc_active = 0;
1596 ap->nr_active_links = 0;
1597
1598 /* prepare & issue qc */
1599 qc->tf = *tf;
1600 if (cdb)
1601 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1602 qc->flags |= ATA_QCFLAG_RESULT_TF;
1603 qc->dma_dir = dma_dir;
1604 if (dma_dir != DMA_NONE) {
1605 unsigned int i, buflen = 0;
1606 struct scatterlist *sg;
1607
1608 for_each_sg(sgl, sg, n_elem, i)
1609 buflen += sg->length;
1610
1611 ata_sg_init(qc, sgl, n_elem);
1612 qc->nbytes = buflen;
1613 }
1614
1615 qc->private_data = &wait;
1616 qc->complete_fn = ata_qc_complete_internal;
1617
1618 ata_qc_issue(qc);
1619
1620 spin_unlock_irqrestore(ap->lock, flags);
1621
1622 if (!timeout) {
1623 if (ata_probe_timeout)
1624 timeout = ata_probe_timeout * 1000;
1625 else {
1626 timeout = ata_internal_cmd_timeout(dev, command);
1627 auto_timeout = 1;
1628 }
1629 }
1630
1631 if (ap->ops->error_handler)
1632 ata_eh_release(ap);
1633
1634 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1635
1636 if (ap->ops->error_handler)
1637 ata_eh_acquire(ap);
1638
1639 ata_sff_flush_pio_task(ap);
1640
1641 if (!rc) {
1642 spin_lock_irqsave(ap->lock, flags);
1643
1644 /* We're racing with irq here. If we lose, the
1645 * following test prevents us from completing the qc
1646 * twice. If we win, the port is frozen and will be
1647 * cleaned up by ->post_internal_cmd().
1648 */
1649 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1650 qc->err_mask |= AC_ERR_TIMEOUT;
1651
1652 if (ap->ops->error_handler)
1653 ata_port_freeze(ap);
1654 else
1655 ata_qc_complete(qc);
1656
1657 if (ata_msg_warn(ap))
1658 ata_dev_printk(dev, KERN_WARNING,
1659 "qc timeout (cmd 0x%x)\n", command);
1660 }
1661
1662 spin_unlock_irqrestore(ap->lock, flags);
1663 }
1664
1665 /* do post_internal_cmd */
1666 if (ap->ops->post_internal_cmd)
1667 ap->ops->post_internal_cmd(qc);
1668
1669 /* perform minimal error analysis */
1670 if (qc->flags & ATA_QCFLAG_FAILED) {
1671 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1672 qc->err_mask |= AC_ERR_DEV;
1673
1674 if (!qc->err_mask)
1675 qc->err_mask |= AC_ERR_OTHER;
1676
1677 if (qc->err_mask & ~AC_ERR_OTHER)
1678 qc->err_mask &= ~AC_ERR_OTHER;
1679 }
1680
1681 /* finish up */
1682 spin_lock_irqsave(ap->lock, flags);
1683
1684 *tf = qc->result_tf;
1685 err_mask = qc->err_mask;
1686
1687 ata_qc_free(qc);
1688 link->active_tag = preempted_tag;
1689 link->sactive = preempted_sactive;
1690 ap->qc_active = preempted_qc_active;
1691 ap->nr_active_links = preempted_nr_active_links;
1692
1693 spin_unlock_irqrestore(ap->lock, flags);
1694
1695 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1696 ata_internal_cmd_timed_out(dev, command);
1697
1698 return err_mask;
1699 }
1700
1701 /**
1702 * ata_exec_internal - execute libata internal command
1703 * @dev: Device to which the command is sent
1704 * @tf: Taskfile registers for the command and the result
1705 * @cdb: CDB for packet command
1706 * @dma_dir: Data transfer direction of the command
1707 * @buf: Data buffer of the command
1708 * @buflen: Length of data buffer
1709 * @timeout: Timeout in msecs (0 for default)
1710 *
1711 * Wrapper around ata_exec_internal_sg() which takes simple
1712 * buffer instead of sg list.
1713 *
1714 * LOCKING:
1715 * None. Should be called with kernel context, might sleep.
1716 *
1717 * RETURNS:
1718 * Zero on success, AC_ERR_* mask on failure
1719 */
1720 unsigned ata_exec_internal(struct ata_device *dev,
1721 struct ata_taskfile *tf, const u8 *cdb,
1722 int dma_dir, void *buf, unsigned int buflen,
1723 unsigned long timeout)
1724 {
1725 struct scatterlist *psg = NULL, sg;
1726 unsigned int n_elem = 0;
1727
1728 if (dma_dir != DMA_NONE) {
1729 WARN_ON(!buf);
1730 sg_init_one(&sg, buf, buflen);
1731 psg = &sg;
1732 n_elem++;
1733 }
1734
1735 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1736 timeout);
1737 }
1738
1739 /**
1740 * ata_do_simple_cmd - execute simple internal command
1741 * @dev: Device to which the command is sent
1742 * @cmd: Opcode to execute
1743 *
1744 * Execute a 'simple' command, that only consists of the opcode
1745 * 'cmd' itself, without filling any other registers
1746 *
1747 * LOCKING:
1748 * Kernel thread context (may sleep).
1749 *
1750 * RETURNS:
1751 * Zero on success, AC_ERR_* mask on failure
1752 */
1753 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1754 {
1755 struct ata_taskfile tf;
1756
1757 ata_tf_init(dev, &tf);
1758
1759 tf.command = cmd;
1760 tf.flags |= ATA_TFLAG_DEVICE;
1761 tf.protocol = ATA_PROT_NODATA;
1762
1763 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1764 }
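
/*
 * Example (illustrative, hypothetical caller): issuing a register-only
 * command such as STANDBY IMMEDIATE would look like:
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_WARNING, "STANDBY failed\n");
 */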
1765
1766 /**
1767 * ata_pio_need_iordy - check if iordy needed
1768 * @adev: ATA device
1769 *
1770 * Check if the current speed of the device requires IORDY. Used
1771 * by various controllers for chip configuration.
1772 */
1773 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1774 {
1775 /* Don't set IORDY if we're preparing for reset. IORDY may
1776 * lead to controller lock up on certain controllers if the
1777 * port is not occupied. See bko#11703 for details.
1778 */
1779 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1780 return 0;
1781 /* Controller doesn't support IORDY. Probably a pointless
1782 * check as the caller should know this.
1783 */
1784 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1785 return 0;
1786 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1787 if (ata_id_is_cfa(adev->id)
1788 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1789 return 0;
1790 /* PIO3 and higher it is mandatory */
1791 if (adev->pio_mode > XFER_PIO_2)
1792 return 1;
1793 /* We turn it on when possible */
1794 if (ata_id_has_iordy(adev->id))
1795 return 1;
1796 return 0;
1797 }
1798
1799 /**
1800 * ata_pio_mask_no_iordy - Return the non IORDY mask
1801 * @adev: ATA device
1802 *
1803 * Compute the highest mode possible if we are not using iordy. Return
1804 * -1 if no iordy mode is available.
1805 */
1806 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1807 {
1808 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1809 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1810 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1811 /* Is the speed faster than the drive allows non IORDY ? */
1812 if (pio) {
1813 /* This is cycle times not frequency - watch the logic! */
1814 if (pio > 240) /* PIO2 is 240nS per cycle */
1815 return 3 << ATA_SHIFT_PIO;
1816 return 7 << ATA_SHIFT_PIO;
1817 }
1818 }
1819 return 3 << ATA_SHIFT_PIO;
1820 }
1821
1822 /**
1823 * ata_do_dev_read_id - default ID read method
1824 * @dev: device
1825 * @tf: proposed taskfile
1826 * @id: data buffer
1827 *
1828 * Issue the identify taskfile and hand back the buffer containing
1829 * identify data. For some RAID controllers and for pre ATA devices
1830 * this function is wrapped or replaced by the driver.
1831 */
1832 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1833 struct ata_taskfile *tf, u16 *id)
1834 {
1835 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1836 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1837 }
1838
1839 /**
1840 * ata_dev_read_id - Read ID data from the specified device
1841 * @dev: target device
1842 * @p_class: pointer to class of the target device (may be changed)
1843 * @flags: ATA_READID_* flags
1844 * @id: buffer to read IDENTIFY data into
1845 *
1846 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1847 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1848 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1849 * for pre-ATA4 drives.
1850 *
1851 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1852 * now we abort if we hit that case.
1853 *
1854 * LOCKING:
1855 * Kernel thread context (may sleep)
1856 *
1857 * RETURNS:
1858 * 0 on success, -errno otherwise.
1859 */
1860 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1861 unsigned int flags, u16 *id)
1862 {
1863 struct ata_port *ap = dev->link->ap;
1864 unsigned int class = *p_class;
1865 struct ata_taskfile tf;
1866 unsigned int err_mask = 0;
1867 const char *reason;
1868 bool is_semb = class == ATA_DEV_SEMB;
1869 int may_fallback = 1, tried_spinup = 0;
1870 int rc;
1871
1872 if (ata_msg_ctl(ap))
1873 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1874
1875 retry:
1876 ata_tf_init(dev, &tf);
1877
1878 switch (class) {
1879 case ATA_DEV_SEMB:
1880 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1881 case ATA_DEV_ATA:
1882 tf.command = ATA_CMD_ID_ATA;
1883 break;
1884 case ATA_DEV_ATAPI:
1885 tf.command = ATA_CMD_ID_ATAPI;
1886 break;
1887 default:
1888 rc = -ENODEV;
1889 reason = "unsupported class";
1890 goto err_out;
1891 }
1892
1893 tf.protocol = ATA_PROT_PIO;
1894
1895 /* Some devices choke if TF registers contain garbage. Make
1896 * sure those are properly initialized.
1897 */
1898 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1899
1900 /* Device presence detection is unreliable on some
1901 * controllers. Always poll IDENTIFY if available.
1902 */
1903 tf.flags |= ATA_TFLAG_POLLING;
1904
1905 if (ap->ops->read_id)
1906 err_mask = ap->ops->read_id(dev, &tf, id);
1907 else
1908 err_mask = ata_do_dev_read_id(dev, &tf, id);
1909
1910 if (err_mask) {
1911 if (err_mask & AC_ERR_NODEV_HINT) {
1912 ata_dev_printk(dev, KERN_DEBUG,
1913 "NODEV after polling detection\n");
1914 return -ENOENT;
1915 }
1916
1917 if (is_semb) {
1918 ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
1919 "device w/ SEMB sig, disabled\n");
1920 /* SEMB is not supported yet */
1921 *p_class = ATA_DEV_SEMB_UNSUP;
1922 return 0;
1923 }
1924
1925 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1926 /* Device or controller might have reported
1927 * the wrong device class. Give a shot at the
1928 * other IDENTIFY if the current one is
1929 * aborted by the device.
1930 */
1931 if (may_fallback) {
1932 may_fallback = 0;
1933
1934 if (class == ATA_DEV_ATA)
1935 class = ATA_DEV_ATAPI;
1936 else
1937 class = ATA_DEV_ATA;
1938 goto retry;
1939 }
1940
1941 /* Control reaches here iff the device aborted
1942 * both flavors of IDENTIFYs which happens
1943 * sometimes with phantom devices.
1944 */
1945 ata_dev_printk(dev, KERN_DEBUG,
1946 "both IDENTIFYs aborted, assuming NODEV\n");
1947 return -ENOENT;
1948 }
1949
1950 rc = -EIO;
1951 reason = "I/O error";
1952 goto err_out;
1953 }
1954
1955 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1956 ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
1957 "class=%d may_fallback=%d tried_spinup=%d\n",
1958 class, may_fallback, tried_spinup);
1959 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1960 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1961 }
1962
1963 /* Falling back doesn't make sense if ID data was read
1964 * successfully at least once.
1965 */
1966 may_fallback = 0;
1967
1968 swap_buf_le16(id, ATA_ID_WORDS);
1969
1970 /* sanity check */
1971 rc = -EINVAL;
1972 reason = "device reports invalid type";
1973
1974 if (class == ATA_DEV_ATA) {
1975 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1976 goto err_out;
1977 } else {
1978 if (ata_id_is_ata(id))
1979 goto err_out;
1980 }
1981
1982 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1983 tried_spinup = 1;
1984 /*
1985 * Drive powered-up in standby mode, and requires a specific
1986 * SET_FEATURES spin-up subcommand before it will accept
1987 * anything other than the original IDENTIFY command.
1988 */
1989 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1990 if (err_mask && id[2] != 0x738c) {
1991 rc = -EIO;
1992 reason = "SPINUP failed";
1993 goto err_out;
1994 }
1995 /*
1996 * If the drive initially returned incomplete IDENTIFY info,
1997 * we now must reissue the IDENTIFY command.
1998 */
1999 if (id[2] == 0x37c8)
2000 goto retry;
2001 }
2002
2003 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2004 /*
2005 * The exact sequence expected by certain pre-ATA4 drives is:
2006 * SRST RESET
2007 * IDENTIFY (optional in early ATA)
2008 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2009 * anything else..
2010 * Some drives were very specific about that exact sequence.
2011 *
2012 * Note that ATA4 says lba is mandatory so the second check
2013 * should never trigger.
2014 */
2015 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2016 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2017 if (err_mask) {
2018 rc = -EIO;
2019 reason = "INIT_DEV_PARAMS failed";
2020 goto err_out;
2021 }
2022
2023 /* current CHS translation info (id[53-58]) might be
2024 * changed. reread the identify device info.
2025 */
2026 flags &= ~ATA_READID_POSTRESET;
2027 goto retry;
2028 }
2029 }
2030
2031 *p_class = class;
2032
2033 return 0;
2034
2035 err_out:
2036 if (ata_msg_warn(ap))
2037 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2038 "(%s, err_mask=0x%x)\n", reason, err_mask);
2039 return rc;
2040 }
2041
2042 static int ata_do_link_spd_horkage(struct ata_device *dev)
2043 {
2044 struct ata_link *plink = ata_dev_phys_link(dev);
2045 u32 target, target_limit;
2046
2047 if (!sata_scr_valid(plink))
2048 return 0;
2049
2050 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2051 target = 1;
2052 else
2053 return 0;
2054
2055 target_limit = (1 << target) - 1;
2056
2057 /* if already on stricter limit, no need to push further */
2058 if (plink->sata_spd_limit <= target_limit)
2059 return 0;
2060
2061 plink->sata_spd_limit = target_limit;
2062
2063 /* Request another EH round by returning -EAGAIN if link is
2064 * going faster than the target speed. Forward progress is
2065 * guaranteed by setting sata_spd_limit to target_limit above.
2066 */
2067 if (plink->sata_spd > target) {
2068 ata_dev_printk(dev, KERN_INFO,
2069 "applying link speed limit horkage to %s\n",
2070 sata_spd_string(target));
2071 return -EAGAIN;
2072 }
2073 return 0;
2074 }
2075
2076 static inline u8 ata_dev_knobble(struct ata_device *dev)
2077 {
2078 struct ata_port *ap = dev->link->ap;
2079
2080 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2081 return 0;
2082
2083 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2084 }
2085
2086 static int ata_dev_config_ncq(struct ata_device *dev,
2087 char *desc, size_t desc_sz)
2088 {
2089 struct ata_port *ap = dev->link->ap;
2090 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2091 unsigned int err_mask;
2092 char *aa_desc = "";
2093
2094 if (!ata_id_has_ncq(dev->id)) {
2095 desc[0] = '\0';
2096 return 0;
2097 }
2098 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2099 snprintf(desc, desc_sz, "NCQ (not used)");
2100 return 0;
2101 }
2102 if (ap->flags & ATA_FLAG_NCQ) {
2103 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2104 dev->flags |= ATA_DFLAG_NCQ;
2105 }
2106
2107 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2108 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2109 ata_id_has_fpdma_aa(dev->id)) {
2110 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2111 SATA_FPDMA_AA);
2112 if (err_mask) {
2113 			ata_dev_printk(dev, KERN_ERR, "failed to enable AA "
2114 "(error_mask=0x%x)\n", err_mask);
2115 if (err_mask != AC_ERR_DEV) {
2116 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2117 return -EIO;
2118 }
2119 } else
2120 aa_desc = ", AA";
2121 }
2122
2123 if (hdepth >= ddepth)
2124 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2125 else
2126 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2127 ddepth, aa_desc);
2128 return 0;
2129 }
2130
2131 /**
2132 * ata_dev_configure - Configure the specified ATA/ATAPI device
2133 * @dev: Target device to configure
2134 *
2135 * Configure @dev according to @dev->id. Generic and low-level
2136 * driver specific fixups are also applied.
2137 *
2138 * LOCKING:
2139 * Kernel thread context (may sleep)
2140 *
2141 * RETURNS:
2142 * 0 on success, -errno otherwise
2143 */
2144 int ata_dev_configure(struct ata_device *dev)
2145 {
2146 struct ata_port *ap = dev->link->ap;
2147 struct ata_eh_context *ehc = &dev->link->eh_context;
2148 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2149 const u16 *id = dev->id;
2150 unsigned long xfer_mask;
2151 char revbuf[7]; /* XYZ-99\0 */
2152 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2153 char modelbuf[ATA_ID_PROD_LEN+1];
2154 int rc;
2155
2156 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2157 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2158 __func__);
2159 return 0;
2160 }
2161
2162 if (ata_msg_probe(ap))
2163 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2164
2165 /* set horkage */
2166 dev->horkage |= ata_dev_blacklisted(dev);
2167 ata_force_horkage(dev);
2168
2169 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2170 ata_dev_printk(dev, KERN_INFO,
2171 "unsupported device, disabling\n");
2172 ata_dev_disable(dev);
2173 return 0;
2174 }
2175
2176 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2177 dev->class == ATA_DEV_ATAPI) {
2178 ata_dev_printk(dev, KERN_WARNING,
2179 "WARNING: ATAPI is %s, device ignored.\n",
2180 atapi_enabled ? "not supported with this driver"
2181 : "disabled");
2182 ata_dev_disable(dev);
2183 return 0;
2184 }
2185
2186 rc = ata_do_link_spd_horkage(dev);
2187 if (rc)
2188 return rc;
2189
2190 /* let ACPI work its magic */
2191 rc = ata_acpi_on_devcfg(dev);
2192 if (rc)
2193 return rc;
2194
2195 /* massage HPA, do it early as it might change IDENTIFY data */
2196 rc = ata_hpa_resize(dev);
2197 if (rc)
2198 return rc;
2199
2200 /* print device capabilities */
2201 if (ata_msg_probe(ap))
2202 ata_dev_printk(dev, KERN_DEBUG,
2203 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2204 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2205 __func__,
2206 id[49], id[82], id[83], id[84],
2207 id[85], id[86], id[87], id[88]);
2208
2209 /* initialize to-be-configured parameters */
2210 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2211 dev->max_sectors = 0;
2212 dev->cdb_len = 0;
2213 dev->n_sectors = 0;
2214 dev->cylinders = 0;
2215 dev->heads = 0;
2216 dev->sectors = 0;
2217 dev->multi_count = 0;
2218
2219 /*
2220 * common ATA, ATAPI feature tests
2221 */
2222
2223 /* find max transfer mode; for printk only */
2224 xfer_mask = ata_id_xfermask(id);
2225
2226 if (ata_msg_probe(ap))
2227 ata_dump_id(id);
2228
2229 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2230 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2231 sizeof(fwrevbuf));
2232
2233 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2234 sizeof(modelbuf));
2235
2236 /* ATA-specific feature tests */
2237 if (dev->class == ATA_DEV_ATA) {
2238 if (ata_id_is_cfa(id)) {
2239 /* CPRM may make this media unusable */
2240 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2241 ata_dev_printk(dev, KERN_WARNING,
2242 "supports DRM functions and may "
2243 "not be fully accessible.\n");
2244 snprintf(revbuf, 7, "CFA");
2245 } else {
2246 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2247 /* Warn the user if the device has TPM extensions */
2248 if (ata_id_has_tpm(id))
2249 ata_dev_printk(dev, KERN_WARNING,
2250 "supports DRM functions and may "
2251 "not be fully accessible.\n");
2252 }
2253
2254 dev->n_sectors = ata_id_n_sectors(id);
2255
2256 /* get current R/W Multiple count setting */
2257 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2258 unsigned int max = dev->id[47] & 0xff;
2259 unsigned int cnt = dev->id[59] & 0xff;
2260 /* only recognize/allow powers of two here */
2261 if (is_power_of_2(max) && is_power_of_2(cnt))
2262 if (cnt <= max)
2263 dev->multi_count = cnt;
2264 }
2265
2266 if (ata_id_has_lba(id)) {
2267 const char *lba_desc;
2268 char ncq_desc[24];
2269
2270 lba_desc = "LBA";
2271 dev->flags |= ATA_DFLAG_LBA;
2272 if (ata_id_has_lba48(id)) {
2273 dev->flags |= ATA_DFLAG_LBA48;
2274 lba_desc = "LBA48";
2275
2276 if (dev->n_sectors >= (1UL << 28) &&
2277 ata_id_has_flush_ext(id))
2278 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2279 }
2280
2281 /* config NCQ */
2282 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2283 if (rc)
2284 return rc;
2285
2286 /* print device info to dmesg */
2287 if (ata_msg_drv(ap) && print_info) {
2288 ata_dev_printk(dev, KERN_INFO,
2289 "%s: %s, %s, max %s\n",
2290 revbuf, modelbuf, fwrevbuf,
2291 ata_mode_string(xfer_mask));
2292 ata_dev_printk(dev, KERN_INFO,
2293 "%Lu sectors, multi %u: %s %s\n",
2294 (unsigned long long)dev->n_sectors,
2295 dev->multi_count, lba_desc, ncq_desc);
2296 }
2297 } else {
2298 /* CHS */
2299
2300 /* Default translation */
2301 dev->cylinders = id[1];
2302 dev->heads = id[3];
2303 dev->sectors = id[6];
2304
2305 if (ata_id_current_chs_valid(id)) {
2306 /* Current CHS translation is valid. */
2307 dev->cylinders = id[54];
2308 dev->heads = id[55];
2309 dev->sectors = id[56];
2310 }
2311
2312 /* print device info to dmesg */
2313 if (ata_msg_drv(ap) && print_info) {
2314 ata_dev_printk(dev, KERN_INFO,
2315 "%s: %s, %s, max %s\n",
2316 revbuf, modelbuf, fwrevbuf,
2317 ata_mode_string(xfer_mask));
2318 ata_dev_printk(dev, KERN_INFO,
2319 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2320 (unsigned long long)dev->n_sectors,
2321 dev->multi_count, dev->cylinders,
2322 dev->heads, dev->sectors);
2323 }
2324 }
2325
2326 dev->cdb_len = 16;
2327 }
2328
2329 /* ATAPI-specific feature tests */
2330 else if (dev->class == ATA_DEV_ATAPI) {
2331 const char *cdb_intr_string = "";
2332 const char *atapi_an_string = "";
2333 const char *dma_dir_string = "";
2334 u32 sntf;
2335
2336 rc = atapi_cdb_len(id);
2337 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2338 if (ata_msg_warn(ap))
2339 ata_dev_printk(dev, KERN_WARNING,
2340 "unsupported CDB len\n");
2341 rc = -EINVAL;
2342 goto err_out_nosup;
2343 }
2344 dev->cdb_len = (unsigned int) rc;
2345
2346 /* Enable ATAPI AN if both the host and device have
2347 * the support. If PMP is attached, SNTF is required
2348 * to enable ATAPI AN to discern between PHY status
2349 * changed notifications and ATAPI ANs.
2350 */
2351 if (atapi_an &&
2352 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2353 (!sata_pmp_attached(ap) ||
2354 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2355 unsigned int err_mask;
2356
2357 /* issue SET feature command to turn this on */
2358 err_mask = ata_dev_set_feature(dev,
2359 SETFEATURES_SATA_ENABLE, SATA_AN);
2360 if (err_mask)
2361 ata_dev_printk(dev, KERN_ERR,
2362 "failed to enable ATAPI AN "
2363 "(err_mask=0x%x)\n", err_mask);
2364 else {
2365 dev->flags |= ATA_DFLAG_AN;
2366 atapi_an_string = ", ATAPI AN";
2367 }
2368 }
2369
2370 if (ata_id_cdb_intr(dev->id)) {
2371 dev->flags |= ATA_DFLAG_CDB_INTR;
2372 cdb_intr_string = ", CDB intr";
2373 }
2374
2375 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2376 dev->flags |= ATA_DFLAG_DMADIR;
2377 dma_dir_string = ", DMADIR";
2378 }
2379
2380 /* print device info to dmesg */
2381 if (ata_msg_drv(ap) && print_info)
2382 ata_dev_printk(dev, KERN_INFO,
2383 "ATAPI: %s, %s, max %s%s%s%s\n",
2384 modelbuf, fwrevbuf,
2385 ata_mode_string(xfer_mask),
2386 cdb_intr_string, atapi_an_string,
2387 dma_dir_string);
2388 }
2389
2390 /* determine max_sectors */
2391 dev->max_sectors = ATA_MAX_SECTORS;
2392 if (dev->flags & ATA_DFLAG_LBA48)
2393 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2394
2395 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2396 200 sectors */
2397 if (ata_dev_knobble(dev)) {
2398 if (ata_msg_drv(ap) && print_info)
2399 ata_dev_printk(dev, KERN_INFO,
2400 "applying bridge limits\n");
2401 dev->udma_mask &= ATA_UDMA5;
2402 dev->max_sectors = ATA_MAX_SECTORS;
2403 }
2404
2405 if ((dev->class == ATA_DEV_ATAPI) &&
2406 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2407 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2408 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2409 }
2410
2411 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2412 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2413 dev->max_sectors);
2414
2415 if (ap->ops->dev_config)
2416 ap->ops->dev_config(dev);
2417
2418 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2419 /* Let the user know. We don't want to disallow opens for
2420 rescue purposes, or in case the vendor is just a blithering
2421 idiot. Do this after the dev_config call as some controllers
2422 with buggy firmware may want to avoid reporting false device
2423 bugs */
2424
2425 if (print_info) {
2426 ata_dev_printk(dev, KERN_WARNING,
2427 "Drive reports diagnostics failure. This may indicate a drive\n");
2428 ata_dev_printk(dev, KERN_WARNING,
2429 "fault or invalid emulation. Contact drive vendor for information.\n");
2430 }
2431 }
2432
2433 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2434 ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2435 "firmware update to be fully functional.\n");
2436 ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
2437 "or visit http://ata.wiki.kernel.org.\n");
2438 }
2439
2440 return 0;
2441
2442 err_out_nosup:
2443 if (ata_msg_probe(ap))
2444 ata_dev_printk(dev, KERN_DEBUG,
2445 "%s: EXIT, err\n", __func__);
2446 return rc;
2447 }
2448
2449 /**
2450 * ata_cable_40wire - return 40 wire cable type
2451 * @ap: port
2452 *
2453 * Helper method for drivers which want to hardwire 40 wire cable
2454 * detection.
2455 */
2456
2457 int ata_cable_40wire(struct ata_port *ap)
2458 {
2459 return ATA_CBL_PATA40;
2460 }
2461
2462 /**
2463 * ata_cable_80wire - return 80 wire cable type
2464 * @ap: port
2465 *
2466 * Helper method for drivers which want to hardwire 80 wire cable
2467 * detection.
2468 */
2469
2470 int ata_cable_80wire(struct ata_port *ap)
2471 {
2472 return ATA_CBL_PATA80;
2473 }
2474
2475 /**
2476 * ata_cable_unknown - return unknown PATA cable.
2477 * @ap: port
2478 *
2479 * Helper method for drivers which have no PATA cable detection.
2480 */
2481
2482 int ata_cable_unknown(struct ata_port *ap)
2483 {
2484 return ATA_CBL_PATA_UNK;
2485 }
2486
2487 /**
2488 * ata_cable_ignore - return ignored PATA cable.
2489 * @ap: port
2490 *
2491 * Helper method for drivers which don't use cable type to limit
2492 * transfer mode.
2493 */
2494 int ata_cable_ignore(struct ata_port *ap)
2495 {
2496 return ATA_CBL_PATA_IGN;
2497 }
2498
2499 /**
2500 * ata_cable_sata - return SATA cable type
2501 * @ap: port
2502 *
2503 * Helper method for drivers which have SATA cables
2504 */
2505
2506 int ata_cable_sata(struct ata_port *ap)
2507 {
2508 return ATA_CBL_SATA;
2509 }
2510
2511 /**
2512 * ata_bus_probe - Reset and probe ATA bus
2513 * @ap: Bus to probe
2514 *
2515 * Master ATA bus probing function. Initiates a hardware-dependent
2516 * bus reset, then attempts to identify any devices found on
2517 * the bus.
2518 *
2519 * LOCKING:
2520 * PCI/etc. bus probe sem.
2521 *
2522 * RETURNS:
2523 * Zero on success, negative errno otherwise.
2524 */
2525
2526 int ata_bus_probe(struct ata_port *ap)
2527 {
2528 unsigned int classes[ATA_MAX_DEVICES];
2529 int tries[ATA_MAX_DEVICES];
2530 int rc;
2531 struct ata_device *dev;
2532
2533 ata_for_each_dev(dev, &ap->link, ALL)
2534 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2535
2536 retry:
2537 ata_for_each_dev(dev, &ap->link, ALL) {
2538 /* If we issue an SRST then an ATA drive (not ATAPI)
2539 * may change configuration and be in PIO0 timing. If
2540 * we do a hard reset (or are coming from power on)
2541 * this is true for ATA or ATAPI. Until we've set a
2542 * suitable controller mode we should not touch the
2543 * bus as we may be talking too fast.
2544 */
2545 dev->pio_mode = XFER_PIO_0;
2546
2547 /* If the controller has a pio mode setup function
2548 * then use it to set the chipset to rights. Don't
2549 * touch the DMA setup as that will be dealt with when
2550 * configuring devices.
2551 */
2552 if (ap->ops->set_piomode)
2553 ap->ops->set_piomode(ap, dev);
2554 }
2555
2556 /* reset and determine device classes */
2557 ap->ops->phy_reset(ap);
2558
2559 ata_for_each_dev(dev, &ap->link, ALL) {
2560 if (dev->class != ATA_DEV_UNKNOWN)
2561 classes[dev->devno] = dev->class;
2562 else
2563 classes[dev->devno] = ATA_DEV_NONE;
2564
2565 dev->class = ATA_DEV_UNKNOWN;
2566 }
2567
2568 /* read IDENTIFY page and configure devices. We have to do the identify
2569 specific sequence bass-ackwards so that PDIAG- is released by
2570 the slave device */
2571
2572 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2573 if (tries[dev->devno])
2574 dev->class = classes[dev->devno];
2575
2576 if (!ata_dev_enabled(dev))
2577 continue;
2578
2579 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2580 dev->id);
2581 if (rc)
2582 goto fail;
2583 }
2584
2585 /* Now ask for the cable type as PDIAG- should have been released */
2586 if (ap->ops->cable_detect)
2587 ap->cbl = ap->ops->cable_detect(ap);
2588
2589 /* We may have SATA bridge glue hiding here irrespective of
2590 * the reported cable types and sensed types. When SATA
2591 * drives indicate we have a bridge, we don't know which end
2592 	 * of the link the bridge is on, which is a problem.
2593 */
2594 ata_for_each_dev(dev, &ap->link, ENABLED)
2595 if (ata_id_is_sata(dev->id))
2596 ap->cbl = ATA_CBL_SATA;
2597
2598 /* After the identify sequence we can now set up the devices. We do
2599 this in the normal order so that the user doesn't get confused */
2600
2601 ata_for_each_dev(dev, &ap->link, ENABLED) {
2602 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2603 rc = ata_dev_configure(dev);
2604 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2605 if (rc)
2606 goto fail;
2607 }
2608
2609 /* configure transfer mode */
2610 rc = ata_set_mode(&ap->link, &dev);
2611 if (rc)
2612 goto fail;
2613
2614 ata_for_each_dev(dev, &ap->link, ENABLED)
2615 return 0;
2616
2617 return -ENODEV;
2618
2619 fail:
2620 tries[dev->devno]--;
2621
2622 switch (rc) {
2623 case -EINVAL:
2624 /* eeek, something went very wrong, give up */
2625 tries[dev->devno] = 0;
2626 break;
2627
2628 case -ENODEV:
2629 /* give it just one more chance */
2630 tries[dev->devno] = min(tries[dev->devno], 1);
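		/* fall through */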
2631 case -EIO:
2632 if (tries[dev->devno] == 1) {
2633 /* This is the last chance, better to slow
2634 * down than lose it.
2635 */
2636 sata_down_spd_limit(&ap->link, 0);
2637 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2638 }
2639 }
2640
2641 if (!tries[dev->devno])
2642 ata_dev_disable(dev);
2643
2644 goto retry;
2645 }
2646
2647 /**
2648 * sata_print_link_status - Print SATA link status
2649 * @link: SATA link to printk link status about
2650 *
2651 * This function prints link speed and status of a SATA link.
2652 *
2653 * LOCKING:
2654 * None.
2655 */
2656 static void sata_print_link_status(struct ata_link *link)
2657 {
2658 u32 sstatus, scontrol, tmp;
2659
2660 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2661 return;
2662 sata_scr_read(link, SCR_CONTROL, &scontrol);
2663
2664 if (ata_phys_link_online(link)) {
2665 tmp = (sstatus >> 4) & 0xf;
2666 ata_link_printk(link, KERN_INFO,
2667 "SATA link up %s (SStatus %X SControl %X)\n",
2668 sata_spd_string(tmp), sstatus, scontrol);
2669 } else {
2670 ata_link_printk(link, KERN_INFO,
2671 "SATA link down (SStatus %X SControl %X)\n",
2672 sstatus, scontrol);
2673 }
2674 }
2675
2676 /**
2677 * ata_dev_pair - return other device on cable
2678 * @adev: device
2679 *
2680  *	Obtain the other device on the same cable, or NULL if none
2681  *	is present.
2682 */
2683
2684 struct ata_device *ata_dev_pair(struct ata_device *adev)
2685 {
2686 struct ata_link *link = adev->link;
2687 struct ata_device *pair = &link->device[1 - adev->devno];
2688 if (!ata_dev_enabled(pair))
2689 return NULL;
2690 return pair;
2691 }
2692
2693 /**
2694 * sata_down_spd_limit - adjust SATA spd limit downward
2695 * @link: Link to adjust SATA spd limit for
2696 * @spd_limit: Additional limit
2697 *
2698 * Adjust SATA spd limit of @link downward. Note that this
2699 * function only adjusts the limit. The change must be applied
2700 * using sata_set_spd().
2701 *
2702  *	If @spd_limit is non-zero, the speed is limited to a value equal
2703  *	to or lower than @spd_limit if such a speed is supported. If
2704 * @spd_limit is slower than any supported speed, only the lowest
2705 * supported speed is allowed.
2706 *
2707 * LOCKING:
2708 * Inherited from caller.
2709 *
2710 * RETURNS:
2711 * 0 on success, negative errno on failure
2712 */
2713 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2714 {
2715 u32 sstatus, spd, mask;
2716 int rc, bit;
2717
2718 if (!sata_scr_valid(link))
2719 return -EOPNOTSUPP;
2720
2721 /* If SCR can be read, use it to determine the current SPD.
2722 * If not, use cached value in link->sata_spd.
2723 */
2724 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2725 if (rc == 0 && ata_sstatus_online(sstatus))
2726 spd = (sstatus >> 4) & 0xf;
2727 else
2728 spd = link->sata_spd;
2729
2730 mask = link->sata_spd_limit;
2731 if (mask <= 1)
2732 return -EINVAL;
2733
2734 /* unconditionally mask off the highest bit */
2735 bit = fls(mask) - 1;
2736 mask &= ~(1 << bit);
2737
2738 /* Mask off all speeds higher than or equal to the current
2739 * one. Force 1.5Gbps if current SPD is not available.
2740 */
2741 if (spd > 1)
2742 mask &= (1 << (spd - 1)) - 1;
2743 else
2744 mask &= 1;
2745
2746 /* were we already at the bottom? */
2747 if (!mask)
2748 return -EINVAL;
2749
2750 if (spd_limit) {
2751 if (mask & ((1 << spd_limit) - 1))
2752 mask &= (1 << spd_limit) - 1;
2753 else {
2754 bit = ffs(mask) - 1;
2755 mask = 1 << bit;
2756 }
2757 }
2758
2759 link->sata_spd_limit = mask;
2760
2761 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2762 sata_spd_string(fls(mask)));
2763
2764 return 0;
2765 }
2766
2767 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2768 {
2769 struct ata_link *host_link = &link->ap->link;
2770 u32 limit, target, spd;
2771
2772 limit = link->sata_spd_limit;
2773
2774 /* Don't configure downstream link faster than upstream link.
2775 * It doesn't speed up anything and some PMPs choke on such
2776 * configuration.
2777 */
2778 if (!ata_is_host_link(link) && host_link->sata_spd)
2779 limit &= (1 << host_link->sata_spd) - 1;
2780
2781 if (limit == UINT_MAX)
2782 target = 0;
2783 else
2784 target = fls(limit);
2785
2786 spd = (*scontrol >> 4) & 0xf;
2787 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2788
2789 return spd != target;
2790 }
2791
2792 /**
2793 * sata_set_spd_needed - is SATA spd configuration needed
2794 * @link: Link in question
2795 *
2796 * Test whether the spd limit in SControl matches
2797 * @link->sata_spd_limit. This function is used to determine
2798 * whether hardreset is necessary to apply SATA spd
2799 * configuration.
2800 *
2801 * LOCKING:
2802 * Inherited from caller.
2803 *
2804 * RETURNS:
2805 * 1 if SATA spd configuration is needed, 0 otherwise.
2806 */
2807 static int sata_set_spd_needed(struct ata_link *link)
2808 {
2809 u32 scontrol;
2810
2811 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2812 return 1;
2813
2814 return __sata_set_spd_needed(link, &scontrol);
2815 }
2816
2817 /**
2818 * sata_set_spd - set SATA spd according to spd limit
2819 * @link: Link to set SATA spd for
2820 *
2821 * Set SATA spd of @link according to sata_spd_limit.
2822 *
2823 * LOCKING:
2824 * Inherited from caller.
2825 *
2826 * RETURNS:
2827 * 0 if spd doesn't need to be changed, 1 if spd has been
2828 * changed. Negative errno if SCR registers are inaccessible.
2829 */
2830 int sata_set_spd(struct ata_link *link)
2831 {
2832 u32 scontrol;
2833 int rc;
2834
2835 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2836 return rc;
2837
2838 if (!__sata_set_spd_needed(link, &scontrol))
2839 return 0;
2840
2841 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2842 return rc;
2843
2844 return 1;
2845 }
2846
2847 /*
2848 * This mode timing computation functionality is ported over from
2849 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2850 */
2851 /*
2852 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2853 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2854 * for UDMA6, which is currently supported only by Maxtor drives.
2855 *
2856 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2857 */
2858
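/*
 * Each entry below is { mode, setup, act8b, rec8b, cyc8b, active, recover,
 * dmack_hold, cycle, udma }, matching the field order of struct ata_timing
 * (see <linux/libata.h>).  All values are in nanoseconds.
 */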
2859 static const struct ata_timing ata_timing[] = {
2860 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2861 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2862 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2863 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2864 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2865 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2866 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2867 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2868
2869 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2870 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2871 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2872
2873 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2874 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2875 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2876 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2877 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2878
2879 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2880 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2881 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2882 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2883 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2884 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2885 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2886 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
2887
2888 { 0xFF }
2889 };
2890
2891 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2892 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
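/*
 * ENOUGH() is a ceiling division; EZ() additionally maps 0 to 0 so that
 * unused fields stay zero.  Worked example, assuming T is the bus clock
 * period in picoseconds (as implied by the * 1000 scaling below):
 * quantizing a 70ns active time on a ~33 MHz bus (T = 30000) gives
 *
 *	EZ(70 * 1000, 30000) == ENOUGH(70000, 30000) == 3 clock cycles
 */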
2893
2894 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2895 {
2896 q->setup = EZ(t->setup * 1000, T);
2897 q->act8b = EZ(t->act8b * 1000, T);
2898 q->rec8b = EZ(t->rec8b * 1000, T);
2899 q->cyc8b = EZ(t->cyc8b * 1000, T);
2900 q->active = EZ(t->active * 1000, T);
2901 q->recover = EZ(t->recover * 1000, T);
2902 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2903 q->cycle = EZ(t->cycle * 1000, T);
2904 q->udma = EZ(t->udma * 1000, UT);
2905 }
2906
2907 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2908 struct ata_timing *m, unsigned int what)
2909 {
2910 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2911 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2912 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2913 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2914 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2915 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2916 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2917 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2918 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2919 }
2920
2921 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2922 {
2923 const struct ata_timing *t = ata_timing;
2924
2925 while (xfer_mode > t->mode)
2926 t++;
2927
2928 if (xfer_mode == t->mode)
2929 return t;
2930 return NULL;
2931 }
2932
2933 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2934 struct ata_timing *t, int T, int UT)
2935 {
2936 const u16 *id = adev->id;
2937 const struct ata_timing *s;
2938 struct ata_timing p;
2939
2940 /*
2941 * Find the mode.
2942 */
2943
2944 if (!(s = ata_timing_find_mode(speed)))
2945 return -EINVAL;
2946
2947 memcpy(t, s, sizeof(*s));
2948
2949 /*
2950 * If the drive is an EIDE drive, it can tell us it needs extended
2951 * PIO/MW_DMA cycle timing.
2952 */
2953
2954 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2955 memset(&p, 0, sizeof(p));
2956
2957 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2958 if (speed <= XFER_PIO_2)
2959 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
2960 else if ((speed <= XFER_PIO_4) ||
2961 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
2962 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
2963 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
2964 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
2965
2966 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2967 }
2968
2969 /*
2970 * Convert the timing to bus clock counts.
2971 */
2972
2973 ata_timing_quantize(t, t, T, UT);
2974
2975 /*
2976 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2977 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2978 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2979 */
2980
2981 if (speed > XFER_PIO_6) {
2982 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2983 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2984 }
2985
2986 /*
2987 * Lengthen active & recovery time so that cycle time is correct.
2988 */
2989
2990 if (t->act8b + t->rec8b < t->cyc8b) {
2991 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2992 t->rec8b = t->cyc8b - t->act8b;
2993 }
2994
2995 if (t->active + t->recover < t->cycle) {
2996 t->active += (t->cycle - (t->active + t->recover)) / 2;
2997 t->recover = t->cycle - t->active;
2998 }
2999
3000 	/* In a few cases quantisation may produce enough errors to
3001 	   leave t->cycle too low for the sum of active and recovery;
3002 	   if so, we must correct this. */
3003 if (t->active + t->recover > t->cycle)
3004 t->cycle = t->active + t->recover;
3005
3006 return 0;
3007 }
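/*
 * Illustrative sketch only: a PATA controller driver would typically call
 * ata_timing_compute() from its ->set_piomode() / ->set_dmamode() hooks and
 * program the resulting clock counts into chip registers.  The 30000ps
 * (~33 MHz) clock period and my_program_timings() are hypothetical:
 *
 *	struct ata_timing t;
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, 30000, 30000))
 *		my_program_timings(ap, adev, &t);
 */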
3008
3009 /**
3010 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3011 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3012 * @cycle: cycle duration in ns
3013 *
3014 * Return matching xfer mode for @cycle. The returned mode is of
3015 * the transfer type specified by @xfer_shift. If @cycle is too
3016 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3017  *	than the fastest known mode, the fastest mode is returned.
3018 *
3019 * LOCKING:
3020 * None.
3021 *
3022 * RETURNS:
3023 * Matching xfer_mode, 0xff if no match found.
3024 */
3025 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3026 {
3027 u8 base_mode = 0xff, last_mode = 0xff;
3028 const struct ata_xfer_ent *ent;
3029 const struct ata_timing *t;
3030
3031 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3032 if (ent->shift == xfer_shift)
3033 base_mode = ent->base;
3034
3035 for (t = ata_timing_find_mode(base_mode);
3036 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3037 unsigned short this_cycle;
3038
3039 switch (xfer_shift) {
3040 case ATA_SHIFT_PIO:
3041 case ATA_SHIFT_MWDMA:
3042 this_cycle = t->cycle;
3043 break;
3044 case ATA_SHIFT_UDMA:
3045 this_cycle = t->udma;
3046 break;
3047 default:
3048 return 0xff;
3049 }
3050
3051 if (cycle > this_cycle)
3052 break;
3053
3054 last_mode = t->mode;
3055 }
3056
3057 return last_mode;
3058 }
3059
3060 /**
3061 * ata_down_xfermask_limit - adjust dev xfer masks downward
3062 * @dev: Device to adjust xfer masks
3063 * @sel: ATA_DNXFER_* selector
3064 *
3065 * Adjust xfer masks of @dev downward. Note that this function
3066 * does not apply the change. Invoking ata_set_mode() afterwards
3067 * will apply the limit.
3068 *
3069 * LOCKING:
3070 * Inherited from caller.
3071 *
3072 * RETURNS:
3073 * 0 on success, negative errno on failure
3074 */
3075 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3076 {
3077 char buf[32];
3078 unsigned long orig_mask, xfer_mask;
3079 unsigned long pio_mask, mwdma_mask, udma_mask;
3080 int quiet, highbit;
3081
3082 quiet = !!(sel & ATA_DNXFER_QUIET);
3083 sel &= ~ATA_DNXFER_QUIET;
3084
3085 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3086 dev->mwdma_mask,
3087 dev->udma_mask);
3088 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3089
3090 switch (sel) {
3091 case ATA_DNXFER_PIO:
3092 highbit = fls(pio_mask) - 1;
3093 pio_mask &= ~(1 << highbit);
3094 break;
3095
3096 case ATA_DNXFER_DMA:
3097 if (udma_mask) {
3098 highbit = fls(udma_mask) - 1;
3099 udma_mask &= ~(1 << highbit);
3100 if (!udma_mask)
3101 return -ENOENT;
3102 } else if (mwdma_mask) {
3103 highbit = fls(mwdma_mask) - 1;
3104 mwdma_mask &= ~(1 << highbit);
3105 if (!mwdma_mask)
3106 return -ENOENT;
3107 }
3108 break;
3109
3110 case ATA_DNXFER_40C:
3111 udma_mask &= ATA_UDMA_MASK_40C;
3112 break;
3113
3114 case ATA_DNXFER_FORCE_PIO0:
3115 pio_mask &= 1;
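		/* fall through */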
3116 case ATA_DNXFER_FORCE_PIO:
3117 mwdma_mask = 0;
3118 udma_mask = 0;
3119 break;
3120
3121 default:
3122 BUG();
3123 }
3124
3125 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3126
3127 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3128 return -ENOENT;
3129
3130 if (!quiet) {
3131 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3132 snprintf(buf, sizeof(buf), "%s:%s",
3133 ata_mode_string(xfer_mask),
3134 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3135 else
3136 snprintf(buf, sizeof(buf), "%s",
3137 ata_mode_string(xfer_mask));
3138
3139 ata_dev_printk(dev, KERN_WARNING,
3140 "limiting speed to %s\n", buf);
3141 }
3142
3143 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3144 &dev->udma_mask);
3145
3146 return 0;
3147 }
3148
3149 static int ata_dev_set_mode(struct ata_device *dev)
3150 {
3151 struct ata_port *ap = dev->link->ap;
3152 struct ata_eh_context *ehc = &dev->link->eh_context;
3153 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3154 const char *dev_err_whine = "";
3155 int ign_dev_err = 0;
3156 unsigned int err_mask = 0;
3157 int rc;
3158
3159 dev->flags &= ~ATA_DFLAG_PIO;
3160 if (dev->xfer_shift == ATA_SHIFT_PIO)
3161 dev->flags |= ATA_DFLAG_PIO;
3162
3163 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3164 dev_err_whine = " (SET_XFERMODE skipped)";
3165 else {
3166 if (nosetxfer)
3167 ata_dev_printk(dev, KERN_WARNING,
3168 "NOSETXFER but PATA detected - can't "
3169 "skip SETXFER, might malfunction\n");
3170 err_mask = ata_dev_set_xfermode(dev);
3171 }
3172
3173 if (err_mask & ~AC_ERR_DEV)
3174 goto fail;
3175
3176 /* revalidate */
3177 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3178 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3179 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3180 if (rc)
3181 return rc;
3182
3183 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3184 /* Old CFA may refuse this command, which is just fine */
3185 if (ata_id_is_cfa(dev->id))
3186 ign_dev_err = 1;
3187 /* Catch several broken garbage emulations plus some pre
3188 ATA devices */
3189 if (ata_id_major_version(dev->id) == 0 &&
3190 dev->pio_mode <= XFER_PIO_2)
3191 ign_dev_err = 1;
3192 /* Some very old devices and some bad newer ones fail
3193 any kind of SET_XFERMODE request but support PIO0-2
3194 timings and no IORDY */
3195 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3196 ign_dev_err = 1;
3197 }
3198 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3199 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3200 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3201 dev->dma_mode == XFER_MW_DMA_0 &&
3202 (dev->id[63] >> 8) & 1)
3203 ign_dev_err = 1;
3204
3205 /* if the device is actually configured correctly, ignore dev err */
3206 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3207 ign_dev_err = 1;
3208
3209 if (err_mask & AC_ERR_DEV) {
3210 if (!ign_dev_err)
3211 goto fail;
3212 else
3213 dev_err_whine = " (device error ignored)";
3214 }
3215
3216 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3217 dev->xfer_shift, (int)dev->xfer_mode);
3218
3219 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3220 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3221 dev_err_whine);
3222
3223 return 0;
3224
3225 fail:
3226 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3227 "(err_mask=0x%x)\n", err_mask);
3228 return -EIO;
3229 }
3230
3231 /**
3232 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3233 * @link: link on which timings will be programmed
3234 * @r_failed_dev: out parameter for failed device
3235 *
3236 * Standard implementation of the function used to tune and set
3237 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3238 * ata_dev_set_mode() fails, pointer to the failing device is
3239 * returned in @r_failed_dev.
3240 *
3241 * LOCKING:
3242 * PCI/etc. bus probe sem.
3243 *
3244 * RETURNS:
3245 * 0 on success, negative errno otherwise
3246 */
3247
3248 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3249 {
3250 struct ata_port *ap = link->ap;
3251 struct ata_device *dev;
3252 int rc = 0, used_dma = 0, found = 0;
3253
3254 /* step 1: calculate xfer_mask */
3255 ata_for_each_dev(dev, link, ENABLED) {
3256 unsigned long pio_mask, dma_mask;
3257 unsigned int mode_mask;
3258
3259 mode_mask = ATA_DMA_MASK_ATA;
3260 if (dev->class == ATA_DEV_ATAPI)
3261 mode_mask = ATA_DMA_MASK_ATAPI;
3262 else if (ata_id_is_cfa(dev->id))
3263 mode_mask = ATA_DMA_MASK_CFA;
3264
3265 ata_dev_xfermask(dev);
3266 ata_force_xfermask(dev);
3267
3268 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3269 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3270
3271 if (libata_dma_mask & mode_mask)
3272 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3273 else
3274 dma_mask = 0;
3275
3276 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3277 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3278
3279 found = 1;
3280 if (ata_dma_enabled(dev))
3281 used_dma = 1;
3282 }
3283 if (!found)
3284 goto out;
3285
3286 /* step 2: always set host PIO timings */
3287 ata_for_each_dev(dev, link, ENABLED) {
3288 if (dev->pio_mode == 0xff) {
3289 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3290 rc = -EINVAL;
3291 goto out;
3292 }
3293
3294 dev->xfer_mode = dev->pio_mode;
3295 dev->xfer_shift = ATA_SHIFT_PIO;
3296 if (ap->ops->set_piomode)
3297 ap->ops->set_piomode(ap, dev);
3298 }
3299
3300 /* step 3: set host DMA timings */
3301 ata_for_each_dev(dev, link, ENABLED) {
3302 if (!ata_dma_enabled(dev))
3303 continue;
3304
3305 dev->xfer_mode = dev->dma_mode;
3306 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3307 if (ap->ops->set_dmamode)
3308 ap->ops->set_dmamode(ap, dev);
3309 }
3310
3311 /* step 4: update devices' xfer mode */
3312 ata_for_each_dev(dev, link, ENABLED) {
3313 rc = ata_dev_set_mode(dev);
3314 if (rc)
3315 goto out;
3316 }
3317
3318 /* Record simplex status. If we selected DMA then the other
3319 * host channels are not permitted to do so.
3320 */
3321 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3322 ap->host->simplex_claimed = ap;
3323
3324 out:
3325 if (rc)
3326 *r_failed_dev = dev;
3327 return rc;
3328 }
3329
3330 /**
3331 * ata_wait_ready - wait for link to become ready
3332 * @link: link to be waited on
3333 * @deadline: deadline jiffies for the operation
3334 * @check_ready: callback to check link readiness
3335 *
3336 * Wait for @link to become ready. @check_ready should return
3337 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3338 * link doesn't seem to be occupied, other errno for other error
3339 * conditions.
3340 *
3341 * Transient -ENODEV conditions are allowed for
3342 * ATA_TMOUT_FF_WAIT.
3343 *
3344 * LOCKING:
3345 * EH context.
3346 *
3347 * RETURNS:
3348  *	0 if @link is ready before @deadline; otherwise, -errno.
3349 */
3350 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3351 int (*check_ready)(struct ata_link *link))
3352 {
3353 unsigned long start = jiffies;
3354 unsigned long nodev_deadline;
3355 int warned = 0;
3356
3357 /* choose which 0xff timeout to use, read comment in libata.h */
3358 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3359 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3360 else
3361 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3362
3363 /* Slave readiness can't be tested separately from master. On
3364 * M/S emulation configuration, this function should be called
3365 * only on the master and it will handle both master and slave.
3366 */
3367 WARN_ON(link == link->ap->slave_link);
3368
3369 if (time_after(nodev_deadline, deadline))
3370 nodev_deadline = deadline;
3371
3372 while (1) {
3373 unsigned long now = jiffies;
3374 int ready, tmp;
3375
3376 ready = tmp = check_ready(link);
3377 if (ready > 0)
3378 return 0;
3379
3380 /*
3381 * -ENODEV could be transient. Ignore -ENODEV if link
3382 * is online. Also, some SATA devices take a long
3383 * time to clear 0xff after reset. Wait for
3384 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3385 * offline.
3386 *
3387 * Note that some PATA controllers (pata_ali) explode
3388 * if status register is read more than once when
3389 * there's no device attached.
3390 */
3391 if (ready == -ENODEV) {
3392 if (ata_link_online(link))
3393 ready = 0;
3394 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3395 !ata_link_offline(link) &&
3396 time_before(now, nodev_deadline))
3397 ready = 0;
3398 }
3399
3400 if (ready)
3401 return ready;
3402 if (time_after(now, deadline))
3403 return -EBUSY;
3404
3405 if (!warned && time_after(now, start + 5 * HZ) &&
3406 (deadline - now > 3 * HZ)) {
3407 ata_link_printk(link, KERN_WARNING,
3408 "link is slow to respond, please be patient "
3409 "(ready=%d)\n", tmp);
3410 warned = 1;
3411 }
3412
3413 ata_msleep(link->ap, 50);
3414 }
3415 }
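/*
 * Illustrative sketch only: a minimal @check_ready callback for a
 * controller whose taskfile Status register can be read directly.
 * my_read_status() is a hypothetical LLD helper; ata_check_ready() is the
 * libata helper that maps a Status value onto the convention described
 * above:
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		return ata_check_ready(my_read_status(link->ap));
 *	}
 */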
3416
3417 /**
3418 * ata_wait_after_reset - wait for link to become ready after reset
3419 * @link: link to be waited on
3420 * @deadline: deadline jiffies for the operation
3421 * @check_ready: callback to check link readiness
3422 *
3423 * Wait for @link to become ready after reset.
3424 *
3425 * LOCKING:
3426 * EH context.
3427 *
3428 * RETURNS:
3429  *	0 if @link is ready before @deadline; otherwise, -errno.
3430 */
3431 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3432 int (*check_ready)(struct ata_link *link))
3433 {
3434 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3435
3436 return ata_wait_ready(link, deadline, check_ready);
3437 }
3438
3439 /**
3440 * sata_link_debounce - debounce SATA phy status
3441 * @link: ATA link to debounce SATA phy status for
3442  * @params: timing parameters { interval, duration, timeout } in msec
3443 * @deadline: deadline jiffies for the operation
3444 *
3445 * Make sure SStatus of @link reaches stable state, determined by
3446 * holding the same value where DET is not 1 for @duration polled
3447  *	every @interval, before @timeout. The timeout constrains the
3448  *	beginning of the stable state. Because DET gets stuck at 1 on
3449  *	some controllers after hot unplugging, this function waits
3450  *	until timeout and then returns 0 if DET is stable at 1.
3451 *
3452 * @timeout is further limited by @deadline. The sooner of the
3453 * two is used.
3454 *
3455 * LOCKING:
3456 * Kernel thread context (may sleep)
3457 *
3458 * RETURNS:
3459 * 0 on success, -errno on failure.
3460 */
3461 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3462 unsigned long deadline)
3463 {
3464 unsigned long interval = params[0];
3465 unsigned long duration = params[1];
3466 unsigned long last_jiffies, t;
3467 u32 last, cur;
3468 int rc;
3469
3470 t = ata_deadline(jiffies, params[2]);
3471 if (time_before(t, deadline))
3472 deadline = t;
3473
3474 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3475 return rc;
3476 cur &= 0xf;
3477
3478 last = cur;
3479 last_jiffies = jiffies;
3480
3481 while (1) {
3482 ata_msleep(link->ap, interval);
3483 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3484 return rc;
3485 cur &= 0xf;
3486
3487 /* DET stable? */
3488 if (cur == last) {
3489 if (cur == 1 && time_before(jiffies, deadline))
3490 continue;
3491 if (time_after(jiffies,
3492 ata_deadline(last_jiffies, duration)))
3493 return 0;
3494 continue;
3495 }
3496
3497 /* unstable, start over */
3498 last = cur;
3499 last_jiffies = jiffies;
3500
3501 /* Check deadline. If debouncing failed, return
3502 * -EPIPE to tell upper layer to lower link speed.
3503 */
3504 if (time_after(jiffies, deadline))
3505 return -EPIPE;
3506 }
3507 }
3508
3509 /**
3510 * sata_link_resume - resume SATA link
3511 * @link: ATA link to resume SATA
3512  * @params: timing parameters { interval, duration, timeout } in msec
3513 * @deadline: deadline jiffies for the operation
3514 *
3515 * Resume SATA phy @link and debounce it.
3516 *
3517 * LOCKING:
3518 * Kernel thread context (may sleep)
3519 *
3520 * RETURNS:
3521 * 0 on success, -errno on failure.
3522 */
3523 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3524 unsigned long deadline)
3525 {
3526 int tries = ATA_LINK_RESUME_TRIES;
3527 u32 scontrol, serror;
3528 int rc;
3529
3530 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3531 return rc;
3532
3533 /*
3534 * Writes to SControl sometimes get ignored under certain
3535 * controllers (ata_piix SIDPR). Make sure DET actually is
3536 * cleared.
3537 */
3538 do {
3539 scontrol = (scontrol & 0x0f0) | 0x300;
3540 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3541 return rc;
3542 /*
3543 * Some PHYs react badly if SStatus is pounded
3544 * immediately after resuming. Delay 200ms before
3545 * debouncing.
3546 */
3547 ata_msleep(link->ap, 200);
3548
3549 /* is SControl restored correctly? */
3550 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3551 return rc;
3552 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3553
3554 if ((scontrol & 0xf0f) != 0x300) {
3555 ata_link_printk(link, KERN_ERR,
3556 "failed to resume link (SControl %X)\n",
3557 scontrol);
3558 return 0;
3559 }
3560
3561 if (tries < ATA_LINK_RESUME_TRIES)
3562 ata_link_printk(link, KERN_WARNING,
3563 "link resume succeeded after %d retries\n",
3564 ATA_LINK_RESUME_TRIES - tries);
3565
3566 if ((rc = sata_link_debounce(link, params, deadline)))
3567 return rc;
3568
3569 /* clear SError, some PHYs require this even for SRST to work */
3570 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3571 rc = sata_scr_write(link, SCR_ERROR, serror);
3572
3573 return rc != -EINVAL ? rc : 0;
3574 }
3575
3576 /**
3577 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3578 * @link: ATA link to manipulate SControl for
3579 * @policy: LPM policy to configure
3580 * @spm_wakeup: initiate LPM transition to active state
3581 *
3582 * Manipulate the IPM field of the SControl register of @link
3583 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3584 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3585 * the link. This function also clears PHYRDY_CHG before
3586 * returning.
3587 *
3588 * LOCKING:
3589 * EH context.
3590 *
3591 * RETURNS:
3592  *	0 on success, -errno otherwise.
3593 */
3594 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3595 bool spm_wakeup)
3596 {
3597 struct ata_eh_context *ehc = &link->eh_context;
3598 bool woken_up = false;
3599 u32 scontrol;
3600 int rc;
3601
3602 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3603 if (rc)
3604 return rc;
3605
3606 switch (policy) {
3607 case ATA_LPM_MAX_POWER:
3608 /* disable all LPM transitions */
3609 scontrol |= (0x3 << 8);
3610 /* initiate transition to active state */
3611 if (spm_wakeup) {
3612 scontrol |= (0x4 << 12);
3613 woken_up = true;
3614 }
3615 break;
3616 case ATA_LPM_MED_POWER:
3617 /* allow LPM to PARTIAL */
3618 scontrol &= ~(0x1 << 8);
3619 scontrol |= (0x2 << 8);
3620 break;
3621 case ATA_LPM_MIN_POWER:
3622 /* no restrictions on LPM transitions */
3623 scontrol &= ~(0x3 << 8);
3624 break;
3625 default:
3626 WARN_ON(1);
3627 }
3628
3629 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3630 if (rc)
3631 return rc;
3632
3633 /* give the link time to transit out of LPM state */
3634 if (woken_up)
3635 msleep(10);
3636
3637 /* clear PHYRDY_CHG from SError */
3638 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3639 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3640 }
3641
3642 /**
3643 * ata_std_prereset - prepare for reset
3644 * @link: ATA link to be reset
3645 * @deadline: deadline jiffies for the operation
3646 *
3647 * @link is about to be reset. Initialize it. Failure from
3648 * prereset makes libata abort whole reset sequence and give up
3649 * that port, so prereset should be best-effort. It does its
3650 * best to prepare for reset sequence but if things go wrong, it
3651 * should just whine, not fail.
3652 *
3653 * LOCKING:
3654 * Kernel thread context (may sleep)
3655 *
3656 * RETURNS:
3657 * 0 on success, -errno otherwise.
3658 */
3659 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3660 {
3661 struct ata_port *ap = link->ap;
3662 struct ata_eh_context *ehc = &link->eh_context;
3663 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3664 int rc;
3665
3666 /* if we're about to do hardreset, nothing more to do */
3667 if (ehc->i.action & ATA_EH_HARDRESET)
3668 return 0;
3669
3670 /* if SATA, resume link */
3671 if (ap->flags & ATA_FLAG_SATA) {
3672 rc = sata_link_resume(link, timing, deadline);
3673 /* whine about phy resume failure but proceed */
3674 if (rc && rc != -EOPNOTSUPP)
3675 ata_link_printk(link, KERN_WARNING, "failed to resume "
3676 "link for reset (errno=%d)\n", rc);
3677 }
3678
3679 /* no point in trying softreset on offline link */
3680 if (ata_phys_link_offline(link))
3681 ehc->i.action &= ~ATA_EH_SOFTRESET;
3682
3683 return 0;
3684 }
3685
3686 /**
3687 * sata_link_hardreset - reset link via SATA phy reset
3688 * @link: link to reset
3689  * @timing: timing parameters { interval, duration, timeout } in msec
3690 * @deadline: deadline jiffies for the operation
3691 * @online: optional out parameter indicating link onlineness
3692 * @check_ready: optional callback to check link readiness
3693 *
3694 * SATA phy-reset @link using DET bits of SControl register.
3695 * After hardreset, link readiness is waited upon using
3696 * ata_wait_ready() if @check_ready is specified. LLDs are
3697  *	allowed to not specify @check_ready and wait themselves after this
3698 * function returns. Device classification is LLD's
3699 * responsibility.
3700 *
3701 * *@online is set to one iff reset succeeded and @link is online
3702 * after reset.
3703 *
3704 * LOCKING:
3705 * Kernel thread context (may sleep)
3706 *
3707 * RETURNS:
3708 * 0 on success, -errno otherwise.
3709 */
3710 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3711 unsigned long deadline,
3712 bool *online, int (*check_ready)(struct ata_link *))
3713 {
3714 u32 scontrol;
3715 int rc;
3716
3717 DPRINTK("ENTER\n");
3718
3719 if (online)
3720 *online = false;
3721
3722 if (sata_set_spd_needed(link)) {
3723 /* SATA spec says nothing about how to reconfigure
3724 * spd. To be on the safe side, turn off phy during
3725 * reconfiguration. This works for at least ICH7 AHCI
3726 * and Sil3124.
3727 */
3728 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3729 goto out;
3730
3731 scontrol = (scontrol & 0x0f0) | 0x304;
3732
3733 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3734 goto out;
3735
3736 sata_set_spd(link);
3737 }
3738
3739 /* issue phy wake/reset */
3740 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3741 goto out;
3742
3743 scontrol = (scontrol & 0x0f0) | 0x301;
3744
3745 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3746 goto out;
3747
3748 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3749 * 10.4.2 says at least 1 ms.
3750 */
3751 ata_msleep(link->ap, 1);
3752
3753 /* bring link back */
3754 rc = sata_link_resume(link, timing, deadline);
3755 if (rc)
3756 goto out;
3757 /* if link is offline nothing more to do */
3758 if (ata_phys_link_offline(link))
3759 goto out;
3760
3761 /* Link is online. From this point, -ENODEV too is an error. */
3762 if (online)
3763 *online = true;
3764
3765 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3766 /* If PMP is supported, we have to do follow-up SRST.
3767 * Some PMPs don't send D2H Reg FIS after hardreset if
3768 * the first port is empty. Wait only for
3769 * ATA_TMOUT_PMP_SRST_WAIT.
3770 */
3771 if (check_ready) {
3772 unsigned long pmp_deadline;
3773
3774 pmp_deadline = ata_deadline(jiffies,
3775 ATA_TMOUT_PMP_SRST_WAIT);
3776 if (time_after(pmp_deadline, deadline))
3777 pmp_deadline = deadline;
3778 ata_wait_ready(link, pmp_deadline, check_ready);
3779 }
3780 rc = -EAGAIN;
3781 goto out;
3782 }
3783
3784 rc = 0;
3785 if (check_ready)
3786 rc = ata_wait_ready(link, deadline, check_ready);
3787 out:
3788 if (rc && rc != -EAGAIN) {
3789 /* online is set iff link is online && reset succeeded */
3790 if (online)
3791 *online = false;
3792 ata_link_printk(link, KERN_ERR,
3793 "COMRESET failed (errno=%d)\n", rc);
3794 }
3795 DPRINTK("EXIT, rc=%d\n", rc);
3796 return rc;
3797 }
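
/*
 * Illustrative sketch (not part of the original file): a typical LLD
 * ->hardreset callback wraps sata_link_hardreset() and may supply its
 * own readiness test.  my_hardreset, my_check_ready and
 * my_port_is_ready are hypothetical names standing in for a driver's
 * controller-specific routines.
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		// controller-specific: >0 ready, 0 not yet, -errno on error
 *		return my_port_is_ready(link->ap);
 *	}
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *			sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline, &online,
 *					   my_check_ready);
 *	}
 */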
3798
3799 /**
3800 * sata_std_hardreset - COMRESET w/o waiting or classification
3801 * @link: link to reset
3802 * @class: resulting class of attached device
3803 * @deadline: deadline jiffies for the operation
3804 *
3805 * Standard SATA COMRESET w/o waiting or classification.
3806 *
3807 * LOCKING:
3808 * Kernel thread context (may sleep)
3809 *
3810 * RETURNS:
3811 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3812 */
3813 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3814 unsigned long deadline)
3815 {
3816 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3817 bool online;
3818 int rc;
3819
3820 /* do hardreset */
3821 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3822 return online ? -EAGAIN : rc;
3823 }
3824
3825 /**
3826 * ata_std_postreset - standard postreset callback
3827 * @link: the target ata_link
3828 * @classes: classes of attached devices
3829 *
3830 * This function is invoked after a successful reset. Note that
3831 * the device might have been reset more than once using
3832 * different reset methods before postreset is invoked.
3833 *
3834 * LOCKING:
3835 * Kernel thread context (may sleep)
3836 */
3837 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3838 {
3839 u32 serror;
3840
3841 DPRINTK("ENTER\n");
3842
3843 /* reset complete, clear SError */
3844 if (!sata_scr_read(link, SCR_ERROR, &serror))
3845 sata_scr_write(link, SCR_ERROR, serror);
3846
3847 /* print link status */
3848 sata_print_link_status(link);
3849
3850 DPRINTK("EXIT\n");
3851 }
3852
3853 /**
3854 * ata_dev_same_device - Determine whether new ID matches configured device
3855 * @dev: device to compare against
3856 * @new_class: class of the new device
3857 * @new_id: IDENTIFY page of the new device
3858 *
3859 * Compare @new_class and @new_id against @dev and determine
3860 * whether @dev is the device indicated by @new_class and
3861 * @new_id.
3862 *
3863 * LOCKING:
3864 * None.
3865 *
3866 * RETURNS:
3867 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3868 */
3869 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3870 const u16 *new_id)
3871 {
3872 const u16 *old_id = dev->id;
3873 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3874 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3875
3876 if (dev->class != new_class) {
3877 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3878 dev->class, new_class);
3879 return 0;
3880 }
3881
3882 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3883 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3884 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3885 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3886
3887 if (strcmp(model[0], model[1])) {
3888 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3889 "'%s' != '%s'\n", model[0], model[1]);
3890 return 0;
3891 }
3892
3893 if (strcmp(serial[0], serial[1])) {
3894 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3895 "'%s' != '%s'\n", serial[0], serial[1]);
3896 return 0;
3897 }
3898
3899 return 1;
3900 }
3901
3902 /**
3903 * ata_dev_reread_id - Re-read IDENTIFY data
3904 * @dev: target ATA device
3905 * @readid_flags: read ID flags
3906 *
3907 * Re-read IDENTIFY page and make sure @dev is still attached to
3908 * the port.
3909 *
3910 * LOCKING:
3911 * Kernel thread context (may sleep)
3912 *
3913 * RETURNS:
3914 * 0 on success, negative errno otherwise
3915 */
3916 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3917 {
3918 unsigned int class = dev->class;
3919 u16 *id = (void *)dev->link->ap->sector_buf;
3920 int rc;
3921
3922 /* read ID data */
3923 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3924 if (rc)
3925 return rc;
3926
3927 /* is the device still there? */
3928 if (!ata_dev_same_device(dev, class, id))
3929 return -ENODEV;
3930
3931 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3932 return 0;
3933 }
3934
3935 /**
3936 * ata_dev_revalidate - Revalidate ATA device
3937 * @dev: device to revalidate
3938 * @new_class: new class code
3939 * @readid_flags: read ID flags
3940 *
3941 * Re-read IDENTIFY page, make sure @dev is still attached to the
3942 * port and reconfigure it according to the new IDENTIFY page.
3943 *
3944 * LOCKING:
3945 * Kernel thread context (may sleep)
3946 *
3947 * RETURNS:
3948 * 0 on success, negative errno otherwise
3949 */
3950 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3951 unsigned int readid_flags)
3952 {
3953 u64 n_sectors = dev->n_sectors;
3954 u64 n_native_sectors = dev->n_native_sectors;
3955 int rc;
3956
3957 if (!ata_dev_enabled(dev))
3958 return -ENODEV;
3959
3960 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3961 if (ata_class_enabled(new_class) &&
3962 new_class != ATA_DEV_ATA &&
3963 new_class != ATA_DEV_ATAPI &&
3964 new_class != ATA_DEV_SEMB) {
3965 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3966 dev->class, new_class);
3967 rc = -ENODEV;
3968 goto fail;
3969 }
3970
3971 /* re-read ID */
3972 rc = ata_dev_reread_id(dev, readid_flags);
3973 if (rc)
3974 goto fail;
3975
3976 /* configure device according to the new ID */
3977 rc = ata_dev_configure(dev);
3978 if (rc)
3979 goto fail;
3980
3981 /* verify n_sectors hasn't changed */
3982 if (dev->class != ATA_DEV_ATA || !n_sectors ||
3983 dev->n_sectors == n_sectors)
3984 return 0;
3985
3986 /* n_sectors has changed */
3987 ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
3988 (unsigned long long)n_sectors,
3989 (unsigned long long)dev->n_sectors);
3990
3991 /*
3992 * Something could have caused HPA to be unlocked
3993 * involuntarily. If n_native_sectors hasn't changed and the
3994 * new size matches it, keep the device.
3995 */
3996 if (dev->n_native_sectors == n_native_sectors &&
3997 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3998 ata_dev_printk(dev, KERN_WARNING,
3999 "new n_sectors matches native, probably "
4000 "late HPA unlock, n_sectors updated\n");
4001 /* use the larger n_sectors */
4002 return 0;
4003 }
4004
4005 /*
4006 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4007 * unlocking HPA in those cases.
4008 *
4009 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4010 */
4011 if (dev->n_native_sectors == n_native_sectors &&
4012 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4013 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4014 ata_dev_printk(dev, KERN_WARNING,
4015 "old n_sectors matches native, probably "
4016 "late HPA lock, will try to unlock HPA\n");
4017 /* try unlocking HPA */
4018 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4019 rc = -EIO;
4020 } else
4021 rc = -ENODEV;
4022
4023 /* restore original n_[native_]sectors and fail */
4024 dev->n_native_sectors = n_native_sectors;
4025 dev->n_sectors = n_sectors;
4026 fail:
4027 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4028 return rc;
4029 }
4030
4031 struct ata_blacklist_entry {
4032 const char *model_num;
4033 const char *model_rev;
4034 unsigned long horkage;
4035 };
4036
4037 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4038 /* Devices with DMA related problems under Linux */
4039 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4040 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4041 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4042 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4043 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4044 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4045 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4046 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4047 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4048 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4049 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4050 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4051 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4052 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4053 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4054 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4055 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4056 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4057 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4058 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4059 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4060 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4061 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4062 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4063 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4064 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4065 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4066 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4067 /* Odd clown on sil3726/4726 PMPs */
4068 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4069
4070 /* Weird ATAPI devices */
4071 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4072 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4073
4074 /* Devices we expect to fail diagnostics */
4075
4076 /* Devices where NCQ should be avoided */
4077 /* NCQ is slow */
4078 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4079 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4080 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4081 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4082 /* NCQ is broken */
4083 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4084 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4085 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4086 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4087 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4088
4089 /* Seagate NCQ + FLUSH CACHE firmware bug */
4090 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4091 ATA_HORKAGE_FIRMWARE_WARN },
4092
4093 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4094 ATA_HORKAGE_FIRMWARE_WARN },
4095
4096 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4097 ATA_HORKAGE_FIRMWARE_WARN },
4098
4099 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4100 ATA_HORKAGE_FIRMWARE_WARN },
4101
4102 /* Blacklist entries taken from Silicon Image 3124/3132
4103 Windows driver .inf file - also several Linux problem reports */
4104 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4105 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4106 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4107
4108 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4109 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4110
4111 /* devices which puke on READ_NATIVE_MAX */
4112 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4113 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4114 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4115 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4116
4117 /* this one allows HPA unlocking but fails IOs on the area */
4118 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4119
4120 /* Devices which report 1 sector over size HPA */
4121 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4122 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4123 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4124
4125 /* Devices which get the IVB wrong */
4126 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4127 /* Maybe we should just blacklist TSSTcorp... */
4128 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4129
4130 /* Devices that do not need bridging limits applied */
4131 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4132
4133 /* Devices which aren't very happy with higher link speeds */
4134 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4135
4136 /*
4137 * Devices which choke on SETXFER. Applies only if both the
4138 * device and controller are SATA.
4139 */
4140 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
4141 { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
4142 { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER },
4143
4144 /* End Marker */
4145 { }
4146 };
4147
4148 /**
4149 * glob_match - match a text string against a glob-style pattern
4150 * @text: the string to be examined
4151 * @pattern: the glob-style pattern to be matched against
4152 *
4153 * Either/both of text and pattern can be empty strings.
4154 *
4155 * Match text against a glob-style pattern, with wildcards and simple sets:
4156 *
4157 * ? matches any single character.
4158 * * matches any run of characters.
4159 * [xyz] matches a single character from the set: x, y, or z.
4160 * [a-d] matches a single character from the range: a, b, c, or d.
4161 * [a-d0-9] matches a single character from either range.
4162 *
4163 * The special characters ?, [, -, or * can be matched using a set, e.g. [*]
4164 * Behaviour with malformed patterns is undefined, though generally reasonable.
4165 *
4166 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
4167 *
4168 * This function uses one level of recursion per '*' in pattern.
4169 * Since it calls _nothing_ else, and has _no_ explicit local variables,
4170 * this will not cause stack problems for any reasonable use here.
4171 *
4172 * RETURNS:
4173 * 0 on match, 1 otherwise.
4174 */
4175 static int glob_match (const char *text, const char *pattern)
4176 {
4177 do {
4178 /* Match single character or a '?' wildcard */
4179 if (*text == *pattern || *pattern == '?') {
4180 if (!*pattern++)
4181 return 0; /* End of both strings: match */
4182 } else {
4183 /* Match single char against a '[' bracketed ']' pattern set */
4184 if (!*text || *pattern != '[')
4185 break; /* Not a pattern set */
4186 while (*++pattern && *pattern != ']' && *text != *pattern) {
4187 if (*pattern == '-' && *(pattern - 1) != '[')
4188 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4189 ++pattern;
4190 break;
4191 }
4192 }
4193 if (!*pattern || *pattern == ']')
4194 return 1; /* No match */
4195 while (*pattern && *pattern++ != ']');
4196 }
4197 } while (*++text && *pattern);
4198
4199 /* Match any run of chars against a '*' wildcard */
4200 if (*pattern == '*') {
4201 if (!*++pattern)
4202 return 0; /* Match: avoid recursion at end of pattern */
4203 /* Loop to handle additional pattern chars after the wildcard */
4204 while (*text) {
4205 if (glob_match(text, pattern) == 0)
4206 return 0; /* Remainder matched */
4207 ++text; /* Absorb (match) this char and try again */
4208 }
4209 }
4210 if (!*text && !*pattern)
4211 return 0; /* End of both strings: match */
4212 return 1; /* No match */
4213 }
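
/*
 * Illustrative examples (documentation only): how blacklist patterns
 * behave with glob_match() above; the return value is 0 on a match.
 *
 *	glob_match("ST380817AS",     "ST380817AS")	== 0	exact match
 *	glob_match("SD15",           "SD1[5-9]")	== 0	range match
 *	glob_match("Maxtor 6L080L4", "Maxtor *")	== 0	'*' absorbs the rest
 *	glob_match("SD14",           "SD1[5-9]")	== 1	no match
 */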
4214
4215 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4216 {
4217 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4218 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4219 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4220
4221 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4222 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4223
4224 while (ad->model_num) {
4225 if (!glob_match(model_num, ad->model_num)) {
4226 if (ad->model_rev == NULL)
4227 return ad->horkage;
4228 if (!glob_match(model_rev, ad->model_rev))
4229 return ad->horkage;
4230 }
4231 ad++;
4232 }
4233 return 0;
4234 }
4235
4236 static int ata_dma_blacklisted(const struct ata_device *dev)
4237 {
4238 /* We don't support polling DMA.
4239 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4240 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4241 */
4242 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4243 (dev->flags & ATA_DFLAG_CDB_INTR))
4244 return 1;
4245 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4246 }
4247
4248 /**
4249 * ata_is_40wire - check drive side detection
4250 * @dev: device
4251 *
4252 * Perform drive side detection decoding, allowing for device vendors
4253 * who can't follow the documentation.
4254 */
4255
4256 static int ata_is_40wire(struct ata_device *dev)
4257 {
4258 if (dev->horkage & ATA_HORKAGE_IVB)
4259 return ata_drive_40wire_relaxed(dev->id);
4260 return ata_drive_40wire(dev->id);
4261 }
4262
4263 /**
4264 * cable_is_40wire - 40/80/SATA decider
4265 * @ap: port to consider
4266 *
4267 * This function encapsulates the policy for speed management
4268 * in one place. At the moment we don't cache the result but
4269 * there is a good case for setting ap->cbl to the result when
4270 * we are called with unknown cables (and figuring out if it
4271 * impacts hotplug at all).
4272 *
4273 * Return 1 if the cable appears to be 40 wire.
4274 */
4275
4276 static int cable_is_40wire(struct ata_port *ap)
4277 {
4278 struct ata_link *link;
4279 struct ata_device *dev;
4280
4281 /* If the controller thinks we are 40 wire, we are. */
4282 if (ap->cbl == ATA_CBL_PATA40)
4283 return 1;
4284
4285 /* If the controller thinks we are 80 wire, we are. */
4286 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4287 return 0;
4288
4289 /* If the system is known to be 40 wire short cable (eg
4290 * laptop), then we allow 80 wire modes even if the drive
4291 * isn't sure.
4292 */
4293 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4294 return 0;
4295
4296 /* If the controller doesn't know, we scan.
4297 *
4298 * Note: We look for all 40 wire detects at this point. Any
4299 * 80 wire detect is taken to be 80 wire cable because
4300 * - in many setups only the one drive (slave if present) will
4301 * give a valid detect
4302 * - if you have a non detect capable drive you don't want it
4303 * to colour the choice
4304 */
4305 ata_for_each_link(link, ap, EDGE) {
4306 ata_for_each_dev(dev, link, ENABLED) {
4307 if (!ata_is_40wire(dev))
4308 return 0;
4309 }
4310 }
4311 return 1;
4312 }
4313
4314 /**
4315 * ata_dev_xfermask - Compute supported xfermask of the given device
4316 * @dev: Device to compute xfermask for
4317 *
4318 * Compute supported xfermask of @dev and store it in
4319 * dev->*_mask. This function is responsible for applying all
4320 * known limits including host controller limits, device
4321 * blacklist, etc...
4322 *
4323 * LOCKING:
4324 * None.
4325 */
4326 static void ata_dev_xfermask(struct ata_device *dev)
4327 {
4328 struct ata_link *link = dev->link;
4329 struct ata_port *ap = link->ap;
4330 struct ata_host *host = ap->host;
4331 unsigned long xfer_mask;
4332
4333 /* controller modes available */
4334 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4335 ap->mwdma_mask, ap->udma_mask);
4336
4337 /* drive modes available */
4338 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4339 dev->mwdma_mask, dev->udma_mask);
4340 xfer_mask &= ata_id_xfermask(dev->id);
4341
4342 /*
4343 * CFA Advanced TrueIDE timings are not allowed on a shared
4344 * cable
4345 */
4346 if (ata_dev_pair(dev)) {
4347 /* No PIO5 or PIO6 */
4348 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4349 /* No MWDMA3 or MWDMA 4 */
4350 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4351 }
4352
4353 if (ata_dma_blacklisted(dev)) {
4354 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4355 ata_dev_printk(dev, KERN_WARNING,
4356 "device is on DMA blacklist, disabling DMA\n");
4357 }
4358
4359 if ((host->flags & ATA_HOST_SIMPLEX) &&
4360 host->simplex_claimed && host->simplex_claimed != ap) {
4361 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4362 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4363 "other device, disabling DMA\n");
4364 }
4365
4366 if (ap->flags & ATA_FLAG_NO_IORDY)
4367 xfer_mask &= ata_pio_mask_no_iordy(dev);
4368
4369 if (ap->ops->mode_filter)
4370 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4371
4372 /* Apply cable rule here. Don't apply it early because when
4373 * we handle hot plug the cable type can itself change.
4374 * Check this last so that we know if the transfer rate was
4375 * solely limited by the cable.
4376 * Unknown or 80 wire cables reported host side are checked
4377 * drive side as well. Cases where we know a 40wire cable
4378 * is used safely for 80 are not checked here.
4379 */
4380 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4381 /* UDMA/44 or higher would be available */
4382 if (cable_is_40wire(ap)) {
4383 ata_dev_printk(dev, KERN_WARNING,
4384 "limited to UDMA/33 due to 40-wire cable\n");
4385 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4386 }
4387
4388 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4389 &dev->mwdma_mask, &dev->udma_mask);
4390 }
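
/*
 * Worked example (illustrative only): the cable clamp above masks the
 * UDMA field with ~(0xF8 << ATA_SHIFT_UDMA).  0xF8 has bits 3-7 set,
 * i.e. UDMA3 (UDMA/44) through UDMA7, so within the 8-bit UDMA field a
 * drive advertising UDMA0-5 on a 40-wire cable is left with
 *
 *	0x3f & ~0xf8 = 0x07	(UDMA0-2, i.e. up to UDMA/33)
 *
 * before the mask is unpacked back into dev->udma_mask.
 */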
4391
4392 /**
4393 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4394 * @dev: Device to which command will be sent
4395 *
4396 * Issue SET FEATURES - XFER MODE command to device @dev
4397 * on its port.
4398 *
4399 * LOCKING:
4400 * PCI/etc. bus probe sem.
4401 *
4402 * RETURNS:
4403 * 0 on success, AC_ERR_* mask otherwise.
4404 */
4405
4406 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4407 {
4408 struct ata_taskfile tf;
4409 unsigned int err_mask;
4410
4411 /* set up set-features taskfile */
4412 DPRINTK("set features - xfer mode\n");
4413
4414 /* Some controllers and ATAPI devices show flaky interrupt
4415 * behavior after setting xfer mode. Use polling instead.
4416 */
4417 ata_tf_init(dev, &tf);
4418 tf.command = ATA_CMD_SET_FEATURES;
4419 tf.feature = SETFEATURES_XFER;
4420 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4421 tf.protocol = ATA_PROT_NODATA;
4422 /* If we are using IORDY we must send the mode setting command */
4423 if (ata_pio_need_iordy(dev))
4424 tf.nsect = dev->xfer_mode;
4425 /* If the device has IORDY and the controller does not - turn it off */
4426 else if (ata_id_has_iordy(dev->id))
4427 tf.nsect = 0x01;
4428 else /* In the ancient relic department - skip all of this */
4429 return 0;
4430
4431 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4432
4433 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4434 return err_mask;
4435 }
4436
4437 /**
4438 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4439 * @dev: Device to which command will be sent
4440 * @enable: Whether to enable or disable the feature
4441 * @feature: The value to place in the sector count field, selecting the feature
4442 *
4443 * Issue SET FEATURES - SATA FEATURES command to device @dev
4444 * on its port, with @feature in the sector count field.
4445 *
4446 * LOCKING:
4447 * PCI/etc. bus probe sem.
4448 *
4449 * RETURNS:
4450 * 0 on success, AC_ERR_* mask otherwise.
4451 */
4452 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4453 {
4454 struct ata_taskfile tf;
4455 unsigned int err_mask;
4456
4457 /* set up set-features taskfile */
4458 DPRINTK("set features - SATA features\n");
4459
4460 ata_tf_init(dev, &tf);
4461 tf.command = ATA_CMD_SET_FEATURES;
4462 tf.feature = enable;
4463 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4464 tf.protocol = ATA_PROT_NODATA;
4465 tf.nsect = feature;
4466
4467 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4468
4469 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4470 return err_mask;
4471 }
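
/*
 * Usage sketch (illustrative): the subcommand goes in @enable and the
 * feature number in the sector count via @feature.  Enabling ATAPI
 * asynchronous notification, for example, looks roughly like:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			"failed to enable SATA AN (err_mask=0x%x)\n", err_mask);
 */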
4472
4473 /**
4474 * ata_dev_init_params - Issue INIT DEV PARAMS command
4475 * @dev: Device to which command will be sent
4476 * @heads: Number of heads (taskfile parameter)
4477 * @sectors: Number of sectors (taskfile parameter)
4478 *
4479 * LOCKING:
4480 * Kernel thread context (may sleep)
4481 *
4482 * RETURNS:
4483 * 0 on success, AC_ERR_* mask otherwise.
4484 */
4485 static unsigned int ata_dev_init_params(struct ata_device *dev,
4486 u16 heads, u16 sectors)
4487 {
4488 struct ata_taskfile tf;
4489 unsigned int err_mask;
4490
4491 /* Number of sectors per track 1-255. Number of heads 1-16 */
4492 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4493 return AC_ERR_INVALID;
4494
4495 /* set up init dev params taskfile */
4496 DPRINTK("init dev params \n");
4497
4498 ata_tf_init(dev, &tf);
4499 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4500 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4501 tf.protocol = ATA_PROT_NODATA;
4502 tf.nsect = sectors;
4503 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4504
4505 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4506 /* A clean abort indicates an original or just out of spec drive
4507 and we should continue as we issue the setup based on the
4508 drive reported working geometry */
4509 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4510 err_mask = 0;
4511
4512 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4513 return err_mask;
4514 }
4515
4516 /**
4517 * ata_sg_clean - Unmap DMA memory associated with command
4518 * @qc: Command containing DMA memory to be released
4519 *
4520 * Unmap all mapped DMA memory associated with this command.
4521 *
4522 * LOCKING:
4523 * spin_lock_irqsave(host lock)
4524 */
4525 void ata_sg_clean(struct ata_queued_cmd *qc)
4526 {
4527 struct ata_port *ap = qc->ap;
4528 struct scatterlist *sg = qc->sg;
4529 int dir = qc->dma_dir;
4530
4531 WARN_ON_ONCE(sg == NULL);
4532
4533 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4534
4535 if (qc->n_elem)
4536 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4537
4538 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4539 qc->sg = NULL;
4540 }
4541
4542 /**
4543 * atapi_check_dma - Check whether ATAPI DMA can be supported
4544 * @qc: Metadata associated with taskfile to check
4545 *
4546 * Allow low-level driver to filter ATA PACKET commands, returning
4547 * a status indicating whether or not it is OK to use DMA for the
4548 * supplied PACKET command.
4549 *
4550 * LOCKING:
4551 * spin_lock_irqsave(host lock)
4552 *
4553 * RETURNS: 0 when ATAPI DMA can be used
4554 * nonzero otherwise
4555 */
4556 int atapi_check_dma(struct ata_queued_cmd *qc)
4557 {
4558 struct ata_port *ap = qc->ap;
4559
4560 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4561 * few ATAPI devices choke on such DMA requests.
4562 */
4563 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4564 unlikely(qc->nbytes & 15))
4565 return 1;
4566
4567 if (ap->ops->check_atapi_dma)
4568 return ap->ops->check_atapi_dma(qc);
4569
4570 return 0;
4571 }
4572
4573 /**
4574 * ata_std_qc_defer - Check whether a qc needs to be deferred
4575 * @qc: ATA command in question
4576 *
4577 * Non-NCQ commands cannot run with any other command, NCQ or
4578 * not. As the upper layer only knows the queue depth, we are
4579 * responsible for maintaining exclusion. This function checks
4580 * whether a new command @qc can be issued.
4581 *
4582 * LOCKING:
4583 * spin_lock_irqsave(host lock)
4584 *
4585 * RETURNS:
4586 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4587 */
4588 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4589 {
4590 struct ata_link *link = qc->dev->link;
4591
4592 if (qc->tf.protocol == ATA_PROT_NCQ) {
4593 if (!ata_tag_valid(link->active_tag))
4594 return 0;
4595 } else {
4596 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4597 return 0;
4598 }
4599
4600 return ATA_DEFER_LINK;
4601 }
4602
4603 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4604
4605 /**
4606 * ata_sg_init - Associate command with scatter-gather table.
4607 * @qc: Command to be associated
4608 * @sg: Scatter-gather table.
4609 * @n_elem: Number of elements in s/g table.
4610 *
4611 * Initialize the data-related elements of queued_cmd @qc
4612 * to point to a scatter-gather table @sg, containing @n_elem
4613 * elements.
4614 *
4615 * LOCKING:
4616 * spin_lock_irqsave(host lock)
4617 */
4618 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4619 unsigned int n_elem)
4620 {
4621 qc->sg = sg;
4622 qc->n_elem = n_elem;
4623 qc->cursg = qc->sg;
4624 }
4625
4626 /**
4627 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4628 * @qc: Command with scatter-gather table to be mapped.
4629 *
4630 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4631 *
4632 * LOCKING:
4633 * spin_lock_irqsave(host lock)
4634 *
4635 * RETURNS:
4636 * Zero on success, negative on error.
4637 *
4638 */
4639 static int ata_sg_setup(struct ata_queued_cmd *qc)
4640 {
4641 struct ata_port *ap = qc->ap;
4642 unsigned int n_elem;
4643
4644 VPRINTK("ENTER, ata%u\n", ap->print_id);
4645
4646 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4647 if (n_elem < 1)
4648 return -1;
4649
4650 DPRINTK("%d sg elements mapped\n", n_elem);
4651 qc->orig_n_elem = qc->n_elem;
4652 qc->n_elem = n_elem;
4653 qc->flags |= ATA_QCFLAG_DMAMAP;
4654
4655 return 0;
4656 }
4657
4658 /**
4659 * swap_buf_le16 - swap halves of 16-bit words in place
4660 * @buf: Buffer to swap
4661 * @buf_words: Number of 16-bit words in buffer.
4662 *
4663 * Swap halves of 16-bit words if needed to convert from
4664 * little-endian byte order to native cpu byte order, or
4665 * vice-versa.
4666 *
4667 * LOCKING:
4668 * Inherited from caller.
4669 */
4670 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4671 {
4672 #ifdef __BIG_ENDIAN
4673 unsigned int i;
4674
4675 for (i = 0; i < buf_words; i++)
4676 buf[i] = le16_to_cpu(buf[i]);
4677 #endif /* __BIG_ENDIAN */
4678 }
4679
4680 /**
4681 * ata_qc_new - Request an available ATA command, for queueing
4682 * @ap: target port
4683 *
4684 * LOCKING:
4685 * None.
4686 */
4687
4688 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4689 {
4690 struct ata_queued_cmd *qc = NULL;
4691 unsigned int i;
4692
4693 /* no command while frozen */
4694 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4695 return NULL;
4696
4697 /* the last tag is reserved for internal command. */
4698 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4699 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4700 qc = __ata_qc_from_tag(ap, i);
4701 break;
4702 }
4703
4704 if (qc)
4705 qc->tag = i;
4706
4707 return qc;
4708 }
4709
4710 /**
4711 * ata_qc_new_init - Request an available ATA command, and initialize it
4712 * @dev: Device from whom we request an available command structure
4713 *
4714 * LOCKING:
4715 * None.
4716 */
4717
4718 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4719 {
4720 struct ata_port *ap = dev->link->ap;
4721 struct ata_queued_cmd *qc;
4722
4723 qc = ata_qc_new(ap);
4724 if (qc) {
4725 qc->scsicmd = NULL;
4726 qc->ap = ap;
4727 qc->dev = dev;
4728
4729 ata_qc_reinit(qc);
4730 }
4731
4732 return qc;
4733 }
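
/*
 * Lifecycle sketch (illustrative, simplified from the SCSI translation
 * path): allocate a qc, attach data and a completion callback, then hand
 * it to ata_qc_issue().  sg, n_elem, nbytes and my_complete_fn are
 * hypothetical names for whatever the caller has prepared.
 *
 *	qc = ata_qc_new_init(dev);
 *	if (!qc)
 *		return SCSI_MLQUEUE_HOST_BUSY;	// or defer/retry as appropriate
 *	ata_sg_init(qc, sg, n_elem);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *	qc->nbytes = nbytes;			// total transfer length
 *	qc->complete_fn = my_complete_fn;
 *	// fill qc->tf (and qc->cdb for ATAPI), then:
 *	ata_qc_issue(qc);
 */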
4734
4735 /**
4736 * ata_qc_free - free unused ata_queued_cmd
4737 * @qc: Command to complete
4738 *
4739 * Designed to free unused ata_queued_cmd object
4740 * in case something prevents using it.
4741 *
4742 * LOCKING:
4743 * spin_lock_irqsave(host lock)
4744 */
4745 void ata_qc_free(struct ata_queued_cmd *qc)
4746 {
4747 struct ata_port *ap;
4748 unsigned int tag;
4749
4750 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4751 ap = qc->ap;
4752
4753 qc->flags = 0;
4754 tag = qc->tag;
4755 if (likely(ata_tag_valid(tag))) {
4756 qc->tag = ATA_TAG_POISON;
4757 clear_bit(tag, &ap->qc_allocated);
4758 }
4759 }
4760
4761 void __ata_qc_complete(struct ata_queued_cmd *qc)
4762 {
4763 struct ata_port *ap;
4764 struct ata_link *link;
4765
4766 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4767 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4768 ap = qc->ap;
4769 link = qc->dev->link;
4770
4771 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4772 ata_sg_clean(qc);
4773
4774 /* command should be marked inactive atomically with qc completion */
4775 if (qc->tf.protocol == ATA_PROT_NCQ) {
4776 link->sactive &= ~(1 << qc->tag);
4777 if (!link->sactive)
4778 ap->nr_active_links--;
4779 } else {
4780 link->active_tag = ATA_TAG_POISON;
4781 ap->nr_active_links--;
4782 }
4783
4784 /* clear exclusive status */
4785 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4786 ap->excl_link == link))
4787 ap->excl_link = NULL;
4788
4789 /* atapi: mark qc as inactive to prevent the interrupt handler
4790 * from completing the command twice later, before the error handler
4791 * is called. (when rc != 0 and atapi request sense is needed)
4792 */
4793 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4794 ap->qc_active &= ~(1 << qc->tag);
4795
4796 /* call completion callback */
4797 qc->complete_fn(qc);
4798 }
4799
4800 static void fill_result_tf(struct ata_queued_cmd *qc)
4801 {
4802 struct ata_port *ap = qc->ap;
4803
4804 qc->result_tf.flags = qc->tf.flags;
4805 ap->ops->qc_fill_rtf(qc);
4806 }
4807
4808 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4809 {
4810 struct ata_device *dev = qc->dev;
4811
4812 if (ata_is_nodata(qc->tf.protocol))
4813 return;
4814
4815 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4816 return;
4817
4818 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4819 }
4820
4821 /**
4822 * ata_qc_complete - Complete an active ATA command
4823 * @qc: Command to complete
4824 *
4825 * Indicate to the mid and upper layers that an ATA command has
4826 * completed, with either an ok or not-ok status.
4827 *
4828 * Refrain from calling this function multiple times when
4829 * successfully completing multiple NCQ commands.
4830 * ata_qc_complete_multiple() should be used instead, which will
4831 * properly update IRQ expect state.
4832 *
4833 * LOCKING:
4834 * spin_lock_irqsave(host lock)
4835 */
4836 void ata_qc_complete(struct ata_queued_cmd *qc)
4837 {
4838 struct ata_port *ap = qc->ap;
4839
4840 /* XXX: New EH and old EH use different mechanisms to
4841 * synchronize EH with regular execution path.
4842 *
4843 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4844 * Normal execution path is responsible for not accessing a
4845 * failed qc. libata core enforces the rule by returning NULL
4846 * from ata_qc_from_tag() for failed qcs.
4847 *
4848 * Old EH depends on ata_qc_complete() nullifying completion
4849 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4850 * not synchronize with interrupt handler. Only PIO task is
4851 * taken care of.
4852 */
4853 if (ap->ops->error_handler) {
4854 struct ata_device *dev = qc->dev;
4855 struct ata_eh_info *ehi = &dev->link->eh_info;
4856
4857 if (unlikely(qc->err_mask))
4858 qc->flags |= ATA_QCFLAG_FAILED;
4859
4860 /*
4861 * Finish internal commands without any further processing
4862 * and always with the result TF filled.
4863 */
4864 if (unlikely(ata_tag_internal(qc->tag))) {
4865 fill_result_tf(qc);
4866 __ata_qc_complete(qc);
4867 return;
4868 }
4869
4870 /*
4871 * Non-internal qc has failed. Fill the result TF and
4872 * summon EH.
4873 */
4874 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4875 fill_result_tf(qc);
4876 ata_qc_schedule_eh(qc);
4877 return;
4878 }
4879
4880 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4881
4882 /* read result TF if requested */
4883 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4884 fill_result_tf(qc);
4885
4886 /* Some commands need post-processing after successful
4887 * completion.
4888 */
4889 switch (qc->tf.command) {
4890 case ATA_CMD_SET_FEATURES:
4891 if (qc->tf.feature != SETFEATURES_WC_ON &&
4892 qc->tf.feature != SETFEATURES_WC_OFF)
4893 break;
4894 /* fall through */
4895 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4896 case ATA_CMD_SET_MULTI: /* multi_count changed */
4897 /* revalidate device */
4898 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4899 ata_port_schedule_eh(ap);
4900 break;
4901
4902 case ATA_CMD_SLEEP:
4903 dev->flags |= ATA_DFLAG_SLEEPING;
4904 break;
4905 }
4906
4907 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4908 ata_verify_xfer(qc);
4909
4910 __ata_qc_complete(qc);
4911 } else {
4912 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4913 return;
4914
4915 /* read result TF if failed or requested */
4916 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4917 fill_result_tf(qc);
4918
4919 __ata_qc_complete(qc);
4920 }
4921 }
4922
4923 /**
4924 * ata_qc_complete_multiple - Complete multiple qcs successfully
4925 * @ap: port in question
4926 * @qc_active: new qc_active mask
4927 *
4928 * Complete in-flight commands. This function is meant to be
4929 * called from a low-level driver's interrupt routine to complete
4930 * requests normally. ap->qc_active and @qc_active are compared
4931 * and commands are completed accordingly.
4932 *
4933 * Always use this function when completing multiple NCQ commands
4934 * from IRQ handlers instead of calling ata_qc_complete()
4935 * multiple times to keep IRQ expect status properly in sync.
4936 *
4937 * LOCKING:
4938 * spin_lock_irqsave(host lock)
4939 *
4940 * RETURNS:
4941 * Number of completed commands on success, -errno otherwise.
4942 */
4943 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4944 {
4945 int nr_done = 0;
4946 u32 done_mask;
4947
4948 done_mask = ap->qc_active ^ qc_active;
4949
4950 if (unlikely(done_mask & qc_active)) {
4951 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4952 "(%08x->%08x)\n", ap->qc_active, qc_active);
4953 return -EINVAL;
4954 }
4955
4956 while (done_mask) {
4957 struct ata_queued_cmd *qc;
4958 unsigned int tag = __ffs(done_mask);
4959
4960 qc = ata_qc_from_tag(ap, tag);
4961 if (qc) {
4962 ata_qc_complete(qc);
4963 nr_done++;
4964 }
4965 done_mask &= ~(1 << tag);
4966 }
4967
4968 return nr_done;
4969 }
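
/*
 * Worked example (illustrative): if ap->qc_active was 0x0000000b (tags
 * 0, 1 and 3 in flight) and the controller now reports qc_active
 * 0x00000001 (only tag 0 still busy), then done_mask = 0x0b ^ 0x01 =
 * 0x0a, so the loop above completes tags 1 and 3 and returns 2.
 */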
4970
4971 /**
4972 * ata_qc_issue - issue taskfile to device
4973 * @qc: command to issue to device
4974 *
4975 * Prepare an ATA command for submission to a device.
4976 * This includes mapping the data into a DMA-able
4977 * area, filling in the S/G table, and finally
4978 * writing the taskfile to hardware, starting the command.
4979 *
4980 * LOCKING:
4981 * spin_lock_irqsave(host lock)
4982 */
4983 void ata_qc_issue(struct ata_queued_cmd *qc)
4984 {
4985 struct ata_port *ap = qc->ap;
4986 struct ata_link *link = qc->dev->link;
4987 u8 prot = qc->tf.protocol;
4988
4989 /* Make sure only one non-NCQ command is outstanding. The
4990 * check is skipped for old EH because it reuses active qc to
4991 * request ATAPI sense.
4992 */
4993 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4994
4995 if (ata_is_ncq(prot)) {
4996 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
4997
4998 if (!link->sactive)
4999 ap->nr_active_links++;
5000 link->sactive |= 1 << qc->tag;
5001 } else {
5002 WARN_ON_ONCE(link->sactive);
5003
5004 ap->nr_active_links++;
5005 link->active_tag = qc->tag;
5006 }
5007
5008 qc->flags |= ATA_QCFLAG_ACTIVE;
5009 ap->qc_active |= 1 << qc->tag;
5010
5011 /*
5012 * We guarantee to LLDs that they will have at least one
5013 * non-zero sg if the command is a data command.
5014 */
5015 if (WARN_ON_ONCE(ata_is_data(prot) &&
5016 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5017 goto sys_err;
5018
5019 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5020 (ap->flags & ATA_FLAG_PIO_DMA)))
5021 if (ata_sg_setup(qc))
5022 goto sys_err;
5023
5024 /* if device is sleeping, schedule reset and abort the link */
5025 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5026 link->eh_info.action |= ATA_EH_RESET;
5027 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5028 ata_link_abort(link);
5029 return;
5030 }
5031
5032 ap->ops->qc_prep(qc);
5033
5034 qc->err_mask |= ap->ops->qc_issue(qc);
5035 if (unlikely(qc->err_mask))
5036 goto err;
5037 return;
5038
5039 sys_err:
5040 qc->err_mask |= AC_ERR_SYSTEM;
5041 err:
5042 ata_qc_complete(qc);
5043 }
5044
5045 /**
5046 * sata_scr_valid - test whether SCRs are accessible
5047 * @link: ATA link to test SCR accessibility for
5048 *
5049 * Test whether SCRs are accessible for @link.
5050 *
5051 * LOCKING:
5052 * None.
5053 *
5054 * RETURNS:
5055 * 1 if SCRs are accessible, 0 otherwise.
5056 */
5057 int sata_scr_valid(struct ata_link *link)
5058 {
5059 struct ata_port *ap = link->ap;
5060
5061 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5062 }
5063
5064 /**
5065 * sata_scr_read - read SCR register of the specified port
5066 * @link: ATA link to read SCR for
5067 * @reg: SCR to read
5068 * @val: Place to store read value
5069 *
5070 * Read SCR register @reg of @link into *@val. This function is
5071 * guaranteed to succeed if @link is ap->link, the cable type of
5072 * the port is SATA and the port implements ->scr_read.
5073 *
5074 * LOCKING:
5075 * None if @link is ap->link. Kernel thread context otherwise.
5076 *
5077 * RETURNS:
5078 * 0 on success, negative errno on failure.
5079 */
5080 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5081 {
5082 if (ata_is_host_link(link)) {
5083 if (sata_scr_valid(link))
5084 return link->ap->ops->scr_read(link, reg, val);
5085 return -EOPNOTSUPP;
5086 }
5087
5088 return sata_pmp_scr_read(link, reg, val);
5089 }
5090
5091 /**
5092 * sata_scr_write - write SCR register of the specified port
5093 * @link: ATA link to write SCR for
5094 * @reg: SCR to write
5095 * @val: value to write
5096 *
5097 * Write @val to SCR register @reg of @link. This function is
5098 * guaranteed to succeed if @link is ap->link, the cable type of
5099 * the port is SATA and the port implements ->scr_read.
5100 *
5101 * LOCKING:
5102 * None if @link is ap->link. Kernel thread context otherwise.
5103 *
5104 * RETURNS:
5105 * 0 on success, negative errno on failure.
5106 */
5107 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5108 {
5109 if (ata_is_host_link(link)) {
5110 if (sata_scr_valid(link))
5111 return link->ap->ops->scr_write(link, reg, val);
5112 return -EOPNOTSUPP;
5113 }
5114
5115 return sata_pmp_scr_write(link, reg, val);
5116 }
5117
5118 /**
5119 * sata_scr_write_flush - write SCR register of the specified port and flush
5120 * @link: ATA link to write SCR for
5121 * @reg: SCR to write
5122 * @val: value to write
5123 *
5124 * This function is identical to sata_scr_write() except that this
5125 * function performs flush after writing to the register.
5126 *
5127 * LOCKING:
5128 * None if @link is ap->link. Kernel thread context otherwise.
5129 *
5130 * RETURNS:
5131 * 0 on success, negative errno on failure.
5132 */
5133 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5134 {
5135 if (ata_is_host_link(link)) {
5136 int rc;
5137
5138 if (sata_scr_valid(link)) {
5139 rc = link->ap->ops->scr_write(link, reg, val);
5140 if (rc == 0)
5141 rc = link->ap->ops->scr_read(link, reg, &val);
5142 return rc;
5143 }
5144 return -EOPNOTSUPP;
5145 }
5146
5147 return sata_pmp_scr_write(link, reg, val);
5148 }
5149
5150 /**
5151 * ata_phys_link_online - test whether the given link is online
5152 * @link: ATA link to test
5153 *
5154 * Test whether @link is online. Note that this function returns
5155 * 0 if online status of @link cannot be obtained, so
5156 * ata_link_online(link) != !ata_link_offline(link).
5157 *
5158 * LOCKING:
5159 * None.
5160 *
5161 * RETURNS:
5162 * True if the port online status is available and online.
5163 */
5164 bool ata_phys_link_online(struct ata_link *link)
5165 {
5166 u32 sstatus;
5167
5168 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5169 ata_sstatus_online(sstatus))
5170 return true;
5171 return false;
5172 }
5173
5174 /**
5175 * ata_phys_link_offline - test whether the given link is offline
5176 * @link: ATA link to test
5177 *
5178 * Test whether @link is offline. Note that this function
5179 * returns 0 if offline status of @link cannot be obtained, so
5180 * ata_link_online(link) != !ata_link_offline(link).
5181 *
5182 * LOCKING:
5183 * None.
5184 *
5185 * RETURNS:
5186 * True if the port offline status is available and offline.
5187 */
5188 bool ata_phys_link_offline(struct ata_link *link)
5189 {
5190 u32 sstatus;
5191
5192 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5193 !ata_sstatus_online(sstatus))
5194 return true;
5195 return false;
5196 }
5197
5198 /**
5199 * ata_link_online - test whether the given link is online
5200 * @link: ATA link to test
5201 *
5202 * Test whether @link is online. This is identical to
5203 * ata_phys_link_online() when there's no slave link. When
5204 * there's a slave link, this function should only be called on
5205 * the master link and will return true if any of M/S links is
5206 * online.
5207 *
5208 * LOCKING:
5209 * None.
5210 *
5211 * RETURNS:
5212 * True if the port online status is available and online.
5213 */
5214 bool ata_link_online(struct ata_link *link)
5215 {
5216 struct ata_link *slave = link->ap->slave_link;
5217
5218 WARN_ON(link == slave); /* shouldn't be called on slave link */
5219
5220 return ata_phys_link_online(link) ||
5221 (slave && ata_phys_link_online(slave));
5222 }
5223
5224 /**
5225 * ata_link_offline - test whether the given link is offline
5226 * @link: ATA link to test
5227 *
5228 * Test whether @link is offline. This is identical to
5229 * ata_phys_link_offline() when there's no slave link. When
5230 * there's a slave link, this function should only be called on
5231 * the master link and will return true if both M/S links are
5232 * offline.
5233 *
5234 * LOCKING:
5235 * None.
5236 *
5237 * RETURNS:
5238 * True if the port offline status is available and offline.
5239 */
5240 bool ata_link_offline(struct ata_link *link)
5241 {
5242 struct ata_link *slave = link->ap->slave_link;
5243
5244 WARN_ON(link == slave); /* shouldn't be called on slave link */
5245
5246 return ata_phys_link_offline(link) &&
5247 (!slave || ata_phys_link_offline(slave));
5248 }
5249
5250 #ifdef CONFIG_PM
5251 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5252 unsigned int action, unsigned int ehi_flags,
5253 int wait)
5254 {
5255 unsigned long flags;
5256 int i, rc;
5257
5258 for (i = 0; i < host->n_ports; i++) {
5259 struct ata_port *ap = host->ports[i];
5260 struct ata_link *link;
5261
5262 /* Previous resume operation might still be in
5263 * progress. Wait for PM_PENDING to clear.
5264 */
5265 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5266 ata_port_wait_eh(ap);
5267 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5268 }
5269
5270 /* request PM ops to EH */
5271 spin_lock_irqsave(ap->lock, flags);
5272
5273 ap->pm_mesg = mesg;
5274 if (wait) {
5275 rc = 0;
5276 ap->pm_result = &rc;
5277 }
5278
5279 ap->pflags |= ATA_PFLAG_PM_PENDING;
5280 ata_for_each_link(link, ap, HOST_FIRST) {
5281 link->eh_info.action |= action;
5282 link->eh_info.flags |= ehi_flags;
5283 }
5284
5285 ata_port_schedule_eh(ap);
5286
5287 spin_unlock_irqrestore(ap->lock, flags);
5288
5289 /* wait and check result */
5290 if (wait) {
5291 ata_port_wait_eh(ap);
5292 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5293 if (rc)
5294 return rc;
5295 }
5296 }
5297
5298 return 0;
5299 }
5300
5301 /**
5302 * ata_host_suspend - suspend host
5303 * @host: host to suspend
5304 * @mesg: PM message
5305 *
5306 * Suspend @host. Actual operation is performed by EH. This
5307 * function requests EH to perform PM operations and waits for EH
5308 * to finish.
5309 *
5310 * LOCKING:
5311 * Kernel thread context (may sleep).
5312 *
5313 * RETURNS:
5314 * 0 on success, -errno on failure.
5315 */
5316 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5317 {
5318 unsigned int ehi_flags = ATA_EHI_QUIET;
5319 int rc;
5320
5321 /*
5322 * On some hardware, device fails to respond after spun down
5323 * for suspend. As the device won't be used before being
5324 * resumed, we don't need to touch the device. Ask EH to skip
5325 * the usual stuff and proceed directly to suspend.
5326 *
5327 * http://thread.gmane.org/gmane.linux.ide/46764
5328 */
5329 if (mesg.event == PM_EVENT_SUSPEND)
5330 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5331
5332 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5333 if (rc == 0)
5334 host->dev->power.power_state = mesg;
5335 return rc;
5336 }
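
/*
 * Usage sketch (illustrative): an LLD's PCI suspend hook typically asks
 * libata to quiesce first and only then touches the controller, roughly:
 *
 *	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *	int rc;
 *
 *	rc = ata_host_suspend(host, mesg);
 *	if (rc)
 *		return rc;
 *	// controller-specific power-down and PCI state save follow
 */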
5337
5338 /**
5339 * ata_host_resume - resume host
5340 * @host: host to resume
5341 *
5342 * Resume @host. Actual operation is performed by EH. This
5343 * function requests EH to perform PM operations and returns.
5344 * Note that all resume operations are performed in parallel.
5345 *
5346 * LOCKING:
5347 * Kernel thread context (may sleep).
5348 */
5349 void ata_host_resume(struct ata_host *host)
5350 {
5351 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5352 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5353 host->dev->power.power_state = PMSG_ON;
5354 }
5355 #endif
5356
5357 /**
5358 * ata_dev_init - Initialize an ata_device structure
5359 * @dev: Device structure to initialize
5360 *
5361 * Initialize @dev in preparation for probing.
5362 *
5363 * LOCKING:
5364 * Inherited from caller.
5365 */
5366 void ata_dev_init(struct ata_device *dev)
5367 {
5368 struct ata_link *link = ata_dev_phys_link(dev);
5369 struct ata_port *ap = link->ap;
5370 unsigned long flags;
5371
5372 /* SATA spd limit is bound to the attached device, reset together */
5373 link->sata_spd_limit = link->hw_sata_spd_limit;
5374 link->sata_spd = 0;
5375
5376 /* High bits of dev->flags are used to record warm plug
5377 * requests which occur asynchronously. Synchronize using
5378 * host lock.
5379 */
5380 spin_lock_irqsave(ap->lock, flags);
5381 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5382 dev->horkage = 0;
5383 spin_unlock_irqrestore(ap->lock, flags);
5384
5385 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5386 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5387 dev->pio_mask = UINT_MAX;
5388 dev->mwdma_mask = UINT_MAX;
5389 dev->udma_mask = UINT_MAX;
5390 }
5391
5392 /**
5393 * ata_link_init - Initialize an ata_link structure
5394 * @ap: ATA port link is attached to
5395 * @link: Link structure to initialize
5396 * @pmp: Port multiplier port number
5397 *
5398 * Initialize @link.
5399 *
5400 * LOCKING:
5401 * Kernel thread context (may sleep)
5402 */
5403 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5404 {
5405 int i;
5406
5407 /* clear everything except for devices */
5408 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5409 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5410
5411 link->ap = ap;
5412 link->pmp = pmp;
5413 link->active_tag = ATA_TAG_POISON;
5414 link->hw_sata_spd_limit = UINT_MAX;
5415
5416 /* can't use iterator, ap isn't initialized yet */
5417 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5418 struct ata_device *dev = &link->device[i];
5419
5420 dev->link = link;
5421 dev->devno = dev - link->device;
5422 #ifdef CONFIG_ATA_ACPI
5423 dev->gtf_filter = ata_acpi_gtf_filter;
5424 #endif
5425 ata_dev_init(dev);
5426 }
5427 }
5428
5429 /**
5430 * sata_link_init_spd - Initialize link->sata_spd_limit
5431 * @link: Link to configure sata_spd_limit for
5432 *
5433 * Initialize @link->[hw_]sata_spd_limit to the currently
5434 * configured value.
5435 *
5436 * LOCKING:
5437 * Kernel thread context (may sleep).
5438 *
5439 * RETURNS:
5440 * 0 on success, -errno on failure.
5441 */
5442 int sata_link_init_spd(struct ata_link *link)
5443 {
5444 u8 spd;
5445 int rc;
5446
5447 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5448 if (rc)
5449 return rc;
5450
5451 spd = (link->saved_scontrol >> 4) & 0xf;
5452 if (spd)
5453 link->hw_sata_spd_limit &= (1 << spd) - 1;
5454
5455 ata_force_link_limits(link);
5456
5457 link->sata_spd_limit = link->hw_sata_spd_limit;
5458
5459 return 0;
5460 }
5461
5462 /**
5463 * ata_port_alloc - allocate and initialize basic ATA port resources
5464 * @host: ATA host this allocated port belongs to
5465 *
5466 * Allocate and initialize basic ATA port resources.
5467 *
5468 * RETURNS:
5469 * Allocated ATA port on success, NULL on failure.
5470 *
5471 * LOCKING:
5472 * Inherited from calling layer (may sleep).
5473 */
5474 struct ata_port *ata_port_alloc(struct ata_host *host)
5475 {
5476 struct ata_port *ap;
5477
5478 DPRINTK("ENTER\n");
5479
5480 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5481 if (!ap)
5482 return NULL;
5483
5484 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5485 ap->lock = &host->lock;
5486 ap->print_id = -1;
5487 ap->host = host;
5488 ap->dev = host->dev;
5489
5490 #if defined(ATA_VERBOSE_DEBUG)
5491 /* turn on all debugging levels */
5492 ap->msg_enable = 0x00FF;
5493 #elif defined(ATA_DEBUG)
5494 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5495 #else
5496 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5497 #endif
5498
5499 mutex_init(&ap->scsi_scan_mutex);
5500 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5501 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5502 INIT_LIST_HEAD(&ap->eh_done_q);
5503 init_waitqueue_head(&ap->eh_wait_q);
5504 init_completion(&ap->park_req_pending);
5505 init_timer_deferrable(&ap->fastdrain_timer);
5506 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5507 ap->fastdrain_timer.data = (unsigned long)ap;
5508
5509 ap->cbl = ATA_CBL_NONE;
5510
5511 ata_link_init(ap, &ap->link, 0);
5512
5513 #ifdef ATA_IRQ_TRAP
5514 ap->stats.unhandled_irq = 1;
5515 ap->stats.idle_irq = 1;
5516 #endif
5517 ata_sff_port_init(ap);
5518
5519 return ap;
5520 }
5521
5522 static void ata_host_release(struct device *gendev, void *res)
5523 {
5524 struct ata_host *host = dev_get_drvdata(gendev);
5525 int i;
5526
5527 for (i = 0; i < host->n_ports; i++) {
5528 struct ata_port *ap = host->ports[i];
5529
5530 if (!ap)
5531 continue;
5532
5533 if (ap->scsi_host)
5534 scsi_host_put(ap->scsi_host);
5535
5536 kfree(ap->pmp_link);
5537 kfree(ap->slave_link);
5538 kfree(ap);
5539 host->ports[i] = NULL;
5540 }
5541
5542 dev_set_drvdata(gendev, NULL);
5543 }
5544
5545 /**
5546 * ata_host_alloc - allocate and init basic ATA host resources
5547 * @dev: generic device this host is associated with
5548 * @max_ports: maximum number of ATA ports associated with this host
5549 *
5550 * Allocate and initialize basic ATA host resources. LLD calls
5551 * this function to allocate a host, initializes it fully and
5552 * attaches it using ata_host_register().
5553 *
5554 * @max_ports ports are allocated and host->n_ports is
5555 * initialized to @max_ports. The caller is allowed to decrease
5556 * host->n_ports before calling ata_host_register(). The unused
5557 * ports will be automatically freed on registration.
5558 *
5559 * RETURNS:
5560 * Allocated ATA host on success, NULL on failure.
5561 *
5562 * LOCKING:
5563 * Inherited from calling layer (may sleep).
5564 */
5565 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5566 {
5567 struct ata_host *host;
5568 size_t sz;
5569 int i;
5570
5571 DPRINTK("ENTER\n");
5572
5573 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5574 return NULL;
5575
5576 /* alloc a container for our list of ATA ports (buses) */
5577 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5579 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5580 if (!host)
5581 goto err_out;
5582
5583 devres_add(dev, host);
5584 dev_set_drvdata(dev, host);
5585
5586 spin_lock_init(&host->lock);
5587 mutex_init(&host->eh_mutex);
5588 host->dev = dev;
5589 host->n_ports = max_ports;
5590
5591 /* allocate ports bound to this host */
5592 for (i = 0; i < max_ports; i++) {
5593 struct ata_port *ap;
5594
5595 ap = ata_port_alloc(host);
5596 if (!ap)
5597 goto err_out;
5598
5599 ap->port_no = i;
5600 host->ports[i] = ap;
5601 }
5602
5603 devres_remove_group(dev, NULL);
5604 return host;
5605
5606 err_out:
5607 devres_release_group(dev, NULL);
5608 return NULL;
5609 }
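
/*
 * Illustrative sketch (not part of libata; the "foo_*" names are made up):
 * an LLD that cannot tell how many ports exist before touching the
 * hardware may allocate the maximum and then shrink host->n_ports before
 * registration; the surplus ports are freed by ata_host_register().
 *
 *	struct ata_host *host;
 *	int nr;
 *
 *	host = ata_host_alloc(dev, FOO_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *
 *	nr = foo_count_ports(host);
 *	host->n_ports = nr;		(may only be decreased, never grown)
 */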
5610
5611 /**
5612 * ata_host_alloc_pinfo - alloc host and init with port_info array
5613 * @dev: generic device this host is associated with
5614 * @ppi: array of ATA port_info to initialize host with
5615 * @n_ports: number of ATA ports attached to this host
5616 *
5617 * Allocate ATA host and initialize with info from @ppi. If NULL
5618 * terminated, @ppi may contain fewer entries than @n_ports. The
5619 * last entry will be used for the remaining ports.
5620 *
5621 * RETURNS:
5622 *	Pointer to the allocated ATA host on success, NULL on failure.
5623 *
5624 * LOCKING:
5625 * Inherited from calling layer (may sleep).
5626 */
5627 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5628 const struct ata_port_info * const * ppi,
5629 int n_ports)
5630 {
5631 const struct ata_port_info *pi;
5632 struct ata_host *host;
5633 int i, j;
5634
5635 host = ata_host_alloc(dev, n_ports);
5636 if (!host)
5637 return NULL;
5638
5639 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5640 struct ata_port *ap = host->ports[i];
5641
5642 if (ppi[j])
5643 pi = ppi[j++];
5644
5645 ap->pio_mask = pi->pio_mask;
5646 ap->mwdma_mask = pi->mwdma_mask;
5647 ap->udma_mask = pi->udma_mask;
5648 ap->flags |= pi->flags;
5649 ap->link.flags |= pi->link_flags;
5650 ap->ops = pi->port_ops;
5651
5652 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5653 host->ops = pi->port_ops;
5654 }
5655
5656 return host;
5657 }
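
/*
 * Illustrative sketch (the "foo_*" names are hypothetical): because @ppi
 * may be NULL terminated, a controller whose ports are all identical can
 * pass a single port_info and still request several ports; the last entry
 * is reused for the remaining ones.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 4);
 */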
5658
5659 /**
5660 * ata_slave_link_init - initialize slave link
5661 * @ap: port to initialize slave link for
5662 *
5663 * Create and initialize slave link for @ap. This enables slave
5664 * link handling on the port.
5665 *
5666 * In libata, a port contains links and a link contains devices.
5667 *	There is a single host link but if a PMP is attached to it,
5668 *	there can be multiple fan-out links.  On SATA, there's usually
5669 *	a single device connected to a link but PATA and SATA
5670 *	controllers emulating a TF-based interface can have two - master
5671 * and slave.
5672 *
5673 * However, there are a few controllers which don't fit into this
5674 * abstraction too well - SATA controllers which emulate TF
5675 * interface with both master and slave devices but also have
5676 * separate SCR register sets for each device. These controllers
5677 * need separate links for physical link handling
5678 * (e.g. onlineness, link speed) but should be treated like a
5679 * traditional M/S controller for everything else (e.g. command
5680 * issue, softreset).
5681 *
5682 * slave_link is libata's way of handling this class of
5683 * controllers without impacting core layer too much. For
5684 * anything other than physical link handling, the default host
5685 * link is used for both master and slave. For physical link
5686 * handling, separate @ap->slave_link is used. All dirty details
5687 * are implemented inside libata core layer. From LLD's POV, the
5688 * only difference is that prereset, hardreset and postreset are
5689 * called once more for the slave link, so the reset sequence
5690 * looks like the following.
5691 *
5692 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5693 * softreset(M) -> postreset(M) -> postreset(S)
5694 *
5695 * Note that softreset is called only for the master. Softreset
5696 * resets both M/S by definition, so SRST on master should handle
5697 * both (the standard method will work just fine).
5698 *
5699 * LOCKING:
5700 * Should be called before host is registered.
5701 *
5702 * RETURNS:
5703 * 0 on success, -errno on failure.
5704 */
5705 int ata_slave_link_init(struct ata_port *ap)
5706 {
5707 struct ata_link *link;
5708
5709 WARN_ON(ap->slave_link);
5710 WARN_ON(ap->flags & ATA_FLAG_PMP);
5711
5712 link = kzalloc(sizeof(*link), GFP_KERNEL);
5713 if (!link)
5714 return -ENOMEM;
5715
5716 ata_link_init(ap, link, 1);
5717 ap->slave_link = link;
5718 return 0;
5719 }
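
/*
 * Illustrative sketch (hypothetical driver): a controller that emulates a
 * master/slave taskfile interface but has separate SCRs per device would
 * typically enable the slave link on each port while setting the host up,
 * before ata_host_register() is called.
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 */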
5720
5721 static void ata_host_stop(struct device *gendev, void *res)
5722 {
5723 struct ata_host *host = dev_get_drvdata(gendev);
5724 int i;
5725
5726 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5727
5728 for (i = 0; i < host->n_ports; i++) {
5729 struct ata_port *ap = host->ports[i];
5730
5731 if (ap->ops->port_stop)
5732 ap->ops->port_stop(ap);
5733 }
5734
5735 if (host->ops->host_stop)
5736 host->ops->host_stop(host);
5737 }
5738
5739 /**
5740 * ata_finalize_port_ops - finalize ata_port_operations
5741 * @ops: ata_port_operations to finalize
5742 *
5743 * An ata_port_operations can inherit from another ops and that
5744 * ops can again inherit from another. This can go on as many
5745 * times as necessary as long as there is no loop in the
5746 * inheritance chain.
5747 *
5748 * Ops tables are finalized when the host is started. NULL or
5749 *	unspecified entries are inherited from the closest ancestor
5750 * which has the method and the entry is populated with it.
5751 * After finalization, the ops table directly points to all the
5752 * methods and ->inherits is no longer necessary and cleared.
5753 *
5754 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5755 *
5756 * LOCKING:
5757 * None.
5758 */
5759 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5760 {
5761 static DEFINE_SPINLOCK(lock);
5762 const struct ata_port_operations *cur;
5763 void **begin = (void **)ops;
5764 void **end = (void **)&ops->inherits;
5765 void **pp;
5766
5767 if (!ops || !ops->inherits)
5768 return;
5769
5770 spin_lock(&lock);
5771
5772 for (cur = ops->inherits; cur; cur = cur->inherits) {
5773 void **inherit = (void **)cur;
5774
5775 for (pp = begin; pp < end; pp++, inherit++)
5776 if (!*pp)
5777 *pp = *inherit;
5778 }
5779
5780 for (pp = begin; pp < end; pp++)
5781 if (IS_ERR(*pp))
5782 *pp = NULL;
5783
5784 ops->inherits = NULL;
5785
5786 spin_unlock(&lock);
5787 }
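
/*
 * Illustrative sketch (hypothetical "foo" ops): an inheriting ops table
 * usually specifies only what differs from its ancestor; unset methods are
 * copied from the inheritance chain when the host is started, and
 * ATA_OP_NULL forces an otherwise inherited method back to NULL.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= foo_hardreset,
 *		.scr_read	= foo_scr_read,
 *		.scr_write	= foo_scr_write,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */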
5788
5789 /**
5790 * ata_host_start - start and freeze ports of an ATA host
5791 * @host: ATA host to start ports for
5792 *
5793 * Start and then freeze ports of @host. Started status is
5794 * recorded in host->flags, so this function can be called
5795 * multiple times. Ports are guaranteed to get started only
5796 *	once.  If host->ops isn't initialized yet, it's set to the
5797 * first non-dummy port ops.
5798 *
5799 * LOCKING:
5800 * Inherited from calling layer (may sleep).
5801 *
5802 * RETURNS:
5803 * 0 if all ports are started successfully, -errno otherwise.
5804 */
5805 int ata_host_start(struct ata_host *host)
5806 {
5807 int have_stop = 0;
5808 void *start_dr = NULL;
5809 int i, rc;
5810
5811 if (host->flags & ATA_HOST_STARTED)
5812 return 0;
5813
5814 ata_finalize_port_ops(host->ops);
5815
5816 for (i = 0; i < host->n_ports; i++) {
5817 struct ata_port *ap = host->ports[i];
5818
5819 ata_finalize_port_ops(ap->ops);
5820
5821 if (!host->ops && !ata_port_is_dummy(ap))
5822 host->ops = ap->ops;
5823
5824 if (ap->ops->port_stop)
5825 have_stop = 1;
5826 }
5827
5828 if (host->ops->host_stop)
5829 have_stop = 1;
5830
5831 if (have_stop) {
5832 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5833 if (!start_dr)
5834 return -ENOMEM;
5835 }
5836
5837 for (i = 0; i < host->n_ports; i++) {
5838 struct ata_port *ap = host->ports[i];
5839
5840 if (ap->ops->port_start) {
5841 rc = ap->ops->port_start(ap);
5842 if (rc) {
5843 if (rc != -ENODEV)
5844 dev_printk(KERN_ERR, host->dev,
5845 "failed to start port %d "
5846 "(errno=%d)\n", i, rc);
5847 goto err_out;
5848 }
5849 }
5850 ata_eh_freeze_port(ap);
5851 }
5852
5853 if (start_dr)
5854 devres_add(host->dev, start_dr);
5855 host->flags |= ATA_HOST_STARTED;
5856 return 0;
5857
5858 err_out:
5859 while (--i >= 0) {
5860 struct ata_port *ap = host->ports[i];
5861
5862 if (ap->ops->port_stop)
5863 ap->ops->port_stop(ap);
5864 }
5865 devres_free(start_dr);
5866 return rc;
5867 }
5868
5869 /**
5870 *	ata_host_init - Initialize a host struct
5871 * @host: host to initialize
5872 * @dev: device host is attached to
5873 * @flags: host flags
5874 * @ops: port_ops
5875 *
5876 * LOCKING:
5877 * PCI/etc. bus probe sem.
5878 *
5879 */
5880 /* KILLME - the only user left is ipr */
5881 void ata_host_init(struct ata_host *host, struct device *dev,
5882 unsigned long flags, struct ata_port_operations *ops)
5883 {
5884 spin_lock_init(&host->lock);
5885 mutex_init(&host->eh_mutex);
5886 host->dev = dev;
5887 host->flags = flags;
5888 host->ops = ops;
5889 }
5890
5891 int ata_port_probe(struct ata_port *ap)
5892 {
5893 int rc = 0;
5894
5895 /* probe */
5896 if (ap->ops->error_handler) {
5897 struct ata_eh_info *ehi = &ap->link.eh_info;
5898 unsigned long flags;
5899
5900 /* kick EH for boot probing */
5901 spin_lock_irqsave(ap->lock, flags);
5902
5903 ehi->probe_mask |= ATA_ALL_DEVICES;
5904 ehi->action |= ATA_EH_RESET;
5905 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5906
5907 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5908 ap->pflags |= ATA_PFLAG_LOADING;
5909 ata_port_schedule_eh(ap);
5910
5911 spin_unlock_irqrestore(ap->lock, flags);
5912
5913 /* wait for EH to finish */
5914 ata_port_wait_eh(ap);
5915 } else {
5916 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5917 rc = ata_bus_probe(ap);
5918 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5919 }
5920 return rc;
5921 }
5922
5923
5924 static void async_port_probe(void *data, async_cookie_t cookie)
5925 {
5926 struct ata_port *ap = data;
5927
5928 /*
5929 * If we're not allowed to scan this host in parallel,
5930 * we need to wait until all previous scans have completed
5931 * before going further.
5932 * Jeff Garzik says this is only within a controller, so we
5933 * don't need to wait for port 0, only for later ports.
5934 */
5935 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5936 async_synchronize_cookie(cookie);
5937
5938 (void)ata_port_probe(ap);
5939
5940 /* in order to keep device order, we need to synchronize at this point */
5941 async_synchronize_cookie(cookie);
5942
5943 ata_scsi_scan_host(ap, 1);
5944 }
5945
5946 /**
5947 * ata_host_register - register initialized ATA host
5948 * @host: ATA host to register
5949 * @sht: template for SCSI host
5950 *
5951 * Register initialized ATA host. @host is allocated using
5952 * ata_host_alloc() and fully initialized by LLD. This function
5953 * starts ports, registers @host with ATA and SCSI layers and
5954 *	probes registered devices.
5955 *
5956 * LOCKING:
5957 * Inherited from calling layer (may sleep).
5958 *
5959 * RETURNS:
5960 * 0 on success, -errno otherwise.
5961 */
5962 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5963 {
5964 int i, rc;
5965
5966 /* host must have been started */
5967 if (!(host->flags & ATA_HOST_STARTED)) {
5968 dev_printk(KERN_ERR, host->dev,
5969 "BUG: trying to register unstarted host\n");
5970 WARN_ON(1);
5971 return -EINVAL;
5972 }
5973
5974 /* Blow away unused ports. This happens when LLD can't
5975 * determine the exact number of ports to allocate at
5976 * allocation time.
5977 */
5978 for (i = host->n_ports; host->ports[i]; i++)
5979 kfree(host->ports[i]);
5980
5981 /* give ports names and add SCSI hosts */
5982 for (i = 0; i < host->n_ports; i++)
5983 host->ports[i]->print_id = ata_print_id++;
5984
5985
5986 /* Create associated sysfs transport objects */
5987 for (i = 0; i < host->n_ports; i++) {
5988 rc = ata_tport_add(host->dev,host->ports[i]);
5989 if (rc) {
5990 goto err_tadd;
5991 }
5992 }
5993
5994 rc = ata_scsi_add_hosts(host, sht);
5995 if (rc)
5996 goto err_tadd;
5997
5998 /* associate with ACPI nodes */
5999 ata_acpi_associate(host);
6000
6001 /* set cable, sata_spd_limit and report */
6002 for (i = 0; i < host->n_ports; i++) {
6003 struct ata_port *ap = host->ports[i];
6004 unsigned long xfer_mask;
6005
6006 /* set SATA cable type if still unset */
6007 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6008 ap->cbl = ATA_CBL_SATA;
6009
6010 /* init sata_spd_limit to the current value */
6011 sata_link_init_spd(&ap->link);
6012 if (ap->slave_link)
6013 sata_link_init_spd(ap->slave_link);
6014
6015 /* print per-port info to dmesg */
6016 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6017 ap->udma_mask);
6018
6019 if (!ata_port_is_dummy(ap)) {
6020 ata_port_printk(ap, KERN_INFO,
6021 "%cATA max %s %s\n",
6022 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6023 ata_mode_string(xfer_mask),
6024 ap->link.eh_info.desc);
6025 ata_ehi_clear_desc(&ap->link.eh_info);
6026 } else
6027 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6028 }
6029
6030 /* perform each probe asynchronously */
6031 for (i = 0; i < host->n_ports; i++) {
6032 struct ata_port *ap = host->ports[i];
6033 async_schedule(async_port_probe, ap);
6034 }
6035
6036 return 0;
6037
6038 err_tadd:
6039 while (--i >= 0) {
6040 ata_tport_delete(host->ports[i]);
6041 }
6042 return rc;
6043
6044 }
6045
6046 /**
6047 * ata_host_activate - start host, request IRQ and register it
6048 * @host: target ATA host
6049 * @irq: IRQ to request
6050 * @irq_handler: irq_handler used when requesting IRQ
6051 * @irq_flags: irq_flags used when requesting IRQ
6052 * @sht: scsi_host_template to use when registering the host
6053 *
6054 * After allocating an ATA host and initializing it, most libata
6055 * LLDs perform three steps to activate the host - start host,
6056 *	request IRQ and register it.  This helper takes the necessary
6057 * arguments and performs the three steps in one go.
6058 *
6059 * An invalid IRQ skips the IRQ registration and expects the host to
6060 * have set polling mode on the port. In this case, @irq_handler
6061 * should be NULL.
6062 *
6063 * LOCKING:
6064 * Inherited from calling layer (may sleep).
6065 *
6066 * RETURNS:
6067 * 0 on success, -errno otherwise.
6068 */
6069 int ata_host_activate(struct ata_host *host, int irq,
6070 irq_handler_t irq_handler, unsigned long irq_flags,
6071 struct scsi_host_template *sht)
6072 {
6073 int i, rc;
6074
6075 rc = ata_host_start(host);
6076 if (rc)
6077 return rc;
6078
6079 /* Special case for polling mode */
6080 if (!irq) {
6081 WARN_ON(irq_handler);
6082 return ata_host_register(host, sht);
6083 }
6084
6085 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6086 dev_driver_string(host->dev), host);
6087 if (rc)
6088 return rc;
6089
6090 for (i = 0; i < host->n_ports; i++)
6091 ata_port_desc(host->ports[i], "irq %d", irq);
6092
6093 rc = ata_host_register(host, sht);
6094 /* if failed, just free the IRQ and leave ports alone */
6095 if (rc)
6096 devm_free_irq(host->dev, irq, host);
6097
6098 return rc;
6099 }
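
/*
 * Illustrative sketch (hypothetical PCI LLD; "foo_*" names are made up):
 * the typical probe path allocates the host, fills in per-port resources,
 * and then lets ata_host_activate() start the ports, hook the IRQ and
 * register everything with the SCSI layer.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		(map BARs and set up per-port ioaddr/iomap here)
 *
 *		pci_set_master(pdev);
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */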
6100
6101 /**
6102 *	ata_port_detach - Detach ATA port in preparation for device removal
6103 * @ap: ATA port to be detached
6104 *
6105 * Detach all ATA devices and the associated SCSI devices of @ap;
6106 * then, remove the associated SCSI host. @ap is guaranteed to
6107 * be quiescent on return from this function.
6108 *
6109 * LOCKING:
6110 * Kernel thread context (may sleep).
6111 */
6112 static void ata_port_detach(struct ata_port *ap)
6113 {
6114 unsigned long flags;
6115
6116 if (!ap->ops->error_handler)
6117 goto skip_eh;
6118
6119 /* tell EH we're leaving & flush EH */
6120 spin_lock_irqsave(ap->lock, flags);
6121 ap->pflags |= ATA_PFLAG_UNLOADING;
6122 ata_port_schedule_eh(ap);
6123 spin_unlock_irqrestore(ap->lock, flags);
6124
6125 /* wait till EH commits suicide */
6126 ata_port_wait_eh(ap);
6127
6128 /* it better be dead now */
6129 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6130
6131 cancel_delayed_work_sync(&ap->hotplug_task);
6132
6133 skip_eh:
6134 if (ap->pmp_link) {
6135 int i;
6136 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6137 ata_tlink_delete(&ap->pmp_link[i]);
6138 }
6139 ata_tport_delete(ap);
6140
6141 /* remove the associated SCSI host */
6142 scsi_remove_host(ap->scsi_host);
6143 }
6144
6145 /**
6146 * ata_host_detach - Detach all ports of an ATA host
6147 * @host: Host to detach
6148 *
6149 * Detach all ports of @host.
6150 *
6151 * LOCKING:
6152 * Kernel thread context (may sleep).
6153 */
6154 void ata_host_detach(struct ata_host *host)
6155 {
6156 int i;
6157
6158 for (i = 0; i < host->n_ports; i++)
6159 ata_port_detach(host->ports[i]);
6160
6161 /* the host is dead now, dissociate ACPI */
6162 ata_acpi_dissociate(host);
6163 }
6164
6165 #ifdef CONFIG_PCI
6166
6167 /**
6168 * ata_pci_remove_one - PCI layer callback for device removal
6169 * @pdev: PCI device that was removed
6170 *
6171 * PCI layer indicates to libata via this hook that hot-unplug or
6172 * module unload event has occurred. Detach all ports. Resource
6173 * release is handled via devres.
6174 *
6175 * LOCKING:
6176 * Inherited from PCI layer (may sleep).
6177 */
6178 void ata_pci_remove_one(struct pci_dev *pdev)
6179 {
6180 struct device *dev = &pdev->dev;
6181 struct ata_host *host = dev_get_drvdata(dev);
6182
6183 ata_host_detach(host);
6184 }
6185
6186 /* move to PCI subsystem */
6187 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6188 {
6189 unsigned long tmp = 0;
6190
6191 switch (bits->width) {
6192 case 1: {
6193 u8 tmp8 = 0;
6194 pci_read_config_byte(pdev, bits->reg, &tmp8);
6195 tmp = tmp8;
6196 break;
6197 }
6198 case 2: {
6199 u16 tmp16 = 0;
6200 pci_read_config_word(pdev, bits->reg, &tmp16);
6201 tmp = tmp16;
6202 break;
6203 }
6204 case 4: {
6205 u32 tmp32 = 0;
6206 pci_read_config_dword(pdev, bits->reg, &tmp32);
6207 tmp = tmp32;
6208 break;
6209 }
6210
6211 default:
6212 return -EINVAL;
6213 }
6214
6215 tmp &= bits->mask;
6216
6217 return (tmp == bits->val) ? 1 : 0;
6218 }
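
/*
 * Illustrative sketch (the register offset and mask below are made-up
 * values): a driver can describe an "enable" bit in PCI config space with
 * a pci_bits entry and test it in one call.
 *
 *	static const struct pci_bits foo_enable_bits = {
 *		.reg	= 0x41,
 *		.width	= 1,
 *		.mask	= 0x80,
 *		.val	= 0x80,
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;
 */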
6219
6220 #ifdef CONFIG_PM
6221 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6222 {
6223 pci_save_state(pdev);
6224 pci_disable_device(pdev);
6225
6226 if (mesg.event & PM_EVENT_SLEEP)
6227 pci_set_power_state(pdev, PCI_D3hot);
6228 }
6229
6230 int ata_pci_device_do_resume(struct pci_dev *pdev)
6231 {
6232 int rc;
6233
6234 pci_set_power_state(pdev, PCI_D0);
6235 pci_restore_state(pdev);
6236
6237 rc = pcim_enable_device(pdev);
6238 if (rc) {
6239 dev_printk(KERN_ERR, &pdev->dev,
6240 "failed to enable device after resume (%d)\n", rc);
6241 return rc;
6242 }
6243
6244 pci_set_master(pdev);
6245 return 0;
6246 }
6247
6248 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6249 {
6250 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6251 int rc = 0;
6252
6253 rc = ata_host_suspend(host, mesg);
6254 if (rc)
6255 return rc;
6256
6257 ata_pci_device_do_suspend(pdev, mesg);
6258
6259 return 0;
6260 }
6261
6262 int ata_pci_device_resume(struct pci_dev *pdev)
6263 {
6264 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6265 int rc;
6266
6267 rc = ata_pci_device_do_resume(pdev);
6268 if (rc == 0)
6269 ata_host_resume(host);
6270 return rc;
6271 }
6272 #endif /* CONFIG_PM */
6273
6274 #endif /* CONFIG_PCI */
6275
6276 static int __init ata_parse_force_one(char **cur,
6277 struct ata_force_ent *force_ent,
6278 const char **reason)
6279 {
6280 /* FIXME: Currently, there's no way to tag init const data and
6281 * using __initdata causes build failure on some versions of
6282 * gcc. Once __initdataconst is implemented, add const to the
6283 * following structure.
6284 */
6285 static struct ata_force_param force_tbl[] __initdata = {
6286 { "40c", .cbl = ATA_CBL_PATA40 },
6287 { "80c", .cbl = ATA_CBL_PATA80 },
6288 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6289 { "unk", .cbl = ATA_CBL_PATA_UNK },
6290 { "ign", .cbl = ATA_CBL_PATA_IGN },
6291 { "sata", .cbl = ATA_CBL_SATA },
6292 { "1.5Gbps", .spd_limit = 1 },
6293 { "3.0Gbps", .spd_limit = 2 },
6294 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6295 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6296 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6297 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6298 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6299 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6300 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6301 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6302 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6303 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6304 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6305 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6306 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6307 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6308 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6309 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6310 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6311 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6312 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6313 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6314 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6315 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6316 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6317 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6318 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6319 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6320 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6321 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6322 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6323 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6324 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6325 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6326 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6327 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6328 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6329 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6330 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6331 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6332 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6333 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6334 };
6335 char *start = *cur, *p = *cur;
6336 char *id, *val, *endp;
6337 const struct ata_force_param *match_fp = NULL;
6338 int nr_matches = 0, i;
6339
6340 /* find where this param ends and update *cur */
6341 while (*p != '\0' && *p != ',')
6342 p++;
6343
6344 if (*p == '\0')
6345 *cur = p;
6346 else
6347 *cur = p + 1;
6348
6349 *p = '\0';
6350
6351 /* parse */
6352 p = strchr(start, ':');
6353 if (!p) {
6354 val = strstrip(start);
6355 goto parse_val;
6356 }
6357 *p = '\0';
6358
6359 id = strstrip(start);
6360 val = strstrip(p + 1);
6361
6362 /* parse id */
6363 p = strchr(id, '.');
6364 if (p) {
6365 *p++ = '\0';
6366 force_ent->device = simple_strtoul(p, &endp, 10);
6367 if (p == endp || *endp != '\0') {
6368 *reason = "invalid device";
6369 return -EINVAL;
6370 }
6371 }
6372
6373 force_ent->port = simple_strtoul(id, &endp, 10);
6374 	if (id == endp || *endp != '\0') {
6375 *reason = "invalid port/link";
6376 return -EINVAL;
6377 }
6378
6379 parse_val:
6380 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6381 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6382 const struct ata_force_param *fp = &force_tbl[i];
6383
6384 if (strncasecmp(val, fp->name, strlen(val)))
6385 continue;
6386
6387 nr_matches++;
6388 match_fp = fp;
6389
6390 if (strcasecmp(val, fp->name) == 0) {
6391 nr_matches = 1;
6392 break;
6393 }
6394 }
6395
6396 if (!nr_matches) {
6397 *reason = "unknown value";
6398 return -EINVAL;
6399 }
6400 if (nr_matches > 1) {
6401 		*reason = "ambiguous value";
6402 return -EINVAL;
6403 }
6404
6405 force_ent->param = *match_fp;
6406
6407 return 0;
6408 }
6409
6410 static void __init ata_parse_force_param(void)
6411 {
6412 int idx = 0, size = 1;
6413 int last_port = -1, last_device = -1;
6414 char *p, *cur, *next;
6415
6416 /* calculate maximum number of params and allocate force_tbl */
6417 for (p = ata_force_param_buf; *p; p++)
6418 if (*p == ',')
6419 size++;
6420
6421 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6422 if (!ata_force_tbl) {
6423 printk(KERN_WARNING "ata: failed to extend force table, "
6424 "libata.force ignored\n");
6425 return;
6426 }
6427
6428 /* parse and populate the table */
6429 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6430 const char *reason = "";
6431 struct ata_force_ent te = { .port = -1, .device = -1 };
6432
6433 next = cur;
6434 if (ata_parse_force_one(&next, &te, &reason)) {
6435 printk(KERN_WARNING "ata: failed to parse force "
6436 "parameter \"%s\" (%s)\n",
6437 cur, reason);
6438 continue;
6439 }
6440
6441 if (te.port == -1) {
6442 te.port = last_port;
6443 te.device = last_device;
6444 }
6445
6446 ata_force_tbl[idx++] = te;
6447
6448 last_port = te.port;
6449 last_device = te.device;
6450 }
6451
6452 ata_force_tbl_size = idx;
6453 }
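
/*
 * Hypothetical examples of the libata.force= syntax accepted by the parser
 * above; entries are comma separated, and an entry may optionally be scoped
 * with a port or port.device ID.  An unscoped entry keeps the scope of the
 * previous one (or applies everywhere if it is the first).
 *
 *	libata.force=3.0Gbps			(all ports)
 *	libata.force=1:1.5Gbps			(port 1 only)
 *	libata.force=2.00:noncq			(port 2, device 0)
 *	libata.force=1:udma/33,2:norst		(multiple entries)
 */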
6454
6455 static int __init ata_init(void)
6456 {
6457 int rc;
6458
6459 ata_parse_force_param();
6460
6461 rc = ata_sff_init();
6462 if (rc) {
6463 kfree(ata_force_tbl);
6464 return rc;
6465 }
6466
6467 libata_transport_init();
6468 ata_scsi_transport_template = ata_attach_transport();
6469 if (!ata_scsi_transport_template) {
6470 ata_sff_exit();
6471 rc = -ENOMEM;
6472 goto err_out;
6473 }
6474
6475 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6476 return 0;
6477
6478 err_out:
6479 return rc;
6480 }
6481
6482 static void __exit ata_exit(void)
6483 {
6484 ata_release_transport(ata_scsi_transport_template);
6485 libata_transport_exit();
6486 ata_sff_exit();
6487 kfree(ata_force_tbl);
6488 }
6489
6490 subsys_initcall(ata_init);
6491 module_exit(ata_exit);
6492
6493 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6494
6495 int ata_ratelimit(void)
6496 {
6497 return __ratelimit(&ratelimit);
6498 }
6499
6500 /**
6501 * ata_msleep - ATA EH owner aware msleep
6502 * @ap: ATA port to attribute the sleep to
6503 * @msecs: duration to sleep in milliseconds
6504 *
6505 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6506 * ownership is released before going to sleep and reacquired
6507 * after the sleep is complete. IOW, other ports sharing the
6508 * @ap->host will be allowed to own the EH while this task is
6509 * sleeping.
6510 *
6511 * LOCKING:
6512 * Might sleep.
6513 */
6514 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6515 {
6516 bool owns_eh = ap && ap->host->eh_owner == current;
6517
6518 if (owns_eh)
6519 ata_eh_release(ap);
6520
6521 msleep(msecs);
6522
6523 if (owns_eh)
6524 ata_eh_acquire(ap);
6525 }
6526
6527 /**
6528 * ata_wait_register - wait until register value changes
6529 * @ap: ATA port to wait register for, can be NULL
6530 * @reg: IO-mapped register
6531 * @mask: Mask to apply to read register value
6532 * @val: Wait condition
6533 * @interval: polling interval in milliseconds
6534 * @timeout: timeout in milliseconds
6535 *
6536 * Waiting for some bits of register to change is a common
6537 * operation for ATA controllers. This function reads 32bit LE
6538 * IO-mapped register @reg and tests for the following condition.
6539 *
6540 * (*@reg & mask) != val
6541 *
6542 * If the condition is met, it returns; otherwise, the process is
6543 *	repeated after @interval until timeout.
6544 *
6545 * LOCKING:
6546 * Kernel thread context (may sleep)
6547 *
6548 * RETURNS:
6549 * The final register value.
6550 */
6551 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6552 unsigned long interval, unsigned long timeout)
6553 {
6554 unsigned long deadline;
6555 u32 tmp;
6556
6557 tmp = ioread32(reg);
6558
6559 /* Calculate timeout _after_ the first read to make sure
6560 * preceding writes reach the controller before starting to
6561 * eat away the timeout.
6562 */
6563 deadline = ata_deadline(jiffies, timeout);
6564
6565 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6566 ata_msleep(ap, interval);
6567 tmp = ioread32(reg);
6568 }
6569
6570 return tmp;
6571 }
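
/*
 * Illustrative sketch (the FOO_* symbols are hypothetical): waiting up to
 * one second, polling every 10ms, for a BSY-like bit to clear in a memory
 * mapped status register.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(ap, mmio + FOO_STATUS,
 *				   FOO_STAT_BUSY, FOO_STAT_BUSY, 10, 1000);
 *	if (status & FOO_STAT_BUSY)
 *		return -EBUSY;
 */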
6572
6573 /*
6574 * Dummy port_ops
6575 */
6576 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6577 {
6578 return AC_ERR_SYSTEM;
6579 }
6580
6581 static void ata_dummy_error_handler(struct ata_port *ap)
6582 {
6583 /* truly dummy */
6584 }
6585
6586 struct ata_port_operations ata_dummy_port_ops = {
6587 .qc_prep = ata_noop_qc_prep,
6588 .qc_issue = ata_dummy_qc_issue,
6589 .error_handler = ata_dummy_error_handler,
6590 };
6591
6592 const struct ata_port_info ata_dummy_port_info = {
6593 .port_ops = &ata_dummy_port_ops,
6594 };
6595
6596 /*
6597 * libata is essentially a library of internal helper functions for
6598 * low-level ATA host controller drivers. As such, the API/ABI is
6599 * likely to change as new drivers are added and updated.
6600 * Do not depend on ABI/API stability.
6601 */
6602 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6603 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6604 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6605 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6606 EXPORT_SYMBOL_GPL(sata_port_ops);
6607 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6608 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6609 EXPORT_SYMBOL_GPL(ata_link_next);
6610 EXPORT_SYMBOL_GPL(ata_dev_next);
6611 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6612 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6613 EXPORT_SYMBOL_GPL(ata_host_init);
6614 EXPORT_SYMBOL_GPL(ata_host_alloc);
6615 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6616 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6617 EXPORT_SYMBOL_GPL(ata_host_start);
6618 EXPORT_SYMBOL_GPL(ata_host_register);
6619 EXPORT_SYMBOL_GPL(ata_host_activate);
6620 EXPORT_SYMBOL_GPL(ata_host_detach);
6621 EXPORT_SYMBOL_GPL(ata_sg_init);
6622 EXPORT_SYMBOL_GPL(ata_qc_complete);
6623 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6624 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6625 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6626 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6627 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6628 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6629 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6630 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6631 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6632 EXPORT_SYMBOL_GPL(ata_mode_string);
6633 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6634 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6635 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6636 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6637 EXPORT_SYMBOL_GPL(ata_dev_disable);
6638 EXPORT_SYMBOL_GPL(sata_set_spd);
6639 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6640 EXPORT_SYMBOL_GPL(sata_link_debounce);
6641 EXPORT_SYMBOL_GPL(sata_link_resume);
6642 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6643 EXPORT_SYMBOL_GPL(ata_std_prereset);
6644 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6645 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6646 EXPORT_SYMBOL_GPL(ata_std_postreset);
6647 EXPORT_SYMBOL_GPL(ata_dev_classify);
6648 EXPORT_SYMBOL_GPL(ata_dev_pair);
6649 EXPORT_SYMBOL_GPL(ata_ratelimit);
6650 EXPORT_SYMBOL_GPL(ata_msleep);
6651 EXPORT_SYMBOL_GPL(ata_wait_register);
6652 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6653 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6654 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6655 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6656 EXPORT_SYMBOL_GPL(sata_scr_valid);
6657 EXPORT_SYMBOL_GPL(sata_scr_read);
6658 EXPORT_SYMBOL_GPL(sata_scr_write);
6659 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6660 EXPORT_SYMBOL_GPL(ata_link_online);
6661 EXPORT_SYMBOL_GPL(ata_link_offline);
6662 #ifdef CONFIG_PM
6663 EXPORT_SYMBOL_GPL(ata_host_suspend);
6664 EXPORT_SYMBOL_GPL(ata_host_resume);
6665 #endif /* CONFIG_PM */
6666 EXPORT_SYMBOL_GPL(ata_id_string);
6667 EXPORT_SYMBOL_GPL(ata_id_c_string);
6668 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6669 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6670
6671 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6672 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6673 EXPORT_SYMBOL_GPL(ata_timing_compute);
6674 EXPORT_SYMBOL_GPL(ata_timing_merge);
6675 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6676
6677 #ifdef CONFIG_PCI
6678 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6679 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6680 #ifdef CONFIG_PM
6681 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6682 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6683 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6684 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6685 #endif /* CONFIG_PM */
6686 #endif /* CONFIG_PCI */
6687
6688 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6689 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6690 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6691 EXPORT_SYMBOL_GPL(ata_port_desc);
6692 #ifdef CONFIG_PCI
6693 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6694 #endif /* CONFIG_PCI */
6695 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6696 EXPORT_SYMBOL_GPL(ata_link_abort);
6697 EXPORT_SYMBOL_GPL(ata_port_abort);
6698 EXPORT_SYMBOL_GPL(ata_port_freeze);
6699 EXPORT_SYMBOL_GPL(sata_async_notification);
6700 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6701 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6702 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6703 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6704 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6705 EXPORT_SYMBOL_GPL(ata_do_eh);
6706 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6707
6708 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6709 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6710 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6711 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6712 EXPORT_SYMBOL_GPL(ata_cable_sata);
6713