/*
 * Adaptec AIC7xxx device driver for Linux.
 *
 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#234 $
 *
 * Copyright (c) 1994 John Aycock
 *   The University of Calgary Department of Computer Science.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
 * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
 * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
 * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
 * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
 * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
 * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
 * ANSI SCSI-2 specification (draft 10c), ...
 *
 * --------------------------------------------------------------------------
 *
 * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
 *
 * Substantially modified to include support for wide and twin bus
 * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
 * SCB paging, and other rework of the code.
 *
 * --------------------------------------------------------------------------
 * Copyright (c) 1994-2000 Justin T. Gibbs.
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 *---------------------------------------------------------------------------
 *
 * Thanks also go to (in alphabetical order) the following:
 *
 *   Rory Bolt     - Sequencer bug fixes
 *   Jay Estabrook - Initial DEC Alpha support
 *   Doug Ledford  - Much needed abort/reset bug fixes
 *   Kai Makisara  - DMAing of SCBs
 *
 * A boot-time option was also added for not resetting the SCSI bus.
 *
 *    Form:  aic7xxx=extended
 *           aic7xxx=no_reset
 *           aic7xxx=verbose
 *
 * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
 *
 * Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp
 */

/*
 * Further driver modifications made by Doug Ledford <dledford@redhat.com>
 *
 * Copyright (c) 1997-1999 Doug Ledford
 *
 * These changes are released under the same licensing terms as the FreeBSD
 * driver written by Justin Gibbs. Please see his Copyright notice above
 * for the exact terms and conditions covering my changes as well as the
 * warranty statement.
 *
 * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
 * but are not limited to:
 *
 *  1: Import of the latest FreeBSD sequencer code for this driver
 *  2: Modification of kernel code to accommodate different sequencer semantics
 *  3: Extensive changes throughout kernel portion of driver to improve
 *     abort/reset processing and error handling
 *  4: Other work contributed by various people on the Internet
 *  5: Changes to printk information and verbosity selection code
 *  6: General reliability related changes, especially in IRQ management
 *  7: Modifications to the default probe/attach order for supported cards
 *  8: SMP friendliness has been improved
 *
 */

#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include <scsi/scsicam.h>

/*
 * Include aiclib.c as part of our
 * "module dependencies are hard" workaround.
 */
#include "aiclib.c"

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
#include <linux/init.h>		/* __setup */
#endif


#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
#include "sd.h"			/* For geometry detection */
#endif

#include <linux/mm.h>		/* For fetching system memory size */
#include <linux/blk.h>		/* For block_size() */
/*
 * Lock protecting manipulation of the ahc softc list.
 */
spinlock_t ahc_list_spinlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/* For dynamic sglist size calculation. */
u_int ahc_linux_nseg;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
struct proc_dir_entry proc_scsi_aic7xxx = {
	PROC_SCSI_AIC7XXX, 7, "aic7xxx",
	S_IFDIR | S_IRUGO | S_IXUGO, 2,
	0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
#endif

/*
 * Set this to the delay in milliseconds after SCSI bus reset.
 * Note, we honor this only for the initial bus reset.
 * The scsi error recovery code performs its own bus settle
 * delay handling for error recovery actions.
 */
#ifdef CONFIG_AIC7XXX_RESET_DELAY_MS
#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS
#else
#define AIC7XXX_RESET_DELAY 5000
#endif

/*
 * Control collection of SCSI transfer statistics for the /proc filesystem.
 *
 * NOTE: Do NOT enable this when running on kernel versions 1.2.x and below.
 * NOTE: This does affect performance since it has to maintain statistics.
 */
#ifdef CONFIG_AIC7XXX_PROC_STATS
#define AIC7XXX_PROC_STATS
#endif

/*
 * To change the default number of tagged transactions allowed per-device,
 * add a line to the lilo.conf file like:
 * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
 * which will result in the first four devices on the first two
 * controllers being set to a tagged queue depth of 32.
 *
 * tag_commands is an array of 16 entries to allow for wide and twin adapters.
 * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
 * for channel 1.
 */
typedef struct {
	uint8_t tag_commands[16];	/* Allow for wide/twin adapters. */
} adapter_tag_info_t;

/*
 * Modify this as you see fit for your system.
 *
 * 0			tagged queuing disabled
 * 1 <= n <= 253	n == max tags ever dispatched.
 *
 * The driver will throttle the number of commands dispatched to a
 * device if it returns queue full. For devices with a fixed maximum
 * queue depth, the driver will eventually determine this depth and
 * lock it in (a console message is printed to indicate that a lock
 * has occurred). On some devices, queue full is returned for a temporary
 * resource shortage. These devices will return queue full at varying
 * depths. The driver will throttle back when the queue fulls occur and
 * attempt to slowly increase the depth over time as the device recovers
 * from the resource shortage.
 *
 * In this example, the first line will disable tagged queueing for all
 * the devices on the first probed aic7xxx adapter.
 *
 * The second line enables tagged queueing with 4 commands/LUN for IDs
 * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
 * driver to attempt to use up to 64 tags for ID 1.
 *
 * The third line is the same as the first line.
 *
 * The fourth line disables tagged queueing for devices 0 and 3. It
 * enables tagged queueing for the other IDs, with 16 commands/LUN
 * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
 * IDs 2, 5-7, and 9-15.
 */

/*
 * NOTE: The below structure is for reference only, the actual structure
 *       to modify in order to change things is just below this comment block.
adapter_tag_info_t aic7xxx_tag_info[] =
{
	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
	{{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
	{{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
};
*/

#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
#else
#define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE
#endif

#define AIC7XXX_CONFIGED_TAG_COMMANDS {					\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE		\
}

/*
 * By default, use the number of commands specified by
 * the user's kernel configuration.
 */
static adapter_tag_info_t aic7xxx_tag_info[] =
{
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS}
};

/*
 * DV option:
 *
 * positive value = DV Enabled
 * zero		  = DV Disabled
 * negative value = DV Default for adapter type/seeprom
 */
#ifdef CONFIG_AIC7XXX_DV_SETTING
#define AIC7XXX_CONFIGED_DV CONFIG_AIC7XXX_DV_SETTING
#else
#define AIC7XXX_CONFIGED_DV -1
#endif

static int8_t aic7xxx_dv_settings[] =
{
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV
};
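
/*
 * Illustrative only: given the "dv:<dv_settings>" syntax documented in
 * MODULE_PARM_DESC below and the '.' separators usable inside brace
 * options, a hypothetical boot string such as
 *
 *	aic7xxx=dv:{-1.0.1}
 *
 * would leave controller 0 at its SEEPROM default, disable DV on
 * controller 1, and force DV on for controller 2, since
 * aic_parse_brace_option() assigns values by instance number.
 */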

/*
 * There should be a specific return value for this in scsi.h, but
 * it seems that most drivers ignore it.
 */
#define DID_UNDERFLOW   DID_ERROR

void
ahc_print_path(struct ahc_softc *ahc, struct scb *scb)
{
	printk("(scsi%d:%c:%d:%d): ",
	       ahc->platform_data->host->host_no,
	       scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X',
	       scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1,
	       scb != NULL ? SCB_GET_LUN(scb) : -1);
}

/*
 * XXX - these options apply unilaterally to _all_ 274x/284x/294x
 *       cards in the system. This should be fixed. Exceptions to this
 *       rule are noted in the comments.
 */

/*
 * Skip the scsi bus reset. Non-0 makes us skip the reset at startup. This
 * has no effect on any later resets that might occur due to things like
 * SCSI bus timeouts.
 */
static uint32_t aic7xxx_no_reset;

/*
 * Certain PCI motherboards will scan PCI devices from highest to lowest,
 * others scan from lowest to highest, and they tend to do all kinds of
 * strange things when they come into contact with PCI bridge chips. The
 * net result of all this is that the PCI card that is actually used to boot
 * the machine is very hard to detect. Most motherboards go from lowest
 * PCI slot number to highest, and the first SCSI controller found is the
 * one you boot from. The only exceptions to this are when a controller
 * has its BIOS disabled. So, we by default sort all of our SCSI controllers
 * from lowest PCI slot number to highest PCI slot number. We also force
 * all controllers with their BIOS disabled to the end of the list. This
 * works on *almost* all computers. Where it doesn't work, we have this
 * option. Setting this option to non-0 will reverse the order of the sort
 * to highest first, then lowest, but will still leave cards with their BIOS
 * disabled at the very end. That should fix everyone up unless there are
 * really strange circumstances.
 */
static uint32_t aic7xxx_reverse_scan;

/*
 * Should we force EXTENDED translation on a controller.
 * 0 == Use whatever is in the SEEPROM or default to off
 * 1 == Use whatever is in the SEEPROM or default to on
 */
static uint32_t aic7xxx_extended;

/*
 * PCI bus parity checking of the Adaptec controllers. This is somewhat
 * dubious at best. To my knowledge, this option has never actually
 * solved a PCI parity problem, but on certain machines with broken PCI
 * chipset configurations where stray PCI transactions with bad parity are
 * the norm rather than the exception, the error messages can be overwhelming.
 * It's included in the driver for completeness.
 *   0	   = Shut off PCI parity check
 *   non-0 = reverse polarity PCI parity checking
 */
static uint32_t aic7xxx_pci_parity = ~0;

/*
 * Certain newer motherboards have put new PCI based devices into the
 * IO spaces that used to typically be occupied by VLB or EISA cards.
 * This overlap can cause these newer motherboards to lock up when scanned
 * for older EISA and VLB devices. Setting this option to non-0 will
 * cause the driver to skip scanning for any VLB or EISA controllers and
 * only support the PCI controllers. NOTE: this means that if the kernel
 * is compiled with PCI support disabled, then setting this to non-0
 * would result in never finding any devices :)
 */
#ifndef CONFIG_AIC7XXX_PROBE_EISA_VL
uint32_t aic7xxx_probe_eisa_vl;
#else
uint32_t aic7xxx_probe_eisa_vl = ~0;
#endif

/*
 * There are lots of broken chipsets in the world. Some of them will
 * violate the PCI spec when we issue byte sized memory writes to our
 * controller. I/O mapped register access, if allowed by the given
 * platform, will work in almost all cases.
 */
uint32_t aic7xxx_allow_memio = ~0;

/*
 * aic7xxx_detect() has been run, so register all device arrivals
 * immediately with the system rather than deferring to the sorted
 * attachment performed by aic7xxx_detect().
 */
int aic7xxx_detect_complete;

/*
 * So that we can set how long each device is given as a selection timeout.
 * The table of values goes like this:
 *   0 - 256ms
 *   1 - 128ms
 *   2 - 64ms
 *   3 - 32ms
 * We default to 256ms because some older devices need a longer time
 * to respond to initial selection.
 */
static uint32_t aic7xxx_seltime;
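
/*
 * Example (hypothetical command line): booting with "aic7xxx=seltime:2"
 * stores 2 here via the generic ':' handler in aic7xxx_setup() below,
 * selecting a 64ms selection timeout per the table above.
 */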

/*
 * Certain devices do not perform any aging on commands. Should the
 * device be saturated by commands in one portion of the disk, it is
 * possible for transactions on far away sectors to never be serviced.
 * To handle these devices, we can periodically send an ordered tag to
 * force all outstanding transactions to be serviced prior to a new
 * transaction.
 */
uint32_t aic7xxx_periodic_otag;

/*
 * Module information and settable options.
 */
#ifdef MODULE
static char *aic7xxx = NULL;
/*
 * Just in case someone uses commas to separate items on the insmod
 * command line, we define a dummy buffer here to avoid having insmod
 * write wild stuff into our code segment.
 */
static char dummy_buffer[60] = "Please don't trounce on me insmod!!\n";

MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
MODULE_DESCRIPTION("Adaptec Aic77XX/78XX SCSI Host Bus Adapter driver");
#ifdef MODULE_LICENSE
MODULE_LICENSE("Dual BSD/GPL");
#endif
MODULE_PARM(aic7xxx, "s");
MODULE_PARM_DESC(aic7xxx,
"period-delimited options string.\n"
"	verbose			Enable verbose/diagnostic logging\n"
"	allow_memio		Allow device registers to be memory mapped\n"
"	debug			Bitmask of debug values to enable\n"
"	no_probe		Toggle EISA/VLB controller probing\n"
"	probe_eisa_vl		Toggle EISA/VLB controller probing\n"
"	no_reset		Suppress initial bus resets\n"
"	extended		Enable extended geometry on all controllers\n"
"	periodic_otag		Send an ordered tagged transaction\n"
"				periodically to prevent tag starvation.\n"
"				This may be required by some older disk\n"
"				drives or RAID arrays.\n"
"	reverse_scan		Sort PCI devices highest Bus/Slot to lowest\n"
"	tag_info:<tag_str>	Set per-target tag depth\n"
"	global_tag_depth:<int>	Global tag depth for every target\n"
"				on every bus\n"
"	dv:<dv_settings>	Set per-controller Domain Validation Setting.\n"
"	seltime:<int>		Selection Timeout\n"
"				(0/256ms,1/128ms,2/64ms,3/32ms)\n"
"\n"
"	Sample /etc/modules.conf line:\n"
"		Toggle EISA/VLB probing\n"
"		Set tag depth on Controller 1/Target 1 to 10 tags\n"
"		Shorten the selection timeout to 128ms\n"
"\n"
"	options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
);
#endif

static void ahc_linux_handle_scsi_status(struct ahc_softc *,
					 struct ahc_linux_device *,
					 struct scb *);
static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
					 Scsi_Cmnd *cmd);
static void ahc_linux_filter_inquiry(struct ahc_softc*, struct ahc_devinfo*);
static void ahc_linux_sem_timeout(u_long arg);
static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
static void ahc_linux_release_simq(u_long arg);
static void ahc_linux_dev_timed_unfreeze(u_long arg);
static int  ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
static void ahc_linux_size_nseg(void);
static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);
static void ahc_linux_start_dv(struct ahc_softc *ahc);
static void ahc_linux_dv_timeout(struct scsi_cmnd *cmd);
static int  ahc_linux_dv_thread(void *data);
static void ahc_linux_kill_dv_thread(struct ahc_softc *ahc);
static void ahc_linux_dv_target(struct ahc_softc *ahc, u_int target);
static void ahc_linux_dv_transition(struct ahc_softc *ahc,
				    struct scsi_cmnd *cmd,
				    struct ahc_devinfo *devinfo,
				    struct ahc_linux_target *targ);
static void ahc_linux_dv_fill_cmd(struct ahc_softc *ahc,
				  struct scsi_cmnd *cmd,
				  struct ahc_devinfo *devinfo);
static void ahc_linux_dv_inq(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo,
			     struct ahc_linux_target *targ,
			     u_int request_length);
static void ahc_linux_dv_tur(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo);
static void ahc_linux_dv_rebd(struct ahc_softc *ahc,
			      struct scsi_cmnd *cmd,
			      struct ahc_devinfo *devinfo,
			      struct ahc_linux_target *targ);
static void ahc_linux_dv_web(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo,
			     struct ahc_linux_target *targ);
static void ahc_linux_dv_reb(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo,
			     struct ahc_linux_target *targ);
static void ahc_linux_dv_su(struct ahc_softc *ahc,
			    struct scsi_cmnd *cmd,
			    struct ahc_devinfo *devinfo,
			    struct ahc_linux_target *targ);
static int ahc_linux_fallback(struct ahc_softc *ahc,
			      struct ahc_devinfo *devinfo);
static void ahc_linux_dv_complete(Scsi_Cmnd *cmd);
static void ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ);
static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
				     struct ahc_devinfo *devinfo);
static u_int ahc_linux_user_dv_setting(struct ahc_softc *ahc);
static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
					 struct ahc_linux_device *dev);
static struct ahc_linux_target*	ahc_linux_alloc_target(struct ahc_softc*,
						       u_int, u_int);
static void			ahc_linux_free_target(struct ahc_softc*,
						      struct ahc_linux_target*);
static struct ahc_linux_device*	ahc_linux_alloc_device(struct ahc_softc*,
						       struct ahc_linux_target*,
						       u_int);
static void			ahc_linux_free_device(struct ahc_softc*,
						      struct ahc_linux_device*);
static void ahc_linux_run_device_queue(struct ahc_softc*,
				       struct ahc_linux_device*);
static void ahc_linux_setup_tag_info_global(char *p);
static aic_option_callback_t ahc_linux_setup_tag_info;
static aic_option_callback_t ahc_linux_setup_dv;
static int  aic7xxx_setup(char *s);
static int  ahc_linux_next_unit(void);
static void ahc_runq_tasklet(unsigned long data);
static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc);

/********************************* Inlines ************************************/
static __inline void ahc_schedule_runq(struct ahc_softc *ahc);
static __inline struct ahc_linux_device*
		     ahc_linux_get_device(struct ahc_softc *ahc, u_int channel,
					  u_int target, u_int lun, int alloc);
static __inline void ahc_schedule_completeq(struct ahc_softc *ahc);
static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc,
						  struct ahc_linux_device *dev);
static __inline struct ahc_linux_device *
		     ahc_linux_next_device_to_run(struct ahc_softc *ahc);
static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc);
static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);

static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
				      struct ahc_dma_seg *sg,
				      bus_addr_t addr, bus_size_t len);

static __inline void
ahc_schedule_completeq(struct ahc_softc *ahc)
{
	if ((ahc->platform_data->flags & AHC_RUN_CMPLT_Q_TIMER) == 0) {
		ahc->platform_data->flags |= AHC_RUN_CMPLT_Q_TIMER;
		ahc->platform_data->completeq_timer.expires = jiffies;
		add_timer(&ahc->platform_data->completeq_timer);
	}
}

/*
 * Must be called with our lock held.
 */
static __inline void
ahc_schedule_runq(struct ahc_softc *ahc)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	tasklet_schedule(&ahc->platform_data->runq_tasklet);
#else
	/*
	 * Tasklets are not available, so run inline.
	 */
	ahc_runq_tasklet((unsigned long)ahc);
#endif
}

static __inline struct ahc_linux_device*
ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
		     u_int lun, int alloc)
{
	struct ahc_linux_target *targ;
	struct ahc_linux_device *dev;
	u_int target_offset;

	target_offset = target;
	if (channel != 0)
		target_offset += 8;
	targ = ahc->platform_data->targets[target_offset];
	if (targ == NULL) {
		if (alloc != 0) {
			targ = ahc_linux_alloc_target(ahc, channel, target);
			if (targ == NULL)
				return (NULL);
		} else
			return (NULL);
	}
	dev = targ->devices[lun];
	if (dev == NULL && alloc != 0)
		dev = ahc_linux_alloc_device(ahc, targ, lun);
	return (dev);
}

#define AHC_LINUX_MAX_RETURNED_ERRORS 4
static struct ahc_cmd *
ahc_linux_run_complete_queue(struct ahc_softc *ahc)
{
	struct ahc_cmd *acmd;
	u_long done_flags;
	int with_errors;

	with_errors = 0;
	ahc_done_lock(ahc, &done_flags);
	while ((acmd = TAILQ_FIRST(&ahc->platform_data->completeq)) != NULL) {
		Scsi_Cmnd *cmd;

		if (with_errors > AHC_LINUX_MAX_RETURNED_ERRORS) {
			/*
			 * Linux uses stack recursion to requeue
			 * commands that need to be retried. Avoid
			 * blowing out the stack by "spoon feeding"
			 * commands that completed with error back to
			 * the operating system in case they are going
			 * to be retried. "ick"
			 */
			ahc_schedule_completeq(ahc);
			break;
		}
		TAILQ_REMOVE(&ahc->platform_data->completeq,
			     acmd, acmd_links.tqe);
		cmd = &acmd_scsi_cmd(acmd);
		cmd->host_scribble = NULL;
		if (ahc_cmd_get_transaction_status(cmd) != DID_OK
		 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
			with_errors++;

		cmd->scsi_done(cmd);
	}
	ahc_done_unlock(ahc, &done_flags);
	return (acmd);
}

static __inline void
ahc_linux_check_device_queue(struct ahc_softc *ahc,
			     struct ahc_linux_device *dev)
{
	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0
	 && dev->active == 0) {
		dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen--;
	}

	if (TAILQ_FIRST(&dev->busyq) == NULL
	 || dev->openings == 0 || dev->qfrozen != 0)
		return;

	ahc_linux_run_device_queue(ahc, dev);
}

static __inline struct ahc_linux_device *
ahc_linux_next_device_to_run(struct ahc_softc *ahc)
{

	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
	 || (ahc->platform_data->qfrozen != 0
	  && AHC_DV_SIMQ_FROZEN(ahc) == 0))
		return (NULL);
	return (TAILQ_FIRST(&ahc->platform_data->device_runq));
}

static __inline void
ahc_linux_run_device_queues(struct ahc_softc *ahc)
{
	struct ahc_linux_device *dev;

	while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
		TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
		dev->flags &= ~AHC_DEV_ON_RUN_LIST;
		ahc_linux_check_device_queue(ahc, dev);
	}
}

static __inline void
ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
{
	Scsi_Cmnd *cmd;

	cmd = scb->io_ctx;
	ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
	if (cmd->use_sg != 0) {
		struct scatterlist *sg;

		sg = (struct scatterlist *)cmd->request_buffer;
		pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
			     scsi_to_pci_dma_dir(cmd->sc_data_direction));
	} else if (cmd->request_bufflen != 0) {
		pci_unmap_single(ahc->dev_softc,
				 scb->platform_data->buf_busaddr,
				 cmd->request_bufflen,
				 scsi_to_pci_dma_dir(cmd->sc_data_direction));
	}
}

static __inline int
ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
		  struct ahc_dma_seg *sg, bus_addr_t addr, bus_size_t len)
{
	int consumed;

	if ((scb->sg_count + 1) > AHC_NSEG)
		panic("Too few segs for dma mapping. "
		      "Increase AHC_NSEG\n");

	consumed = 1;
	sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
	scb->platform_data->xfer_len += len;

	if (sizeof(bus_addr_t) > 4
	 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0)
		len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK;

	sg->len = ahc_htole32(len);
	return (consumed);
}

/************************  Host template entry points *************************/
static int	   ahc_linux_detect(Scsi_Host_Template *);
static int	   ahc_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
static const char *ahc_linux_info(struct Scsi_Host *);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static int	   ahc_linux_slave_alloc(Scsi_Device *);
static int	   ahc_linux_slave_configure(Scsi_Device *);
static void	   ahc_linux_slave_destroy(Scsi_Device *);
#if defined(__i386__)
static int	   ahc_linux_biosparam(struct scsi_device*,
				       struct block_device*,
				       sector_t, int[]);
#endif
#else
static int	   ahc_linux_release(struct Scsi_Host *);
static void	   ahc_linux_select_queue_depth(struct Scsi_Host *host,
						Scsi_Device *scsi_devs);
#if defined(__i386__)
static int	   ahc_linux_biosparam(Disk *, kdev_t, int[]);
#endif
#endif
static int	   ahc_linux_bus_reset(Scsi_Cmnd *);
static int	   ahc_linux_dev_reset(Scsi_Cmnd *);
static int	   ahc_linux_abort(Scsi_Cmnd *);

/*
 * Calculate a safe value for AHC_NSEG (as expressed through ahc_linux_nseg).
 *
 * In pre-2.5.X...
 * The midlayer allocates an S/G array dynamically when a command is issued
 * using SCSI malloc. This array, which is in an OS dependent format that
 * must later be copied to our private S/G list, is sized to house just the
 * number of segments needed for the current transfer. Since the code that
 * sizes the SCSI malloc pool does not take into consideration fragmentation
 * of the pool, executing transactions numbering just a fraction of our
 * concurrent transaction limit with list lengths approaching AHC_NSEG will
 * quickly deplete the SCSI malloc pool of usable space. Unfortunately, the
 * mid-layer does not properly handle these SCSI malloc failures for the S/G
 * array and the result can be a lockup of the I/O subsystem. We try to size
 * our S/G list so that it satisfies our driver's allocation requirements in
 * addition to avoiding fragmentation of the SCSI malloc pool.
 */
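/*
 * A worked example of the sizing loop below (illustrative only):
 * assuming a 4KB PAGE_SIZE, a hypothetical 32-byte struct scatterlist,
 * and AHC_LINUX_MIN_NSEG <= 32, the candidate sizes are 1024, 2048,
 * and 4096 bytes, yielding 32, 64, and 128 segments respectively.
 * All three divide evenly (remainder 0), so the first candidate is
 * retained and ahc_linux_nseg ends up at 32.
 */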
static void
ahc_linux_size_nseg(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	u_int cur_size;
	u_int best_size;

	/*
	 * The SCSI allocator rounds to the nearest 512 bytes
	 * and cannot allocate across a page boundary. Our algorithm
	 * is to start at 1K of scsi malloc space per-command and
	 * loop through all factors of the PAGE_SIZE and pick the best.
	 */
	best_size = 0;
	for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
		u_int nseg;

		nseg = cur_size / sizeof(struct scatterlist);
		if (nseg < AHC_LINUX_MIN_NSEG)
			continue;

		if (best_size == 0) {
			best_size = cur_size;
			ahc_linux_nseg = nseg;
		} else {
			u_int best_rem;
			u_int cur_rem;

			/*
			 * Compare the traits of the current "best_size"
			 * with the current size to determine if the
			 * current size is a better size.
			 */
			best_rem = best_size % sizeof(struct scatterlist);
			cur_rem = cur_size % sizeof(struct scatterlist);
			if (cur_rem < best_rem) {
				best_size = cur_size;
				ahc_linux_nseg = nseg;
			}
		}
	}
#endif
}

/*
 * Try to detect an Adaptec 7XXX controller.
 */
static int
ahc_linux_detect(Scsi_Host_Template *template)
{
	struct	ahc_softc *ahc;
	int	found;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/*
	 * It is a bug that the upper layer takes
	 * this lock just prior to calling us.
	 */
	spin_unlock_irq(&io_request_lock);
#endif

	/*
	 * Sanity checking of Linux SCSI data structures so
	 * that some of our hacks^H^H^H^H^Hassumptions aren't
	 * violated.
	 */
	if (offsetof(struct ahc_cmd_internal, end)
	  > offsetof(struct scsi_cmnd, host_scribble)) {
		printf("ahc_linux_detect: SCSI data structures changed.\n");
		printf("ahc_linux_detect: Unable to attach\n");
		return (0);
	}
	ahc_linux_size_nseg();
#ifdef MODULE
	/*
	 * If we've been passed any parameters, process them now.
	 */
	if (aic7xxx)
		aic7xxx_setup(aic7xxx);
	if (dummy_buffer[0] != 'P')
		printk(KERN_WARNING
"aic7xxx: Please read the file /usr/src/linux/drivers/scsi/README.aic7xxx\n"
"aic7xxx: to see the proper way to specify options to the aic7xxx module\n"
"aic7xxx: Specifically, don't use any commas when passing arguments to\n"
"aic7xxx: insmod or else it might trash certain memory areas.\n");
#endif

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
	template->proc_name = "aic7xxx";
#else
	template->proc_dir = &proc_scsi_aic7xxx;
#endif

	/*
	 * Initialize our softc list lock prior to
	 * probing for any adapters.
	 */
	ahc_list_lockinit();

#ifdef CONFIG_PCI
	ahc_linux_pci_init();
#endif

#ifdef CONFIG_EISA
	ahc_linux_eisa_init();
#endif

	/*
	 * Register with the SCSI layer all
	 * controllers we've found.
	 */
	found = 0;
	TAILQ_FOREACH(ahc, &ahc_tailq, links) {

		if (ahc_linux_register_host(ahc, template) == 0)
			found++;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	spin_lock_irq(&io_request_lock);
#endif
	aic7xxx_detect_complete++;
	return (found);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * Free the passed in Scsi_Host memory structures prior to unloading the
 * module.
 */
int
ahc_linux_release(struct Scsi_Host *host)
{
	struct ahc_softc *ahc;
	u_long l;

	ahc_list_lock(&l);
	if (host != NULL) {

		/*
		 * We should be able to just perform
		 * the free directly, but check our
		 * list for extra sanity.
		 */
		ahc = ahc_find_softc(*(struct ahc_softc **)host->hostdata);
		if (ahc != NULL) {
			u_long s;

			ahc_lock(ahc, &s);
			ahc_intr_enable(ahc, FALSE);
			ahc_unlock(ahc, &s);
			ahc_free(ahc);
		}
	}
	ahc_list_unlock(&l);
	return (0);
}
#endif

/*
 * Return a string describing the driver.
 */
static const char *
ahc_linux_info(struct Scsi_Host *host)
{
	static char buffer[512];
	char	ahc_info[256];
	char   *bp;
	struct ahc_softc *ahc;

	bp = &buffer[0];
	ahc = *(struct ahc_softc **)host->hostdata;
	memset(bp, 0, sizeof(buffer));
	strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev ");
	strcat(bp, AIC7XXX_DRIVER_VERSION);
	strcat(bp, "\n");
	strcat(bp, "        <");
	strcat(bp, ahc->description);
	strcat(bp, ">\n");
	strcat(bp, "        ");
	ahc_controller_info(ahc, ahc_info);
	strcat(bp, ahc_info);
	strcat(bp, "\n");

	return (bp);
}

/*
 * Queue an SCB to the controller.
 */
static int
ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
{
	struct	ahc_softc *ahc;
	struct	ahc_linux_device *dev;
	u_long	flags;

	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;

	/*
	 * Save the callback on completion function.
	 */
	cmd->scsi_done = scsi_done;

	ahc_midlayer_entrypoint_lock(ahc, &flags);

	/*
	 * Close the race of a command that was in the process of
	 * being queued to us just as our simq was frozen. Let
	 * DV commands through so long as we are only frozen to
	 * perform DV.
	 */
	if (ahc->platform_data->qfrozen != 0
	 && AHC_DV_CMD(cmd) == 0) {

		ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
		ahc_linux_queue_cmd_complete(ahc, cmd);
		ahc_schedule_completeq(ahc);
		ahc_midlayer_entrypoint_unlock(ahc, &flags);
		return (0);
	}
	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
				   cmd->device->lun, /*alloc*/TRUE);
	if (dev == NULL) {
		ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
		ahc_linux_queue_cmd_complete(ahc, cmd);
		ahc_schedule_completeq(ahc);
		ahc_midlayer_entrypoint_unlock(ahc, &flags);
		printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n",
		       ahc_name(ahc));
		return (0);
	}
	cmd->result = CAM_REQ_INPROG << 16;
	TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe);
	if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
		dev->flags |= AHC_DEV_ON_RUN_LIST;
		ahc_linux_run_device_queues(ahc);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
	return (0);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static int
ahc_linux_slave_alloc(Scsi_Device *device)
{
	struct	ahc_softc *ahc;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id);
	return (0);
}

static int
ahc_linux_slave_configure(Scsi_Device *device)
{
	struct	ahc_softc *ahc;
	struct	ahc_linux_device *dev;
	u_long	flags;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id);
	ahc_midlayer_entrypoint_lock(ahc, &flags);
	/*
	 * Since Linux has attached to the device, configure
	 * it so we don't free and allocate the device
	 * structure on every command.
	 */
	dev = ahc_linux_get_device(ahc, device->channel,
				   device->id, device->lun,
				   /*alloc*/TRUE);
	if (dev != NULL) {
		dev->flags &= ~AHC_DEV_UNCONFIGURED;
		dev->scsi_device = device;
		ahc_linux_device_queue_depth(ahc, dev);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
	return (0);
}

static void
ahc_linux_slave_destroy(Scsi_Device *device)
{
	struct	ahc_softc *ahc;
	struct	ahc_linux_device *dev;
	u_long	flags;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id);
	ahc_midlayer_entrypoint_lock(ahc, &flags);
	dev = ahc_linux_get_device(ahc, device->channel,
				   device->id, device->lun,
				   /*alloc*/FALSE);
	/*
	 * Filter out "silly" deletions of real devices by only
	 * deleting devices that have had slave_configure()
	 * called on them. All other devices that have not
	 * been configured will automatically be deleted by
	 * the refcounting process.
	 */
	if (dev != NULL
	 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) {
		dev->flags |= AHC_DEV_UNCONFIGURED;
		if (TAILQ_EMPTY(&dev->busyq)
		 && dev->active == 0
		 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
			ahc_linux_free_device(ahc, dev);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
}
#else
/*
 * Sets the queue depth for each SCSI device hanging
 * off the input host adapter.
 */
static void
ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs)
{
	Scsi_Device *device;
	Scsi_Device *ldev;
	struct	ahc_softc *ahc;
	u_long	flags;

	ahc = *((struct ahc_softc **)host->hostdata);
	ahc_lock(ahc, &flags);
	for (device = scsi_devs; device != NULL; device = device->next) {

		/*
		 * Watch out for duplicate devices. This works around
		 * some quirks in how the SCSI scanning code does its
		 * device management.
		 */
		for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
			if (ldev->host == device->host
			 && ldev->channel == device->channel
			 && ldev->id == device->id
			 && ldev->lun == device->lun)
				break;
		}
		/* Skip duplicate. */
		if (ldev != device)
			continue;

		if (device->host == host) {
			struct	ahc_linux_device *dev;

			/*
			 * Since Linux has attached to the device, configure
			 * it so we don't free and allocate the device
			 * structure on every command.
			 */
			dev = ahc_linux_get_device(ahc, device->channel,
						   device->id, device->lun,
						   /*alloc*/TRUE);
			if (dev != NULL) {
				dev->flags &= ~AHC_DEV_UNCONFIGURED;
				dev->scsi_device = device;
				ahc_linux_device_queue_depth(ahc, dev);
				device->queue_depth = dev->openings
						    + dev->active;
				if ((dev->flags & (AHC_DEV_Q_BASIC
						| AHC_DEV_Q_TAGGED)) == 0) {
					/*
					 * We allow the OS to queue 2 untagged
					 * transactions to us at any time even
					 * though we can only execute them
					 * serially on the controller/device.
					 * This should remove some latency.
					 */
					device->queue_depth = 2;
				}
			}
		}
	}
	ahc_unlock(ahc, &flags);
}
#endif

#if defined(__i386__)
/*
 * Return the disk geometry for the given SCSI device.
 */
static int
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		    sector_t capacity, int geom[])
{
	uint8_t *bh;
#else
ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
{
	struct	scsi_device *sdev = disk->device;
	u_long	capacity = disk->capacity;
	struct	buffer_head *bh;
#endif
	int	heads;
	int	sectors;
	int	cylinders;
	int	ret;
	int	extended;
	struct	ahc_softc *ahc;
	u_int	channel;

	ahc = *((struct ahc_softc **)sdev->host->hostdata);
	channel = sdev->channel;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	bh = scsi_bios_ptable(bdev);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
#else
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
#endif

	if (bh) {
		ret = scsi_partsize(bh, capacity,
				    &geom[2], &geom[0], &geom[1]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		kfree(bh);
#else
		brelse(bh);
#endif
		if (ret != -1)
			return (ret);
	}
	heads = 64;
	sectors = 32;
	cylinders = aic_sector_div(capacity, heads, sectors);

	if (aic7xxx_extended != 0)
		extended = 1;
	else if (channel == 0)
		extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0;
	else
		extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0;
	if (extended && cylinders >= 1024) {
		heads = 255;
		sectors = 63;
		cylinders = aic_sector_div(capacity, heads, sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return (0);
}
#endif

/*
 * Abort the current SCSI command(s).
 */
static int
ahc_linux_abort(Scsi_Cmnd *cmd)
{
	int error;

	error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
	if (error != 0)
		printf("aic7xxx_abort returns 0x%x\n", error);
	return (error);
}

/*
 * Attempt to send a target reset message to the device that timed out.
 */
static int
ahc_linux_dev_reset(Scsi_Cmnd *cmd)
{
	int error;

	error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
	if (error != 0)
		printf("aic7xxx_dev_reset returns 0x%x\n", error);
	return (error);
}

/*
 * Reset the SCSI bus.
 */
static int
ahc_linux_bus_reset(Scsi_Cmnd *cmd)
{
	struct ahc_softc *ahc;
	u_long s;
	int    found;

	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
	ahc_midlayer_entrypoint_lock(ahc, &s);
	found = ahc_reset_channel(ahc, cmd->device->channel + 'A',
				  /*initiate reset*/TRUE);
	ahc_linux_run_complete_queue(ahc);
	ahc_midlayer_entrypoint_unlock(ahc, &s);

	if (bootverbose)
		printf("%s: SCSI bus reset delivered. "
		       "%d SCBs aborted.\n", ahc_name(ahc), found);

	return SUCCESS;
}

Scsi_Host_Template aic7xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= "aic7xxx",
	.proc_info		= ahc_linux_proc_info,
	.info			= ahc_linux_info,
	.queuecommand		= ahc_linux_queue,
	.eh_abort_handler	= ahc_linux_abort,
	.eh_device_reset_handler = ahc_linux_dev_reset,
	.eh_bus_reset_handler	= ahc_linux_bus_reset,
#if defined(__i386__)
	.bios_param		= ahc_linux_biosparam,
#endif
	.can_queue		= AHC_MAX_QUEUE,
	.this_id		= -1,
	.cmd_per_lun		= 2,
	.use_clustering		= ENABLE_CLUSTERING,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7)
	/*
	 * We can only map 16MB per-SG
	 * so create a sector limit of
	 * "16MB" in 2K sectors.
	 */
	.max_sectors		= 8192,
#endif
#if defined CONFIG_HIGHIO || LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10)
	/* Assume RedHat Distribution with its different HIGHIO conventions. */
	.can_dma_32		= 1,
	.single_sg_okay		= 1,
#else
	.highmem_io		= 1,
#endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	.slave_alloc		= ahc_linux_slave_alloc,
	.slave_configure	= ahc_linux_slave_configure,
	.slave_destroy		= ahc_linux_slave_destroy,
#else
	.detect			= ahc_linux_detect,
	.release		= ahc_linux_release,
	.select_queue_depths	= ahc_linux_select_queue_depth,
	.use_new_eh_code	= 1,
#endif
};

/**************************** Tasklet Handler *********************************/

/*
 * In 2.4.X and above, this routine is called from a tasklet,
 * so we must re-acquire our lock prior to executing this code.
 * In all prior kernels, ahc_schedule_runq() calls this routine
 * directly and ahc_schedule_runq() is called with our lock held.
 */
static void
ahc_runq_tasklet(unsigned long data)
{
	struct ahc_softc* ahc;
	struct ahc_linux_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	u_long flags;
#endif

	ahc = (struct ahc_softc *)data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	ahc_lock(ahc, &flags);
#endif
	while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {

		TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
		dev->flags &= ~AHC_DEV_ON_RUN_LIST;
		ahc_linux_check_device_queue(ahc, dev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
		/* Yield to our interrupt handler */
		ahc_unlock(ahc, &flags);
		ahc_lock(ahc, &flags);
#endif
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	ahc_unlock(ahc, &flags);
#endif
}

/******************************** Macros **************************************/
#define BUILD_SCSIID(ahc, cmd)						\
	((((cmd)->device->id << TID_SHIFT) & TID)			\
	| (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
	| (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
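
/*
 * For example (values hypothetical), a command addressed to target 5 on
 * channel B of a twin-channel adapter would be encoded by BUILD_SCSIID()
 * as
 *
 *	((5 << TID_SHIFT) & TID) | ahc->our_id_b | TWIN_CHNLB
 *
 * i.e. the target ID in the TID field, our own ID for channel B, and
 * the channel B selection bit.
 */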

/******************************** Bus DMA *************************************/
int
ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
		   bus_size_t alignment, bus_size_t boundary,
		   bus_addr_t lowaddr, bus_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
{
	bus_dma_tag_t dmat;

	dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
	if (dmat == NULL)
		return (ENOMEM);

	/*
	 * Linux is very simplistic about DMA memory. For now don't
	 * maintain all specification information. Once Linux supplies
	 * better facilities for doing these operations, or the
	 * needs of this particular driver change, we might need to do
	 * more here.
	 */
	dmat->alignment = alignment;
	dmat->boundary = boundary;
	dmat->maxsize = maxsize;
	*ret_tag = dmat;
	return (0);
}

void
ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
{
	free(dmat, M_DEVBUF);
}

int
ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
		 int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t map;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
	if (map == NULL)
		return (ENOMEM);
	/*
	 * Although we can dma data above 4GB, our
	 * "consistent" memory is below 4GB for
	 * space efficiency reasons (only need a 4byte
	 * address). For this reason, we have to reset
	 * our dma mask when doing allocations.
	 */
	if (ahc->dev_softc != NULL)
		ahc_pci_set_dma_mask(ahc->dev_softc, 0xFFFFFFFF);
	*vaddr = pci_alloc_consistent(ahc->dev_softc,
				      dmat->maxsize, &map->bus_addr);
	if (ahc->dev_softc != NULL)
		ahc_pci_set_dma_mask(ahc->dev_softc,
				     ahc->platform_data->hw_dma_mask);
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) */
	/*
	 * At least in 2.2.14, malloc is a slab allocator so all
	 * allocations are aligned. We assume for these kernel versions
	 * that all allocations will be below 4GB, physically contiguous,
	 * and accessible via DMA by the controller.
	 */
	map = NULL; /* No additional information to store */
	*vaddr = malloc(dmat->maxsize, M_DEVBUF, M_NOWAIT);
#endif
	if (*vaddr == NULL)
		return (ENOMEM);
	*mapp = map;
	return(0);
}

void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
			    vaddr, map->bus_addr);
#else
	free(vaddr, M_DEVBUF);
#endif
}

int
ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
		void *cb_arg, int flags)
{
	/*
	 * Assume for now that this will only be used during
	 * initialization and not for per-transaction buffer mapping.
	 */
	bus_dma_segment_t stack_sg;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	stack_sg.ds_addr = map->bus_addr;
#else
#define VIRT_TO_BUS(a) (uint32_t)virt_to_bus((void *)(a))
	stack_sg.ds_addr = VIRT_TO_BUS(buf);
#endif
	stack_sg.ds_len = dmat->maxsize;
	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
	return (0);
}

void
ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/*
	 * The map may be NULL in our < 2.3.X implementation.
	 */
	if (map != NULL)
		free(map, M_DEVBUF);
}

int
ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do */
	return (0);
}
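
/*
 * A minimal usage sketch for the bus-dma shim above (illustrative only;
 * "example_cb", the tag parameters, and the 4096-byte size are
 * hypothetical).  Consistent memory is allocated against a tag and then
 * "loaded" to recover its bus address through the callback:
 *
 *	static void
 *	example_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t  map;
 *	void	     *vaddr;
 *	bus_addr_t    busaddr;
 *
 *	ahc_dma_tag_create(ahc, ahc->parent_dmat, 1, 0,
 *			   0xFFFFFFFF, 0xFFFFFFFF, NULL, NULL,
 *			   4096, 1, 4096, 0, &tag);
 *	ahc_dmamem_alloc(ahc, tag, &vaddr, 0, &map);
 *	ahc_dmamap_load(ahc, tag, map, vaddr, 4096, example_cb,
 *			&busaddr, 0);
 */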

/********************* Platform Dependent Functions ***************************/
/*
 * Compare "left hand" softc with "right hand" softc, returning:
 * < 0 - lahc has a lower priority than rahc
 *   0 - Softcs are equal
 * > 0 - lahc has a higher priority than rahc
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	int	value;
	int	rvalue;
	int	lvalue;

	/*
	 * Under Linux, cards are ordered as follows:
	 *	1) VLB/EISA BIOS enabled devices sorted by BIOS address.
	 *	2) PCI devices with BIOS enabled sorted by bus/slot/func.
	 *	3) All remaining VLB/EISA devices sorted by ioport.
	 *	4) All remaining PCI devices sorted by bus/slot/func.
	 */
	value = (lahc->flags & AHC_BIOS_ENABLED)
	      - (rahc->flags & AHC_BIOS_ENABLED);
	if (value != 0)
		/* Controllers with BIOS enabled have a *higher* priority */
		return (value);

	/*
	 * Same BIOS setting, now sort based on bus type.
	 * EISA and VL controllers sort together. EISA/VL
	 * have higher priority than PCI.
	 */
	rvalue = (rahc->chip & AHC_BUS_MASK);
	if (rvalue == AHC_VL)
		rvalue = AHC_EISA;
	lvalue = (lahc->chip & AHC_BUS_MASK);
	if (lvalue == AHC_VL)
		lvalue = AHC_EISA;
	value = rvalue - lvalue;
	if (value != 0)
		return (value);

	/* Still equal. Sort by BIOS address, ioport, or bus/slot/func. */
	switch (rvalue) {
#ifdef CONFIG_PCI
	case AHC_PCI:
	{
		char primary_channel;

		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_bus(lahc->dev_softc)
			      - ahc_get_pci_bus(rahc->dev_softc);
		else
			value = ahc_get_pci_bus(rahc->dev_softc)
			      - ahc_get_pci_bus(lahc->dev_softc);
		if (value != 0)
			break;
		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_slot(lahc->dev_softc)
			      - ahc_get_pci_slot(rahc->dev_softc);
		else
			value = ahc_get_pci_slot(rahc->dev_softc)
			      - ahc_get_pci_slot(lahc->dev_softc);
		if (value != 0)
			break;
		/*
		 * On multi-function devices, the user can choose
		 * to have function 1 probed before function 0.
		 * Give whichever channel is the primary channel
		 * the highest priority.
		 */
		primary_channel = (lahc->flags & AHC_PRIMARY_CHANNEL) + 'A';
		value = -1;
		if (lahc->channel == primary_channel)
			value = 1;
		break;
	}
#endif
#ifdef CONFIG_EISA
	case AHC_EISA:
		if ((rahc->flags & AHC_BIOS_ENABLED) != 0) {
			value = rahc->platform_data->bios_address
			      - lahc->platform_data->bios_address;
		} else {
			value = rahc->bsh.ioport
			      - lahc->bsh.ioport;
		}
		break;
#endif
	default:
		panic("ahc_softc_sort: invalid bus type");
	}
	return (value);
}
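
/*
 * The comparator above is what keeps the ahc_tailq in probe order.  A
 * minimal insertion sketch (hypothetical; the core driver performs the
 * real sorted insertion):
 *
 *	struct ahc_softc *cur;
 *
 *	TAILQ_FOREACH(cur, &ahc_tailq, links) {
 *		if (ahc_softc_comp(new_ahc, cur) > 0)
 *			break;
 *	}
 *	if (cur != NULL)
 *		TAILQ_INSERT_BEFORE(cur, new_ahc, links);
 *	else
 *		TAILQ_INSERT_TAIL(&ahc_tailq, new_ahc, links);
 */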

static void
ahc_linux_setup_tag_info_global(char *p)
{
	int tags, i, j;

	tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
	printf("Setting Global Tags= %d\n", tags);

	for (i = 0; i < NUM_ELEMENTS(aic7xxx_tag_info); i++) {
		for (j = 0; j < AHC_NUM_TARGETS; j++) {
			aic7xxx_tag_info[i].tag_commands[j] = tags;
		}
	}
}

static void
ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
{

	if ((instance >= 0) && (targ >= 0)
	 && (instance < NUM_ELEMENTS(aic7xxx_tag_info))
	 && (targ < AHC_NUM_TARGETS)) {
		aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
		if (bootverbose)
			printf("tag_info[%d:%d] = %d\n", instance, targ, value);
	}
}

static void
ahc_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
{

	if ((instance >= 0)
	 && (instance < NUM_ELEMENTS(aic7xxx_dv_settings))) {
		aic7xxx_dv_settings[instance] = value;
		if (bootverbose)
			printf("dv[%d] = %d\n", instance, value);
	}
}

/*
 * Handle Linux boot parameters. This routine allows for assigning a value
 * to a parameter with a ':' between the parameter and the value.
 * ie. aic7xxx=stpwlev:1,extended
 */
static int
aic7xxx_setup(char *s)
{
	int	i, n;
	char   *p;
	char   *end;

	static struct {
		const char *name;
		uint32_t *flag;
	} options[] = {
		{ "extended", &aic7xxx_extended },
		{ "no_reset", &aic7xxx_no_reset },
		{ "verbose", &aic7xxx_verbose },
		{ "allow_memio", &aic7xxx_allow_memio},
#ifdef AHC_DEBUG
		{ "debug", &ahc_debug },
#endif
		{ "reverse_scan", &aic7xxx_reverse_scan },
		{ "no_probe", &aic7xxx_probe_eisa_vl },
		{ "probe_eisa_vl", &aic7xxx_probe_eisa_vl },
		{ "periodic_otag", &aic7xxx_periodic_otag },
		{ "pci_parity", &aic7xxx_pci_parity },
		{ "seltime", &aic7xxx_seltime },
		{ "tag_info", NULL },
		{ "global_tag_depth", NULL },
		{ "dv", NULL }
	};

	end = strchr(s, '\0');

	/*
	 * XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
	 * will never be 0 in this case.
	 */
	n = 0;

	while ((p = strsep(&s, ",.")) != NULL) {
		if (*p == '\0')
			continue;
		for (i = 0; i < NUM_ELEMENTS(options); i++) {

			n = strlen(options[i].name);
			if (strncmp(options[i].name, p, n) == 0)
				break;
		}
		if (i == NUM_ELEMENTS(options))
			continue;

		if (strncmp(p, "global_tag_depth", n) == 0) {
			ahc_linux_setup_tag_info_global(p + n);
		} else if (strncmp(p, "tag_info", n) == 0) {
			s = aic_parse_brace_option("tag_info", p + n, end,
			    2, ahc_linux_setup_tag_info, 0);
		} else if (strncmp(p, "dv", n) == 0) {
			s = aic_parse_brace_option("dv", p + n, end, 1,
			    ahc_linux_setup_dv, 0);
		} else if (p[n] == ':') {
			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
		} else if (strncmp(p, "verbose", n) == 0) {
			*(options[i].flag) = 1;
		} else {
			*(options[i].flag) ^= 0xFFFFFFFF;
		}
	}
	return 1;
}
1718
1719 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
1720 __setup("aic7xxx=", aic7xxx_setup);
1721 #endif
1722
1723 uint32_t aic7xxx_verbose;
1724
1725 int
1726 ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
1727 {
1728 char buf[80];
1729 struct Scsi_Host *host;
1730 char *new_name;
1731 u_long s;
1732 u_int targ_offset;
1733
1734 template->name = ahc->description;
1735 host = scsi_register(template, sizeof(struct ahc_softc *));
1736 if (host == NULL)
1737 return (ENOMEM);
1738
1739 *((struct ahc_softc **)host->hostdata) = ahc;
1740 ahc_lock(ahc, &s);
1741 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1742 scsi_assign_lock(host, &ahc->platform_data->spin_lock);
1743 #elif AHC_SCSI_HAS_HOST_LOCK != 0
1744 host->lock = &ahc->platform_data->spin_lock;
1745 #endif
1746 ahc->platform_data->host = host;
1747 host->can_queue = AHC_MAX_QUEUE;
1748 host->cmd_per_lun = 2;
1749 /* XXX No way to communicate the ID for multiple channels */
1750 host->this_id = ahc->our_id;
1751 host->irq = ahc->platform_data->irq;
1752 host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
1753 host->max_lun = AHC_NUM_LUNS;
1754 host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0;
1755 host->sg_tablesize = AHC_NSEG;
1756 ahc_set_unit(ahc, ahc_linux_next_unit());
1757 sprintf(buf, "scsi%d", host->host_no);
1758 new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
1759 if (new_name != NULL) {
1760 strcpy(new_name, buf);
1761 ahc_set_name(ahc, new_name);
1762 }
1763 host->unique_id = ahc->unit;
1764 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,4) && \
1765 LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
1766 scsi_set_pci_device(host, ahc->dev_softc);
1767 #endif
1768 ahc_linux_initialize_scsi_bus(ahc);
1769 ahc_unlock(ahc, &s);
1770 ahc->platform_data->dv_pid = kernel_thread(ahc_linux_dv_thread, ahc, 0);
1771 ahc_lock(ahc, &s);
	if (ahc->platform_data->dv_pid < 0) {
		printf("%s: Failed to create DV thread, error= %d\n",
		       ahc_name(ahc), ahc->platform_data->dv_pid);
		ahc_unlock(ahc, &s);
		return (-ahc->platform_data->dv_pid);
	}
1777 /*
1778 * Initially allocate *all* of our linux target objects
1779 * so that the DV thread will scan them all in parallel
1780 * just after driver initialization. Any device that
1781 * does not exist will have its target object destroyed
1782 * by the selection timeout handler. In the case of a
1783 * device that appears after the initial DV scan, async
1784 * negotiation will occur for the first command, and DV
 * will commence should that first command be successful.
1786 */
1787 for (targ_offset = 0;
1788 targ_offset < host->max_id * (host->max_channel + 1);
1789 targ_offset++) {
1790 u_int channel;
1791 u_int target;
1792
1793 channel = 0;
1794 target = targ_offset;
1795 if (target > 7
1796 && (ahc->features & AHC_TWIN) != 0) {
1797 channel = 1;
1798 target &= 0x7;
1799 }
1800 /*
1801 * Skip our own ID. Some Compaq/HP storage devices
1802 * have enclosure management devices that respond to
1803 * single bit selection (i.e. selecting ourselves).
1804 * It is expected that either an external application
1805 * or a modified kernel will be used to probe this
1806 * ID if it is appropriate. To accommodate these
1807 * installations, ahc_linux_alloc_target() will allocate
1808 * for our ID if asked to do so.
1809 */
1810 if ((channel == 0 && target == ahc->our_id)
1811 || (channel == 1 && target == ahc->our_id_b))
1812 continue;
1813
1814 ahc_linux_alloc_target(ahc, channel, target);
1815 }
1816 ahc_intr_enable(ahc, TRUE);
1817 ahc_linux_start_dv(ahc);
1818 ahc_unlock(ahc, &s);
1819
1820 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1821 scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
1822 #endif
1823 return (0);
1824 }
1825
1826 uint64_t
1827 ahc_linux_get_memsize(void)
1828 {
1829 struct sysinfo si;
1830
1831 si_meminfo(&si);
1832 return ((uint64_t)si.totalram << PAGE_SHIFT);
1833 }
1834
1835 /*
1836 * Find the smallest available unit number to use
1837 * for a new device. We don't just use a static
1838 * count to handle the "repeated hot-(un)plug"
1839 * scenario.
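 * For example, with units 0, 1, and 3 still registered, the
 * next controller to arrive is assigned unit 2.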
1840 */
1841 static int
1842 ahc_linux_next_unit(void)
1843 {
1844 struct ahc_softc *ahc;
1845 int unit;
1846
1847 unit = 0;
1848 retry:
1849 TAILQ_FOREACH(ahc, &ahc_tailq, links) {
1850 if (ahc->unit == unit) {
1851 unit++;
1852 goto retry;
1853 }
1854 }
1855 return (unit);
1856 }
1857
1858 /*
1859 * Place the SCSI bus into a known state by either resetting it,
1860 * or forcing transfer negotiations on the next command to any
1861 * target.
1862 */
1863 void
1864 ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
1865 {
1866 int i;
1867 int numtarg;
1868
1869 i = 0;
1870 numtarg = 0;
1871
1872 if (aic7xxx_no_reset != 0)
1873 ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);
1874
1875 if ((ahc->flags & AHC_RESET_BUS_A) != 0)
1876 ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
1877 else
1878 numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
1879
1880 if ((ahc->features & AHC_TWIN) != 0) {
1881
1882 if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
1883 ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
		} else {
			/*
			 * Channel B targets occupy slots 8-15 in the
			 * loop below, so extend the range to cover
			 * them even when channel A was reset and
			 * numtarg is still zero.
			 */
			if (numtarg == 0)
				i = 8;
			numtarg = 16;
		}
1889 }
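	/*
	 * [i, numtarg) now covers exactly the target slots that did
	 * not see a bus reset: 0-7 map to channel A and, on twin
	 * adapters, 8-15 map to channel B.
	 */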
1890
1891 /*
1892 * Force negotiation to async for all targets that
1893 * will not see an initial bus reset.
1894 */
1895 for (; i < numtarg; i++) {
1896 struct ahc_devinfo devinfo;
1897 struct ahc_initiator_tinfo *tinfo;
1898 struct ahc_tmode_tstate *tstate;
1899 u_int our_id;
1900 u_int target_id;
1901 char channel;
1902
1903 channel = 'A';
1904 our_id = ahc->our_id;
1905 target_id = i;
1906 if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
1907 channel = 'B';
1908 our_id = ahc->our_id_b;
1909 target_id = i % 8;
1910 }
1911 tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
1912 target_id, &tstate);
1913 ahc_compile_devinfo(&devinfo, our_id, target_id,
1914 CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
1915 ahc_update_neg_request(ahc, &devinfo, tstate,
1916 tinfo, AHC_NEG_ALWAYS);
1917 }
1918 /* Give the bus some time to recover */
1919 if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
1920 ahc_linux_freeze_simq(ahc);
1921 init_timer(&ahc->platform_data->reset_timer);
1922 ahc->platform_data->reset_timer.data = (u_long)ahc;
1923 ahc->platform_data->reset_timer.expires =
1924 jiffies + (AIC7XXX_RESET_DELAY * HZ)/1000;
1925 ahc->platform_data->reset_timer.function =
1926 ahc_linux_release_simq;
1927 add_timer(&ahc->platform_data->reset_timer);
1928 }
1929 }
1930
1931 int
1932 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1933 {
1934
1935 ahc->platform_data =
1936 malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
1937 if (ahc->platform_data == NULL)
1938 return (ENOMEM);
1939 memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
1940 TAILQ_INIT(&ahc->platform_data->completeq);
1941 TAILQ_INIT(&ahc->platform_data->device_runq);
1942 ahc->platform_data->irq = AHC_LINUX_NOIRQ;
1943 ahc->platform_data->hw_dma_mask = 0xFFFFFFFF;
1944 ahc_lockinit(ahc);
1945 ahc_done_lockinit(ahc);
1946 init_timer(&ahc->platform_data->completeq_timer);
1947 ahc->platform_data->completeq_timer.data = (u_long)ahc;
1948 ahc->platform_data->completeq_timer.function =
1949 (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
1950 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
1951 init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
1952 init_MUTEX_LOCKED(&ahc->platform_data->dv_sem);
1953 init_MUTEX_LOCKED(&ahc->platform_data->dv_cmd_sem);
1954 #else
1955 ahc->platform_data->eh_sem = MUTEX_LOCKED;
1956 ahc->platform_data->dv_sem = MUTEX_LOCKED;
1957 ahc->platform_data->dv_cmd_sem = MUTEX_LOCKED;
1958 #endif
1959 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1960 tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
1961 (unsigned long)ahc);
1962 #endif
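	/*
	 * Two-bit selection timeout code: 0=256ms, 1=128ms, 2=64ms,
	 * 3=32ms (the mapping given in the driver's boot-parameter
	 * documentation).
	 */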
1963 ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
1964 ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
1965 if (aic7xxx_pci_parity == 0)
1966 ahc->flags |= AHC_DISABLE_PCI_PERR;
1967
1968 return (0);
1969 }
1970
1971 void
1972 ahc_platform_free(struct ahc_softc *ahc)
1973 {
1974 struct ahc_linux_target *targ;
1975 struct ahc_linux_device *dev;
1976 int i, j;
1977
1978 if (ahc->platform_data != NULL) {
1979 del_timer_sync(&ahc->platform_data->completeq_timer);
1980 ahc_linux_kill_dv_thread(ahc);
1981 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1982 tasklet_kill(&ahc->platform_data->runq_tasklet);
1983 #endif
1984 if (ahc->platform_data->host != NULL) {
1985 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1986 scsi_remove_host(ahc->platform_data->host);
1987 #endif
1988 scsi_unregister(ahc->platform_data->host);
1989 }
1990
1991 /* destroy all of the device and target objects */
1992 for (i = 0; i < AHC_NUM_TARGETS; i++) {
1993 targ = ahc->platform_data->targets[i];
1994 if (targ != NULL) {
1995 /* Keep target around through the loop. */
1996 targ->refcount++;
1997 for (j = 0; j < AHC_NUM_LUNS; j++) {
1998
1999 if (targ->devices[j] == NULL)
2000 continue;
2001 dev = targ->devices[j];
2002 ahc_linux_free_device(ahc, dev);
2003 }
2004 /*
2005 * Forcibly free the target now that
2006 * all devices are gone.
2007 */
2008 ahc_linux_free_target(ahc, targ);
2009 }
2010 }
2011
2012 if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
2013 free_irq(ahc->platform_data->irq, ahc);
2014 if (ahc->tag == BUS_SPACE_PIO
2015 && ahc->bsh.ioport != 0)
2016 release_region(ahc->bsh.ioport, 256);
2017 if (ahc->tag == BUS_SPACE_MEMIO
2018 && ahc->bsh.maddr != NULL) {
2019 u_long base_addr;
2020
2021 base_addr = (u_long)ahc->bsh.maddr;
2022 base_addr &= PAGE_MASK;
2023 iounmap((void *)base_addr);
2024 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
2025 release_mem_region(ahc->platform_data->mem_busaddr,
2026 0x1000);
2027 #endif
2028 }
2029 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) && \
2030 LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
2031 /*
2032 * In 2.4 we detach from the scsi midlayer before the PCI
2033 * layer invokes our remove callback. No per-instance
2034 * detach is provided, so we must reach inside the PCI
2035 * subsystem's internals and detach our driver manually.
2036 */
2037 if (ahc->dev_softc != NULL)
2038 ahc->dev_softc->driver = NULL;
2039 #endif
2040 free(ahc->platform_data, M_DEVBUF);
2041 }
2042 }
2043
2044 void
2045 ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
2046 {
2047 ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
2048 SCB_GET_CHANNEL(ahc, scb),
2049 SCB_GET_LUN(scb), SCB_LIST_NULL,
2050 ROLE_UNKNOWN, CAM_REQUEUE_REQ);
2051 }
2052
2053 void
2054 ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2055 ahc_queue_alg alg)
2056 {
2057 struct ahc_linux_device *dev;
2058 int was_queuing;
2059 int now_queuing;
2060
2061 dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
2062 devinfo->target,
2063 devinfo->lun, /*alloc*/FALSE);
2064 if (dev == NULL)
2065 return;
2066 was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
2067 switch (alg) {
2068 default:
2069 case AHC_QUEUE_NONE:
2070 now_queuing = 0;
2071 break;
2072 case AHC_QUEUE_BASIC:
2073 now_queuing = AHC_DEV_Q_BASIC;
2074 break;
2075 case AHC_QUEUE_TAGGED:
2076 now_queuing = AHC_DEV_Q_TAGGED;
2077 break;
2078 }
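	/*
	 * When the queueing style changes while commands are still
	 * outstanding, freeze the device queue until it drains so
	 * that tagged and untagged commands are never intermixed.
	 */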
2079 if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
2080 && (was_queuing != now_queuing)
2081 && (dev->active != 0)) {
2082 dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
2083 dev->qfrozen++;
2084 }
2085
2086 dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
2087 if (now_queuing) {
2088 u_int usertags;
2089
2090 usertags = ahc_linux_user_tagdepth(ahc, devinfo);
2091 if (!was_queuing) {
2092 /*
			 * Start out aggressively and allow our
2094 * dynamic queue depth algorithm to take
2095 * care of the rest.
2096 */
2097 dev->maxtags = usertags;
2098 dev->openings = dev->maxtags - dev->active;
2099 }
2100 if (dev->maxtags == 0) {
2101 /*
2102 * Queueing is disabled by the user.
2103 */
2104 dev->openings = 1;
2105 } else if (alg == AHC_QUEUE_TAGGED) {
2106 dev->flags |= AHC_DEV_Q_TAGGED;
2107 if (aic7xxx_periodic_otag != 0)
2108 dev->flags |= AHC_DEV_PERIODIC_OTAG;
2109 } else
2110 dev->flags |= AHC_DEV_Q_BASIC;
2111 } else {
2112 /* We can only have one opening. */
2113 dev->maxtags = 0;
2114 dev->openings = 1 - dev->active;
2115 }
2116 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2117 if (dev->scsi_device != NULL) {
2118 switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
2119 case AHC_DEV_Q_BASIC:
2120 scsi_adjust_queue_depth(dev->scsi_device,
2121 MSG_SIMPLE_TASK,
2122 dev->openings + dev->active);
2123 break;
2124 case AHC_DEV_Q_TAGGED:
2125 scsi_adjust_queue_depth(dev->scsi_device,
2126 MSG_ORDERED_TASK,
2127 dev->openings + dev->active);
2128 break;
2129 default:
2130 /*
2131 * We allow the OS to queue 2 untagged transactions to
2132 * us at any time even though we can only execute them
2133 * serially on the controller/device. This should
2134 * remove some latency.
2135 */
2136 scsi_adjust_queue_depth(dev->scsi_device,
2137 /*NON-TAGGED*/0,
2138 /*queue depth*/2);
2139 break;
2140 }
2141 }
2142 #endif
2143 }
2144
2145 int
2146 ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
2147 int lun, u_int tag, role_t role, uint32_t status)
2148 {
2149 int chan;
2150 int maxchan;
2151 int targ;
2152 int maxtarg;
2153 int clun;
2154 int maxlun;
2155 int count;
2156
2157 if (tag != SCB_LIST_NULL)
2158 return (0);
2159
2160 chan = 0;
2161 if (channel != ALL_CHANNELS) {
2162 chan = channel - 'A';
2163 maxchan = chan + 1;
2164 } else {
2165 maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
2166 }
2167 targ = 0;
2168 if (target != CAM_TARGET_WILDCARD) {
2169 targ = target;
2170 maxtarg = targ + 1;
2171 } else {
2172 maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
2173 }
2174 clun = 0;
2175 if (lun != CAM_LUN_WILDCARD) {
2176 clun = lun;
2177 maxlun = clun + 1;
2178 } else {
2179 maxlun = AHC_NUM_LUNS;
2180 }
2181
	count = 0;
	for (; chan < maxchan; chan++) {

		/* Restart the inner scans on each pass of an outer loop. */
		for (targ = (target != CAM_TARGET_WILDCARD) ? target : 0;
		     targ < maxtarg; targ++) {

			for (clun = (lun != CAM_LUN_WILDCARD) ? lun : 0;
			     clun < maxlun; clun++) {
2188 struct ahc_linux_device *dev;
2189 struct ahc_busyq *busyq;
2190 struct ahc_cmd *acmd;
2191
2192 dev = ahc_linux_get_device(ahc, chan,
2193 targ, clun,
2194 /*alloc*/FALSE);
2195 if (dev == NULL)
2196 continue;
2197
2198 busyq = &dev->busyq;
2199 while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
2200 Scsi_Cmnd *cmd;
2201
2202 cmd = &acmd_scsi_cmd(acmd);
2203 TAILQ_REMOVE(busyq, acmd,
2204 acmd_links.tqe);
2205 count++;
2206 cmd->result = status << 16;
2207 ahc_linux_queue_cmd_complete(ahc, cmd);
2208 }
2209 }
2210 }
2211 }
2212
2213 return (count);
2214 }
2215
2216 static void
2217 ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
2218 {
2219 u_long flags;
2220
2221 ahc_lock(ahc, &flags);
2222 del_timer(&ahc->platform_data->completeq_timer);
2223 ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER;
2224 ahc_linux_run_complete_queue(ahc);
2225 ahc_unlock(ahc, &flags);
2226 }
2227
2228 static void
2229 ahc_linux_start_dv(struct ahc_softc *ahc)
2230 {
2231
2232 /*
2233 * Freeze the simq and signal ahc_linux_queue to not let any
2234 * more commands through.
2235 */
2236 if ((ahc->platform_data->flags & AHC_DV_ACTIVE) == 0) {
2237 #ifdef AHC_DEBUG
2238 if (ahc_debug & AHC_SHOW_DV)
2239 printf("%s: Waking DV thread\n", ahc_name(ahc));
2240 #endif
2241
2242 ahc->platform_data->flags |= AHC_DV_ACTIVE;
2243 ahc_linux_freeze_simq(ahc);
2244
2245 /* Wake up the DV kthread */
2246 up(&ahc->platform_data->dv_sem);
2247 }
2248 }
2249
2250 static void
2251 ahc_linux_kill_dv_thread(struct ahc_softc *ahc)
2252 {
2253 u_long s;
2254
2255 ahc_lock(ahc, &s);
2256 if (ahc->platform_data->dv_pid != 0) {
2257 ahc->platform_data->flags |= AHC_DV_SHUTDOWN;
2258 ahc_unlock(ahc, &s);
2259 up(&ahc->platform_data->dv_sem);
2260
2261 /*
2262 * Use the eh_sem as an indicator that the
2263 * dv thread is exiting. Note that the dv
2264 * thread must still return after performing
2265 * the up on our semaphore before it has
2266 * completely exited this module. Unfortunately,
2267 * there seems to be no easy way to wait for the
2268 * exit of a thread for which you are not the
2269 * parent (dv threads are parented by init).
2270 * Cross your fingers...
2271 */
2272 down(&ahc->platform_data->eh_sem);
2273
2274 /*
2275 * Mark the dv thread as already dead. This
2276 * avoids attempting to kill it a second time.
2277 * This is necessary because we must kill the
2278 * DV thread before calling ahc_free() in the
2279 * module shutdown case to avoid bogus locking
		 * in the SCSI mid-layer, but ahc_free() is
2281 * called without killing the DV thread in the
2282 * instance detach case, so ahc_platform_free()
2283 * calls us again to verify that the DV thread
2284 * is dead.
2285 */
2286 ahc->platform_data->dv_pid = 0;
2287 } else {
2288 ahc_unlock(ahc, &s);
2289 }
2290 }
2291
2292 static int
2293 ahc_linux_dv_thread(void *data)
2294 {
2295 struct ahc_softc *ahc;
2296 int target;
2297 u_long s;
2298
2299 ahc = (struct ahc_softc *)data;
2300
2301 #ifdef AHC_DEBUG
2302 if (ahc_debug & AHC_SHOW_DV)
2303 printf("Launching DV Thread\n");
2304 #endif
2305
2306 /*
2307 * Complete thread creation.
2308 */
2309 lock_kernel();
2310 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
2311 /*
2312 * Don't care about any signals.
2313 */
	siginitsetinv(&current->blocked, 0);
2315
2316 daemonize();
2317 sprintf(current->comm, "ahc_dv_%d", ahc->unit);
2318 #else
2319 daemonize("ahc_dv_%d", ahc->unit);
2320 #endif
2321 unlock_kernel();
2322
2323 while (1) {
2324 /*
2325 * Use down_interruptible() rather than down() to
2326 * avoid inclusion in the load average.
2327 */
2328 down_interruptible(&ahc->platform_data->dv_sem);
2329
2330 /* Check to see if we've been signaled to exit */
2331 ahc_lock(ahc, &s);
2332 if ((ahc->platform_data->flags & AHC_DV_SHUTDOWN) != 0) {
2333 ahc_unlock(ahc, &s);
2334 break;
2335 }
2336 ahc_unlock(ahc, &s);
2337
2338 #ifdef AHC_DEBUG
2339 if (ahc_debug & AHC_SHOW_DV)
2340 printf("%s: Beginning Domain Validation\n",
2341 ahc_name(ahc));
2342 #endif
2343
2344 /*
2345 * Wait for any pending commands to drain before proceeding.
2346 */
2347 ahc_lock(ahc, &s);
2348 while (LIST_FIRST(&ahc->pending_scbs) != NULL) {
2349 ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_EMPTY;
2350 ahc_unlock(ahc, &s);
2351 down_interruptible(&ahc->platform_data->dv_sem);
2352 ahc_lock(ahc, &s);
2353 }
2354
2355 /*
2356 * Wait for the SIMQ to be released so that DV is the
2357 * only reason the queue is frozen.
2358 */
2359 while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
2360 ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
2361 ahc_unlock(ahc, &s);
2362 down_interruptible(&ahc->platform_data->dv_sem);
2363 ahc_lock(ahc, &s);
2364 }
2365 ahc_unlock(ahc, &s);
2366
2367 for (target = 0; target < AHC_NUM_TARGETS; target++)
2368 ahc_linux_dv_target(ahc, target);
2369
2370 ahc_lock(ahc, &s);
2371 ahc->platform_data->flags &= ~AHC_DV_ACTIVE;
2372 ahc_unlock(ahc, &s);
2373
2374 /*
2375 * Release the SIMQ so that normal commands are
2376 * allowed to continue on the bus.
2377 */
2378 ahc_linux_release_simq((u_long)ahc);
2379 }
2380 up(&ahc->platform_data->eh_sem);
2381 return (0);
2382 }
2383
2384 #define AHC_LINUX_DV_INQ_SHORT_LEN 36
2385 #define AHC_LINUX_DV_INQ_LEN 256
2386 #define AHC_LINUX_DV_TIMEOUT (HZ / 4)
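/* Base per-command DV timeout: a quarter second; states extend it below. */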
2387
2388 #define AHC_SET_DV_STATE(ahc, targ, newstate) \
2389 ahc_set_dv_state(ahc, targ, newstate, __LINE__)
2390
2391 static __inline void
2392 ahc_set_dv_state(struct ahc_softc *ahc, struct ahc_linux_target *targ,
2393 ahc_dv_state newstate, u_int line)
2394 {
2395 ahc_dv_state oldstate;
2396
2397 oldstate = targ->dv_state;
2398 #ifdef AHC_DEBUG
2399 if (ahc_debug & AHC_SHOW_DV)
2400 printf("%s:%d: Going from state %d to state %d\n",
2401 ahc_name(ahc), line, oldstate, newstate);
2402 #endif
2403
2404 if (oldstate == newstate)
2405 targ->dv_state_retry++;
2406 else
2407 targ->dv_state_retry = 0;
2408 targ->dv_state = newstate;
2409 }
2410
2411 static void
2412 ahc_linux_dv_target(struct ahc_softc *ahc, u_int target_offset)
2413 {
2414 struct ahc_devinfo devinfo;
2415 struct ahc_linux_target *targ;
2416 struct scsi_cmnd *cmd;
2417 struct scsi_device *scsi_dev;
2418 struct scsi_sense_data *sense;
2419 uint8_t *buffer;
2420 u_long s;
2421 u_int timeout;
2422 int echo_size;
2423
2424 sense = NULL;
2425 buffer = NULL;
2426 echo_size = 0;
2427 ahc_lock(ahc, &s);
2428 targ = ahc->platform_data->targets[target_offset];
2429 if (targ == NULL || (targ->flags & AHC_DV_REQUIRED) == 0) {
2430 ahc_unlock(ahc, &s);
2431 return;
2432 }
2433 ahc_compile_devinfo(&devinfo,
2434 targ->channel == 0 ? ahc->our_id : ahc->our_id_b,
2435 targ->target, /*lun*/0, targ->channel + 'A',
2436 ROLE_INITIATOR);
2437 #ifdef AHC_DEBUG
2438 if (ahc_debug & AHC_SHOW_DV) {
2439 ahc_print_devinfo(ahc, &devinfo);
2440 printf("Performing DV\n");
2441 }
2442 #endif
2443
2444 ahc_unlock(ahc, &s);
2445
2446 cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
2447 scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
2448 scsi_dev->host = ahc->platform_data->host;
2449 scsi_dev->id = devinfo.target;
2450 scsi_dev->lun = devinfo.lun;
2451 scsi_dev->channel = devinfo.channel - 'A';
2452 ahc->platform_data->dv_scsi_dev = scsi_dev;
2453
2454 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_SHORT_ASYNC);
2455
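	/*
	 * Run the DV state machine.  Each pass issues one command for
	 * the current state; ahc_linux_dv_transition() then advances
	 * targ->dv_state based on the completion status.
	 */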
2456 while (targ->dv_state != AHC_DV_STATE_EXIT) {
2457 timeout = AHC_LINUX_DV_TIMEOUT;
2458 switch (targ->dv_state) {
2459 case AHC_DV_STATE_INQ_SHORT_ASYNC:
2460 case AHC_DV_STATE_INQ_ASYNC:
2461 case AHC_DV_STATE_INQ_ASYNC_VERIFY:
2462 /*
2463 * Set things to async narrow to reduce the
2464 * chance that the INQ will fail.
2465 */
2466 ahc_lock(ahc, &s);
2467 ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
2468 AHC_TRANS_GOAL, /*paused*/FALSE);
2469 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
2470 AHC_TRANS_GOAL, /*paused*/FALSE);
2471 ahc_unlock(ahc, &s);
2472 timeout = 10 * HZ;
2473 targ->flags &= ~AHC_INQ_VALID;
2474 /* FALLTHROUGH */
2475 case AHC_DV_STATE_INQ_VERIFY:
2476 {
2477 u_int inq_len;
2478
2479 if (targ->dv_state == AHC_DV_STATE_INQ_SHORT_ASYNC)
2480 inq_len = AHC_LINUX_DV_INQ_SHORT_LEN;
2481 else
2482 inq_len = targ->inq_data->additional_length + 5;
2483 ahc_linux_dv_inq(ahc, cmd, &devinfo, targ, inq_len);
2484 break;
2485 }
2486 case AHC_DV_STATE_TUR:
2487 case AHC_DV_STATE_BUSY:
2488 timeout = 5 * HZ;
2489 ahc_linux_dv_tur(ahc, cmd, &devinfo);
2490 break;
2491 case AHC_DV_STATE_REBD:
2492 ahc_linux_dv_rebd(ahc, cmd, &devinfo, targ);
2493 break;
2494 case AHC_DV_STATE_WEB:
2495 ahc_linux_dv_web(ahc, cmd, &devinfo, targ);
2496 break;
2497
2498 case AHC_DV_STATE_REB:
2499 ahc_linux_dv_reb(ahc, cmd, &devinfo, targ);
2500 break;
2501
2502 case AHC_DV_STATE_SU:
2503 ahc_linux_dv_su(ahc, cmd, &devinfo, targ);
2504 timeout = 50 * HZ;
2505 break;
2506
2507 default:
2508 ahc_print_devinfo(ahc, &devinfo);
2509 printf("Unknown DV state %d\n", targ->dv_state);
2510 goto out;
2511 }
2512
2513 /* Queue the command and wait for it to complete */
2514 /* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
2515 init_timer(&cmd->eh_timeout);
2516 #ifdef AHC_DEBUG
2517 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2518 /*
2519 * All of the printfs during negotiation
2520 * really slow down the negotiation.
2521 * Add a bit of time just to be safe.
2522 */
2523 timeout += HZ;
2524 #endif
2525 scsi_add_timer(cmd, timeout, ahc_linux_dv_timeout);
2526 /*
2527 * In 2.5.X, it is assumed that all calls from the
2528 * "midlayer" (which we are emulating) will have the
2529 * ahc host lock held. For other kernels, the
2530 * io_request_lock must be held.
2531 */
2532 #if AHC_SCSI_HAS_HOST_LOCK != 0
2533 ahc_lock(ahc, &s);
2534 #else
2535 spin_lock_irqsave(&io_request_lock, s);
2536 #endif
2537 ahc_linux_queue(cmd, ahc_linux_dv_complete);
2538 #if AHC_SCSI_HAS_HOST_LOCK != 0
2539 ahc_unlock(ahc, &s);
2540 #else
2541 spin_unlock_irqrestore(&io_request_lock, s);
2542 #endif
2543 down_interruptible(&ahc->platform_data->dv_cmd_sem);
2544 /*
2545 * Wait for the SIMQ to be released so that DV is the
2546 * only reason the queue is frozen.
2547 */
2548 ahc_lock(ahc, &s);
2549 while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
2550 ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
2551 ahc_unlock(ahc, &s);
2552 down_interruptible(&ahc->platform_data->dv_sem);
2553 ahc_lock(ahc, &s);
2554 }
2555 ahc_unlock(ahc, &s);
2556
2557 ahc_linux_dv_transition(ahc, cmd, &devinfo, targ);
2558 }
2559
2560 out:
2561 if ((targ->flags & AHC_INQ_VALID) != 0
2562 && ahc_linux_get_device(ahc, devinfo.channel - 'A',
2563 devinfo.target, devinfo.lun,
2564 /*alloc*/FALSE) == NULL) {
2565 /*
2566 * The DV state machine failed to configure this device.
2567 * This is normal if DV is disabled. Since we have inquiry
2568 * data, filter it and use the "optimistic" negotiation
2569 * parameters found in the inquiry string.
2570 */
2571 ahc_linux_filter_inquiry(ahc, &devinfo);
2572 if ((targ->flags & (AHC_BASIC_DV|AHC_ENHANCED_DV)) != 0) {
2573 ahc_print_devinfo(ahc, &devinfo);
2574 printf("DV failed to configure device. "
2575 "Please file a bug report against "
2576 "this driver.\n");
2577 }
2578 }
2579
2580 if (cmd != NULL)
2581 free(cmd, M_DEVBUF);
2582
2583 if (ahc->platform_data->dv_scsi_dev != NULL) {
2584 free(ahc->platform_data->dv_scsi_dev, M_DEVBUF);
2585 ahc->platform_data->dv_scsi_dev = NULL;
2586 }
2587
2588 ahc_lock(ahc, &s);
2589 if (targ->dv_buffer != NULL) {
2590 free(targ->dv_buffer, M_DEVBUF);
2591 targ->dv_buffer = NULL;
2592 }
2593 if (targ->dv_buffer1 != NULL) {
2594 free(targ->dv_buffer1, M_DEVBUF);
2595 targ->dv_buffer1 = NULL;
2596 }
2597 targ->flags &= ~AHC_DV_REQUIRED;
2598 if (targ->refcount == 0)
2599 ahc_linux_free_target(ahc, targ);
2600 ahc_unlock(ahc, &s);
2601 }
2602
2603 static void
2604 ahc_linux_dv_transition(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
2605 struct ahc_devinfo *devinfo,
2606 struct ahc_linux_target *targ)
2607 {
2608 u_int32_t status;
2609
2610 status = aic_error_action(cmd, targ->inq_data,
2611 ahc_cmd_get_transaction_status(cmd),
2612 ahc_cmd_get_scsi_status(cmd));
2613
2614 #ifdef AHC_DEBUG
2615 if (ahc_debug & AHC_SHOW_DV) {
2616 ahc_print_devinfo(ahc, devinfo);
2617 printf("Entering ahc_linux_dv_transition, state= %d, "
2618 "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
2619 status, cmd->result);
2620 }
2621 #endif
2622
2623 switch (targ->dv_state) {
2624 case AHC_DV_STATE_INQ_SHORT_ASYNC:
2625 case AHC_DV_STATE_INQ_ASYNC:
2626 switch (status & SS_MASK) {
2627 case SS_NOP:
2628 {
2629 AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
2630 break;
2631 }
2632 case SS_INQ_REFRESH:
2633 AHC_SET_DV_STATE(ahc, targ,
2634 AHC_DV_STATE_INQ_SHORT_ASYNC);
2635 break;
2636 case SS_TUR:
2637 case SS_RETRY:
2638 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2639 if (ahc_cmd_get_transaction_status(cmd)
2640 == CAM_REQUEUE_REQ)
2641 targ->dv_state_retry--;
2642 if ((status & SS_ERRMASK) == EBUSY)
2643 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2644 if (targ->dv_state_retry < 10)
2645 break;
2646 /* FALLTHROUGH */
2647 default:
2648 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2649 #ifdef AHC_DEBUG
2650 if (ahc_debug & AHC_SHOW_DV) {
2651 ahc_print_devinfo(ahc, devinfo);
2652 printf("Failed DV inquiry, skipping\n");
2653 }
2654 #endif
2655 break;
2656 }
2657 break;
2658 case AHC_DV_STATE_INQ_ASYNC_VERIFY:
2659 switch (status & SS_MASK) {
2660 case SS_NOP:
2661 {
2662 u_int xportflags;
2663 u_int spi3data;
2664
2665 if (memcmp(targ->inq_data, targ->dv_buffer,
2666 AHC_LINUX_DV_INQ_LEN) != 0) {
2667 /*
2668 * Inquiry data must have changed.
2669 * Try from the top again.
2670 */
2671 AHC_SET_DV_STATE(ahc, targ,
2672 AHC_DV_STATE_INQ_SHORT_ASYNC);
2673 break;
2674 }
2675
2676 AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
2677 targ->flags |= AHC_INQ_VALID;
2678 if (ahc_linux_user_dv_setting(ahc) == 0)
2679 break;
2680
2681 xportflags = targ->inq_data->flags;
2682 if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
2683 break;
2684
2685 spi3data = targ->inq_data->spi3data;
2686 switch (spi3data & SID_SPI_CLOCK_DT_ST) {
2687 default:
2688 case SID_SPI_CLOCK_ST:
2689 /* Assume only basic DV is supported. */
2690 targ->flags |= AHC_BASIC_DV;
2691 break;
2692 case SID_SPI_CLOCK_DT:
2693 case SID_SPI_CLOCK_DT_ST:
2694 targ->flags |= AHC_ENHANCED_DV;
2695 break;
2696 }
2697 break;
2698 }
2699 case SS_INQ_REFRESH:
2700 AHC_SET_DV_STATE(ahc, targ,
2701 AHC_DV_STATE_INQ_SHORT_ASYNC);
2702 break;
2703 case SS_TUR:
2704 case SS_RETRY:
2705 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2706 if (ahc_cmd_get_transaction_status(cmd)
2707 == CAM_REQUEUE_REQ)
2708 targ->dv_state_retry--;
2709
2710 if ((status & SS_ERRMASK) == EBUSY)
2711 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2712 if (targ->dv_state_retry < 10)
2713 break;
2714 /* FALLTHROUGH */
2715 default:
2716 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2717 #ifdef AHC_DEBUG
2718 if (ahc_debug & AHC_SHOW_DV) {
2719 ahc_print_devinfo(ahc, devinfo);
2720 printf("Failed DV inquiry, skipping\n");
2721 }
2722 #endif
2723 break;
2724 }
2725 break;
2726 case AHC_DV_STATE_INQ_VERIFY:
2727 switch (status & SS_MASK) {
2728 case SS_NOP:
2729 {
2730
2731 if (memcmp(targ->inq_data, targ->dv_buffer,
2732 AHC_LINUX_DV_INQ_LEN) == 0) {
2733 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2734 break;
2735 }
2736 #ifdef AHC_DEBUG
2737 if (ahc_debug & AHC_SHOW_DV) {
2738 int i;
2739
2740 ahc_print_devinfo(ahc, devinfo);
2741 printf("Inquiry buffer mismatch:");
2742 for (i = 0; i < AHC_LINUX_DV_INQ_LEN; i++) {
2743 if ((i & 0xF) == 0)
2744 printf("\n ");
2745 printf("0x%x:0x0%x ",
2746 ((uint8_t *)targ->inq_data)[i],
2747 targ->dv_buffer[i]);
2748 }
2749 printf("\n");
2750 }
2751 #endif
2752
2753 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2754 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2755 break;
2756 }
2757 /*
2758 * Do not count "falling back"
2759 * against our retries.
2760 */
2761 targ->dv_state_retry = 0;
2762 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2763 break;
2764 }
2765 case SS_INQ_REFRESH:
2766 AHC_SET_DV_STATE(ahc, targ,
2767 AHC_DV_STATE_INQ_SHORT_ASYNC);
2768 break;
2769 case SS_TUR:
2770 case SS_RETRY:
2771 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2772 if (ahc_cmd_get_transaction_status(cmd)
2773 == CAM_REQUEUE_REQ) {
2774 targ->dv_state_retry--;
2775 } else if ((status & SSQ_FALLBACK) != 0) {
2776 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2777 AHC_SET_DV_STATE(ahc, targ,
2778 AHC_DV_STATE_EXIT);
2779 break;
2780 }
2781 /*
2782 * Do not count "falling back"
2783 * against our retries.
2784 */
2785 targ->dv_state_retry = 0;
2786 } else if ((status & SS_ERRMASK) == EBUSY)
2787 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2788 if (targ->dv_state_retry < 10)
2789 break;
2790 /* FALLTHROUGH */
2791 default:
2792 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2793 #ifdef AHC_DEBUG
2794 if (ahc_debug & AHC_SHOW_DV) {
2795 ahc_print_devinfo(ahc, devinfo);
2796 printf("Failed DV inquiry, skipping\n");
2797 }
2798 #endif
2799 break;
2800 }
2801 break;
2802
2803 case AHC_DV_STATE_TUR:
2804 switch (status & SS_MASK) {
2805 case SS_NOP:
2806 if ((targ->flags & AHC_BASIC_DV) != 0) {
2807 ahc_linux_filter_inquiry(ahc, devinfo);
2808 AHC_SET_DV_STATE(ahc, targ,
2809 AHC_DV_STATE_INQ_VERIFY);
2810 } else if ((targ->flags & AHC_ENHANCED_DV) != 0) {
2811 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REBD);
2812 } else {
2813 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2814 }
2815 break;
2816 case SS_RETRY:
2817 case SS_TUR:
2818 if ((status & SS_ERRMASK) == EBUSY) {
2819 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2820 break;
2821 }
2822 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2823 if (ahc_cmd_get_transaction_status(cmd)
2824 == CAM_REQUEUE_REQ) {
2825 targ->dv_state_retry--;
2826 } else if ((status & SSQ_FALLBACK) != 0) {
2827 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2828 AHC_SET_DV_STATE(ahc, targ,
2829 AHC_DV_STATE_EXIT);
2830 break;
2831 }
2832 /*
2833 * Do not count "falling back"
2834 * against our retries.
2835 */
2836 targ->dv_state_retry = 0;
2837 }
2838 if (targ->dv_state_retry >= 10) {
2839 #ifdef AHC_DEBUG
2840 if (ahc_debug & AHC_SHOW_DV) {
2841 ahc_print_devinfo(ahc, devinfo);
				printf("DV TUR retries exhausted\n");
2843 }
2844 #endif
2845 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2846 break;
2847 }
2848 if (status & SSQ_DELAY)
2849 scsi_sleep(1 * HZ);
2850
2851 break;
2852 case SS_START:
2853 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_SU);
2854 break;
2855 case SS_INQ_REFRESH:
2856 AHC_SET_DV_STATE(ahc, targ,
2857 AHC_DV_STATE_INQ_SHORT_ASYNC);
2858 break;
2859 default:
2860 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2861 break;
2862 }
2863 break;
2864
2865 case AHC_DV_STATE_REBD:
2866 switch (status & SS_MASK) {
2867 case SS_NOP:
2868 {
2869 uint32_t echo_size;
2870
2871 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
2872 echo_size = scsi_3btoul(&targ->dv_buffer[1]);
2873 echo_size &= 0x1FFF;
2874 #ifdef AHC_DEBUG
2875 if (ahc_debug & AHC_SHOW_DV) {
2876 ahc_print_devinfo(ahc, devinfo);
2877 printf("Echo buffer size= %d\n", echo_size);
2878 }
2879 #endif
2880 if (echo_size == 0) {
2881 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2882 break;
2883 }
2884
2885 /* Generate the buffer pattern */
2886 targ->dv_echo_size = echo_size;
2887 ahc_linux_generate_dv_pattern(targ);
2888 /*
2889 * Setup initial negotiation values.
2890 */
2891 ahc_linux_filter_inquiry(ahc, devinfo);
2892 break;
2893 }
2894 case SS_INQ_REFRESH:
2895 AHC_SET_DV_STATE(ahc, targ,
2896 AHC_DV_STATE_INQ_SHORT_ASYNC);
2897 break;
2898 case SS_RETRY:
2899 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2900 if (ahc_cmd_get_transaction_status(cmd)
2901 == CAM_REQUEUE_REQ)
2902 targ->dv_state_retry--;
2903 if (targ->dv_state_retry <= 10)
2904 break;
2905 #ifdef AHC_DEBUG
2906 if (ahc_debug & AHC_SHOW_DV) {
2907 ahc_print_devinfo(ahc, devinfo);
				printf("DV REBD retries exhausted\n");
2909 }
2910 #endif
2911 /* FALLTHROUGH */
2912 case SS_FATAL:
2913 default:
2914 /*
2915 * Setup initial negotiation values
2916 * and try level 1 DV.
2917 */
2918 ahc_linux_filter_inquiry(ahc, devinfo);
2919 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_VERIFY);
2920 targ->dv_echo_size = 0;
2921 break;
2922 }
2923 break;
2924
2925 case AHC_DV_STATE_WEB:
2926 switch (status & SS_MASK) {
2927 case SS_NOP:
2928 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REB);
2929 break;
2930 case SS_INQ_REFRESH:
2931 AHC_SET_DV_STATE(ahc, targ,
2932 AHC_DV_STATE_INQ_SHORT_ASYNC);
2933 break;
2934 case SS_RETRY:
2935 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2936 if (ahc_cmd_get_transaction_status(cmd)
2937 == CAM_REQUEUE_REQ) {
2938 targ->dv_state_retry--;
2939 } else if ((status & SSQ_FALLBACK) != 0) {
2940 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2941 AHC_SET_DV_STATE(ahc, targ,
2942 AHC_DV_STATE_EXIT);
2943 break;
2944 }
2945 /*
2946 * Do not count "falling back"
2947 * against our retries.
2948 */
2949 targ->dv_state_retry = 0;
2950 }
			if (targ->dv_state_retry <= 10)
				break;
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_DV) {
				ahc_print_devinfo(ahc, devinfo);
				printf("DV WEB retries exhausted\n");
			}
#endif
			/* FALLTHROUGH */
2960 default:
2961 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2962 break;
2963 }
2964 break;
2965
2966 case AHC_DV_STATE_REB:
2967 switch (status & SS_MASK) {
2968 case SS_NOP:
2969 if (memcmp(targ->dv_buffer, targ->dv_buffer1,
2970 targ->dv_echo_size) != 0) {
2971 if (ahc_linux_fallback(ahc, devinfo) != 0)
2972 AHC_SET_DV_STATE(ahc, targ,
2973 AHC_DV_STATE_EXIT);
2974 else
2975 AHC_SET_DV_STATE(ahc, targ,
2976 AHC_DV_STATE_WEB);
2977 break;
2978 }
2979
2980 if (targ->dv_buffer != NULL) {
2981 free(targ->dv_buffer, M_DEVBUF);
2982 targ->dv_buffer = NULL;
2983 }
2984 if (targ->dv_buffer1 != NULL) {
2985 free(targ->dv_buffer1, M_DEVBUF);
2986 targ->dv_buffer1 = NULL;
2987 }
2988 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2989 break;
2990 case SS_INQ_REFRESH:
2991 AHC_SET_DV_STATE(ahc, targ,
2992 AHC_DV_STATE_INQ_SHORT_ASYNC);
2993 break;
2994 case SS_RETRY:
2995 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2996 if (ahc_cmd_get_transaction_status(cmd)
2997 == CAM_REQUEUE_REQ) {
2998 targ->dv_state_retry--;
2999 } else if ((status & SSQ_FALLBACK) != 0) {
3000 if (ahc_linux_fallback(ahc, devinfo) != 0) {
3001 AHC_SET_DV_STATE(ahc, targ,
3002 AHC_DV_STATE_EXIT);
3003 break;
3004 }
3005 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
3006 }
3007 if (targ->dv_state_retry <= 10) {
3008 if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
3009 scsi_sleep(ahc->our_id*HZ/10);
3010 break;
3011 }
3012 #ifdef AHC_DEBUG
3013 if (ahc_debug & AHC_SHOW_DV) {
3014 ahc_print_devinfo(ahc, devinfo);
			printf("DV REB retries exhausted\n");
3016 }
3017 #endif
3018 /* FALLTHROUGH */
3019 default:
3020 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
3021 break;
3022 }
3023 break;
3024
3025 case AHC_DV_STATE_SU:
3026 switch (status & SS_MASK) {
3027 case SS_NOP:
3028 case SS_INQ_REFRESH:
3029 AHC_SET_DV_STATE(ahc, targ,
3030 AHC_DV_STATE_INQ_SHORT_ASYNC);
3031 break;
3032 default:
3033 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
3034 break;
3035 }
3036 break;
3037
3038 case AHC_DV_STATE_BUSY:
3039 switch (status & SS_MASK) {
3040 case SS_NOP:
3041 case SS_INQ_REFRESH:
3042 AHC_SET_DV_STATE(ahc, targ,
3043 AHC_DV_STATE_INQ_SHORT_ASYNC);
3044 break;
3045 case SS_TUR:
3046 case SS_RETRY:
3047 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
3048 if (ahc_cmd_get_transaction_status(cmd)
3049 == CAM_REQUEUE_REQ) {
3050 targ->dv_state_retry--;
3051 } else if (targ->dv_state_retry < 60) {
3052 if ((status & SSQ_DELAY) != 0)
3053 scsi_sleep(1 * HZ);
3054 } else {
3055 #ifdef AHC_DEBUG
3056 if (ahc_debug & AHC_SHOW_DV) {
3057 ahc_print_devinfo(ahc, devinfo);
					printf("DV BUSY retries exhausted\n");
3059 }
3060 #endif
3061 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
3062 }
3063 break;
3064 default:
3065 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
3066 break;
3067 }
3068 break;
3069
3070 default:
3071 printf("%s: Invalid DV completion state %d\n", ahc_name(ahc),
3072 targ->dv_state);
3073 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
3074 break;
3075 }
3076 }
3077
3078 static void
3079 ahc_linux_dv_fill_cmd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3080 struct ahc_devinfo *devinfo)
3081 {
3082 memset(cmd, 0, sizeof(struct scsi_cmnd));
3083 cmd->device = ahc->platform_data->dv_scsi_dev;
3084 cmd->scsi_done = ahc_linux_dv_complete;
3085 }
3086
3087 /*
3088 * Synthesize an inquiry command. On the return trip, it'll be
3089 * sniffed and the device transfer settings set for us.
3090 */
3091 static void
3092 ahc_linux_dv_inq(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3093 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ,
3094 u_int request_length)
3095 {
3096
3097 #ifdef AHC_DEBUG
3098 if (ahc_debug & AHC_SHOW_DV) {
3099 ahc_print_devinfo(ahc, devinfo);
3100 printf("Sending INQ\n");
3101 }
3102 #endif
3103 if (targ->inq_data == NULL)
3104 targ->inq_data = malloc(AHC_LINUX_DV_INQ_LEN,
3105 M_DEVBUF, M_WAITOK);
3106 if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC) {
3107 if (targ->dv_buffer != NULL)
3108 free(targ->dv_buffer, M_DEVBUF);
3109 targ->dv_buffer = malloc(AHC_LINUX_DV_INQ_LEN,
3110 M_DEVBUF, M_WAITOK);
3111 }
3112
3113 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3114 cmd->sc_data_direction = SCSI_DATA_READ;
3115 cmd->cmd_len = 6;
3116 cmd->cmnd[0] = INQUIRY;
3117 cmd->cmnd[4] = request_length;
3118 cmd->request_bufflen = request_length;
3119 if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC)
3120 cmd->request_buffer = targ->dv_buffer;
3121 else
3122 cmd->request_buffer = targ->inq_data;
3123 memset(cmd->request_buffer, 0, AHC_LINUX_DV_INQ_LEN);
3124 }
3125
3126 static void
3127 ahc_linux_dv_tur(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3128 struct ahc_devinfo *devinfo)
3129 {
3130
3131 #ifdef AHC_DEBUG
3132 if (ahc_debug & AHC_SHOW_DV) {
3133 ahc_print_devinfo(ahc, devinfo);
3134 printf("Sending TUR\n");
3135 }
3136 #endif
3137 /* Do a TUR to clear out any non-fatal transitional state */
3138 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3139 cmd->sc_data_direction = SCSI_DATA_NONE;
3140 cmd->cmd_len = 6;
3141 cmd->cmnd[0] = TEST_UNIT_READY;
3142 }
3143
3144 #define AHC_REBD_LEN 4
3145
3146 static void
3147 ahc_linux_dv_rebd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3148 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
3149 {
3150
3151 #ifdef AHC_DEBUG
3152 if (ahc_debug & AHC_SHOW_DV) {
3153 ahc_print_devinfo(ahc, devinfo);
3154 printf("Sending REBD\n");
3155 }
3156 #endif
3157 if (targ->dv_buffer != NULL)
3158 free(targ->dv_buffer, M_DEVBUF);
3159 targ->dv_buffer = malloc(AHC_REBD_LEN, M_DEVBUF, M_WAITOK);
3160 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3161 cmd->sc_data_direction = SCSI_DATA_READ;
3162 cmd->cmd_len = 10;
3163 cmd->cmnd[0] = READ_BUFFER;
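	/* Mode 0x0b: return the echo buffer descriptor. */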
3164 cmd->cmnd[1] = 0x0b;
3165 scsi_ulto3b(AHC_REBD_LEN, &cmd->cmnd[6]);
3166 cmd->request_bufflen = AHC_REBD_LEN;
3167 cmd->underflow = cmd->request_bufflen;
3168 cmd->request_buffer = targ->dv_buffer;
3169 }
3170
3171 static void
3172 ahc_linux_dv_web(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3173 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
3174 {
3175
3176 #ifdef AHC_DEBUG
3177 if (ahc_debug & AHC_SHOW_DV) {
3178 ahc_print_devinfo(ahc, devinfo);
3179 printf("Sending WEB\n");
3180 }
3181 #endif
3182 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3183 cmd->sc_data_direction = SCSI_DATA_WRITE;
3184 cmd->cmd_len = 10;
3185 cmd->cmnd[0] = WRITE_BUFFER;
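	/* Mode 0x0a: write data to the echo buffer. */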
3186 cmd->cmnd[1] = 0x0a;
3187 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3188 cmd->request_bufflen = targ->dv_echo_size;
3189 cmd->underflow = cmd->request_bufflen;
3190 cmd->request_buffer = targ->dv_buffer;
3191 }
3192
3193 static void
3194 ahc_linux_dv_reb(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3195 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
3196 {
3197
3198 #ifdef AHC_DEBUG
3199 if (ahc_debug & AHC_SHOW_DV) {
3200 ahc_print_devinfo(ahc, devinfo);
3201 printf("Sending REB\n");
3202 }
3203 #endif
3204 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3205 cmd->sc_data_direction = SCSI_DATA_READ;
3206 cmd->cmd_len = 10;
3207 cmd->cmnd[0] = READ_BUFFER;
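	/* Mode 0x0a: read back the echo buffer contents. */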
3208 cmd->cmnd[1] = 0x0a;
3209 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3210 cmd->request_bufflen = targ->dv_echo_size;
3211 cmd->underflow = cmd->request_bufflen;
3212 cmd->request_buffer = targ->dv_buffer1;
3213 }
3214
3215 static void
3216 ahc_linux_dv_su(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3217 struct ahc_devinfo *devinfo,
3218 struct ahc_linux_target *targ)
3219 {
3220 u_int le;
3221
3222 le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
3223
3224 #ifdef AHC_DEBUG
3225 if (ahc_debug & AHC_SHOW_DV) {
3226 ahc_print_devinfo(ahc, devinfo);
3227 printf("Sending SU\n");
3228 }
3229 #endif
3230 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3231 cmd->sc_data_direction = SCSI_DATA_NONE;
3232 cmd->cmd_len = 6;
3233 cmd->cmnd[0] = START_STOP_UNIT;
3234 cmd->cmnd[4] = le | SSS_START;
3235 }
3236
3237 static int
3238 ahc_linux_fallback(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3239 {
3240 struct ahc_linux_target *targ;
3241 struct ahc_initiator_tinfo *tinfo;
3242 struct ahc_transinfo *goal;
3243 struct ahc_tmode_tstate *tstate;
3244 struct ahc_syncrate *syncrate;
3245 u_long s;
3246 u_int width;
3247 u_int period;
3248 u_int offset;
3249 u_int ppr_options;
3250 u_int cur_speed;
3251 u_int wide_speed;
3252 u_int narrow_speed;
3253 u_int fallback_speed;
3254
3255 #ifdef AHC_DEBUG
3256 if (ahc_debug & AHC_SHOW_DV) {
3257 ahc_print_devinfo(ahc, devinfo);
3258 printf("Trying to fallback\n");
3259 }
3260 #endif
3261 ahc_lock(ahc, &s);
3262 targ = ahc->platform_data->targets[devinfo->target_offset];
3263 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3264 devinfo->our_scsiid,
3265 devinfo->target, &tstate);
3266 goal = &tinfo->goal;
3267 width = goal->width;
3268 period = goal->period;
3269 offset = goal->offset;
3270 ppr_options = goal->ppr_options;
3271 if (offset == 0)
3272 period = AHC_ASYNC_XFER_PERIOD;
3273 if (targ->dv_next_narrow_period == 0)
3274 targ->dv_next_narrow_period = MAX(period, AHC_SYNCRATE_ULTRA2);
3275 if (targ->dv_next_wide_period == 0)
3276 targ->dv_next_wide_period = period;
3277 if (targ->dv_max_width == 0)
3278 targ->dv_max_width = width;
3279 if (targ->dv_max_ppr_options == 0)
3280 targ->dv_max_ppr_options = ppr_options;
3281 if (targ->dv_last_ppr_options == 0)
3282 targ->dv_last_ppr_options = ppr_options;
3283
3284 cur_speed = aic_calc_speed(width, period, offset, AHC_SYNCRATE_MIN);
3285 wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
3286 targ->dv_next_wide_period,
3287 MAX_OFFSET,
3288 AHC_SYNCRATE_MIN);
3289 narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
3290 targ->dv_next_narrow_period,
3291 MAX_OFFSET,
3292 AHC_SYNCRATE_MIN);
3293 fallback_speed = aic_calc_speed(width, period+1, offset,
3294 AHC_SYNCRATE_MIN);
3295 #ifdef AHC_DEBUG
3296 if (ahc_debug & AHC_SHOW_DV) {
3297 printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
3298 "fallback_speed= %d\n", cur_speed, wide_speed,
3299 narrow_speed, fallback_speed);
3300 }
3301 #endif
3302
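	/*
	 * Speeds are in KB/s.  Above 160000 only paced (Ultra320)
	 * rates remain, above 80000 only DT rates, and at or below
	 * 3300 we are already at narrow/async with nowhere left to
	 * fall back to.
	 */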
3303 if (cur_speed > 160000) {
3304 /*
3305 * Paced/DT/IU_REQ only transfer speeds. All we
3306 * can do is fallback in terms of syncrate.
3307 */
3308 period++;
3309 } else if (cur_speed > 80000) {
3310 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3311 /*
3312 * Try without IU_REQ as it may be confusing
3313 * an expander.
3314 */
3315 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3316 } else {
3317 /*
3318 * Paced/DT only transfer speeds. All we
3319 * can do is fallback in terms of syncrate.
3320 */
3321 period++;
3322 ppr_options = targ->dv_max_ppr_options;
3323 }
3324 } else if (cur_speed > 3300) {
3325
3326 /*
		 * In this range we have the following options,
		 * ordered from highest to lowest desirability:
3330 *
3331 * o Wide/DT
3332 * o Wide/non-DT
		 * o Narrow at a potentially higher sync rate.
3334 *
3335 * All modes are tested with and without IU_REQ
3336 * set since using IUs may confuse an expander.
3337 */
3338 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3339
3340 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3341 } else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
3342 /*
3343 * Try going non-DT.
3344 */
3345 ppr_options = targ->dv_max_ppr_options;
3346 ppr_options &= ~MSG_EXT_PPR_DT_REQ;
3347 } else if (targ->dv_last_ppr_options != 0) {
3348 /*
3349 * Try without QAS or any other PPR options.
3350 * We may need a non-PPR message to work with
3351 * an expander. We look at the "last PPR options"
3352 * so we will perform this fallback even if the
3353 * target responded to our PPR negotiation with
3354 * no option bits set.
3355 */
3356 ppr_options = 0;
3357 } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
3358 /*
3359 * If the next narrow speed is greater than
3360 * the next wide speed, fallback to narrow.
3361 * Otherwise fallback to the next DT/Wide setting.
3362 * The narrow async speed will always be smaller
3363 * than the wide async speed, so handle this case
3364 * specifically.
3365 */
3366 ppr_options = targ->dv_max_ppr_options;
3367 if (narrow_speed > fallback_speed
3368 || period >= AHC_ASYNC_XFER_PERIOD) {
3369 targ->dv_next_wide_period = period+1;
3370 width = MSG_EXT_WDTR_BUS_8_BIT;
3371 period = targ->dv_next_narrow_period;
3372 } else {
3373 period++;
3374 }
3375 } else if ((ahc->features & AHC_WIDE) != 0
3376 && targ->dv_max_width != 0
3377 && wide_speed >= fallback_speed
3378 && (targ->dv_next_wide_period <= AHC_ASYNC_XFER_PERIOD
3379 || period >= AHC_ASYNC_XFER_PERIOD)) {
3380
3381 /*
3382 * We are narrow. Try falling back
3383 * to the next wide speed with
3384 * all supported ppr options set.
3385 */
3386 targ->dv_next_narrow_period = period+1;
3387 width = MSG_EXT_WDTR_BUS_16_BIT;
3388 period = targ->dv_next_wide_period;
3389 ppr_options = targ->dv_max_ppr_options;
3390 } else {
3391 /* Only narrow fallback is allowed. */
3392 period++;
3393 ppr_options = targ->dv_max_ppr_options;
3394 }
3395 } else {
3396 ahc_unlock(ahc, &s);
3397 return (-1);
3398 }
3399 offset = MAX_OFFSET;
3400 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
3401 AHC_SYNCRATE_DT);
3402 ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, FALSE);
3403 if (period == 0) {
3405 offset = 0;
3406 ppr_options = 0;
3407 if (width == MSG_EXT_WDTR_BUS_8_BIT)
3408 targ->dv_next_narrow_period = AHC_ASYNC_XFER_PERIOD;
3409 else
3410 targ->dv_next_wide_period = AHC_ASYNC_XFER_PERIOD;
3411 }
3412 ahc_set_syncrate(ahc, devinfo, syncrate, period, offset,
3413 ppr_options, AHC_TRANS_GOAL, FALSE);
3414 targ->dv_last_ppr_options = ppr_options;
3415 ahc_unlock(ahc, &s);
3416 return (0);
3417 }
3418
3419 static void
3420 ahc_linux_dv_timeout(struct scsi_cmnd *cmd)
3421 {
3422 struct ahc_softc *ahc;
3423 struct scb *scb;
3424 u_long flags;
3425
3426 ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
3427 ahc_lock(ahc, &flags);
3428
3429 #ifdef AHC_DEBUG
3430 if (ahc_debug & AHC_SHOW_DV) {
3431 printf("%s: Timeout while doing DV command %x.\n",
3432 ahc_name(ahc), cmd->cmnd[0]);
3433 ahc_dump_card_state(ahc);
3434 }
3435 #endif
3436
3437 /*
3438 * Guard against "done race". No action is
3439 * required if we just completed.
3440 */
3441 if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
3442 ahc_unlock(ahc, &flags);
3443 return;
3444 }
3445
3446 /*
3447 * Command has not completed. Mark this
3448 * SCB as having failing status prior to
3449 * resetting the bus, so we get the correct
3450 * error code.
3451 */
3452 if ((scb->flags & SCB_SENSE) != 0)
3453 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
3454 else
3455 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
3456 ahc_reset_channel(ahc, cmd->device->channel + 'A', /*initiate*/TRUE);
3457
3458 /*
3459 * Add a minimal bus settle delay for devices that are slow to
3460 * respond after bus resets.
3461 */
3462 ahc_linux_freeze_simq(ahc);
3463 init_timer(&ahc->platform_data->reset_timer);
3464 ahc->platform_data->reset_timer.data = (u_long)ahc;
3465 ahc->platform_data->reset_timer.expires = jiffies + HZ / 2;
3466 ahc->platform_data->reset_timer.function =
3467 (ahc_linux_callback_t *)ahc_linux_release_simq;
3468 add_timer(&ahc->platform_data->reset_timer);
3469 if (ahc_linux_next_device_to_run(ahc) != NULL)
3470 ahc_schedule_runq(ahc);
3471 ahc_linux_run_complete_queue(ahc);
3472 ahc_unlock(ahc, &flags);
3473 }
3474
3475 static void
3476 ahc_linux_dv_complete(struct scsi_cmnd *cmd)
3477 {
3478 struct ahc_softc *ahc;
3479
3480 ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
3481
3482 /* Delete the DV timer before it goes off! */
3483 scsi_delete_timer(cmd);
3484
3485 #ifdef AHC_DEBUG
3486 if (ahc_debug & AHC_SHOW_DV)
3487 printf("%s:%d:%d: Command completed, status= 0x%x\n",
3488 ahc_name(ahc), cmd->device->channel,
3489 cmd->device->id, cmd->result);
3490 #endif
3491
3492 /* Wake up the state machine */
3493 up(&ahc->platform_data->dv_cmd_sem);
3494 }
3495
3496 static void
3497 ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ)
3498 {
3499 uint16_t b;
3500 u_int i;
3501 u_int j;
3502
3503 if (targ->dv_buffer != NULL)
3504 free(targ->dv_buffer, M_DEVBUF);
3505 targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3506 if (targ->dv_buffer1 != NULL)
3507 free(targ->dv_buffer1, M_DEVBUF);
3508 targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3509
3510 i = 0;
3511 b = 0x0001;
3512 for (j = 0 ; i < targ->dv_echo_size; j++) {
3513 if (j < 32) {
3514 /*
			 * 32 bytes of sequential numbers.
3516 */
3517 targ->dv_buffer[i++] = j & 0xff;
3518 } else if (j < 48) {
3519 /*
			 * 16 bytes of repeating 0x0000, 0xffff.
3521 */
3522 targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
3523 } else if (j < 64) {
3524 /*
			 * 16 bytes of repeating 0x5555, 0xaaaa.
3526 */
3527 targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
3528 } else {
3529 /*
3530 * Remaining buffer is filled with a repeating
			 * pattern of:
			 *
			 *	0xffff
			 *	~(single walking bit), shifted left once
			 *	each pass and wrapping back to 0x0001.
3535 */
3536 if (j & 0x02) {
3537 if (j & 0x01) {
3538 targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
3539 b <<= 1;
3540 if (b == 0x0000)
3541 b = 0x0001;
3542 } else {
3543 targ->dv_buffer[i++] = (~b & 0xff);
3544 }
3545 } else {
3546 targ->dv_buffer[i++] = 0xff;
3547 }
3548 }
3549 }
3550 }
3551
3552 static u_int
3553 ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3554 {
3555 static int warned_user;
3556 u_int tags;
3557
3558 tags = 0;
3559 if ((ahc->user_discenable & devinfo->target_mask) != 0) {
3560 if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
3561 if (warned_user == 0) {
3562
3563 printf(KERN_WARNING
3564 "aic7xxx: WARNING: Insufficient tag_info instances\n"
3565 "aic7xxx: for installed controllers. Using defaults\n"
3566 "aic7xxx: Please update the aic7xxx_tag_info array in\n"
			"aic7xxx: the aic7xxx_osm.c source file.\n");
3568 warned_user++;
3569 }
3570 tags = AHC_MAX_QUEUE;
3571 } else {
3572 adapter_tag_info_t *tag_info;
3573
3574 tag_info = &aic7xxx_tag_info[ahc->unit];
3575 tags = tag_info->tag_commands[devinfo->target_offset];
3576 if (tags > AHC_MAX_QUEUE)
3577 tags = AHC_MAX_QUEUE;
3578 }
3579 }
3580 return (tags);
3581 }
3582
3583 static u_int
3584 ahc_linux_user_dv_setting(struct ahc_softc *ahc)
3585 {
3586 static int warned_user;
3587 int dv;
3588
3589 if (ahc->unit >= NUM_ELEMENTS(aic7xxx_dv_settings)) {
3590 if (warned_user == 0) {
3591
3592 printf(KERN_WARNING
3593 "aic7xxx: WARNING: Insufficient dv settings instances\n"
3594 "aic7xxx: for installed controllers. Using defaults\n"
3595 "aic7xxx: Please update the aic7xxx_dv_settings array\n"
3596 "aic7xxx: in the aic7xxx_osm.c source file.\n");
3597 warned_user++;
3598 }
3599 dv = -1;
3600 } else {
3601
3602 dv = aic7xxx_dv_settings[ahc->unit];
3603 }
3604
3605 if (dv < 0) {
3606 u_long s;
3607
3608 /*
3609 * Apply the default.
3610 */
3611 /*
3612 * XXX - Enable DV on non-U160 controllers once it
3613 * has been tested there.
3614 */
3615 ahc_lock(ahc, &s);
3616 dv = (ahc->features & AHC_DT);
3617 if (ahc->seep_config != 0
3618 && ahc->seep_config->signature >= CFSIGNATURE2)
3619 dv = (ahc->seep_config->adapter_control & CFENABLEDV);
3620 ahc_unlock(ahc, &s);
3621 }
3622 return (dv);
3623 }
3624
3625 /*
3626 * Determines the queue depth for a given device.
3627 */
3628 static void
3629 ahc_linux_device_queue_depth(struct ahc_softc *ahc,
3630 struct ahc_linux_device *dev)
3631 {
3632 struct ahc_devinfo devinfo;
3633 u_int tags;
3634
3635 ahc_compile_devinfo(&devinfo,
3636 dev->target->channel == 0
3637 ? ahc->our_id : ahc->our_id_b,
3638 dev->target->target, dev->lun,
3639 dev->target->channel == 0 ? 'A' : 'B',
3640 ROLE_INITIATOR);
3641 tags = ahc_linux_user_tagdepth(ahc, &devinfo);
3642 if (tags != 0
3643 && dev->scsi_device != NULL
3644 && dev->scsi_device->tagged_supported != 0) {
3645
3646 ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
3647 ahc_print_devinfo(ahc, &devinfo);
3648 printf("Tagged Queuing enabled. Depth %d\n", tags);
3649 } else {
3650 ahc_set_tags(ahc, &devinfo, AHC_QUEUE_NONE);
3651 }
3652 }
3653
3654 static void
3655 ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
3656 {
3657 struct ahc_cmd *acmd;
3658 struct scsi_cmnd *cmd;
3659 struct scb *scb;
3660 struct hardware_scb *hscb;
3661 struct ahc_initiator_tinfo *tinfo;
3662 struct ahc_tmode_tstate *tstate;
3663 uint16_t mask;
3664
3665 if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
3666 panic("running device on run list");
3667
3668 while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
3669 && dev->openings > 0 && dev->qfrozen == 0) {
3670
3671 /*
3672 * Schedule us to run later. The only reason we are not
3673 * running is because the whole controller Q is frozen.
3674 */
3675 if (ahc->platform_data->qfrozen != 0
3676 && AHC_DV_SIMQ_FROZEN(ahc) == 0) {
3677 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
3678 dev, links);
3679 dev->flags |= AHC_DEV_ON_RUN_LIST;
3680 return;
3681 }
3682 /*
3683 * Get an scb to use.
3684 */
3685 if ((scb = ahc_get_scb(ahc)) == NULL) {
3686 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
3687 dev, links);
3688 dev->flags |= AHC_DEV_ON_RUN_LIST;
3689 ahc->flags |= AHC_RESOURCE_SHORTAGE;
3690 return;
3691 }
3692 TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
3693 cmd = &acmd_scsi_cmd(acmd);
3694 scb->io_ctx = cmd;
3695 scb->platform_data->dev = dev;
3696 hscb = scb->hscb;
3697 cmd->host_scribble = (char *)scb;
3698
3699 /*
3700 * Fill out basics of the HSCB.
3701 */
3702 hscb->control = 0;
3703 hscb->scsiid = BUILD_SCSIID(ahc, cmd);
3704 hscb->lun = cmd->device->lun;
3705 mask = SCB_GET_TARGET_MASK(ahc, scb);
3706 tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
3707 SCB_GET_OUR_ID(scb),
3708 SCB_GET_TARGET(ahc, scb), &tstate);
3709 hscb->scsirate = tinfo->scsirate;
3710 hscb->scsioffset = tinfo->curr.offset;
3711 if ((tstate->ultraenb & mask) != 0)
3712 hscb->control |= ULTRAENB;
3713
3714 if ((ahc->user_discenable & mask) != 0)
3715 hscb->control |= DISCENB;
3716
3717 if (AHC_DV_CMD(cmd) != 0)
3718 scb->flags |= SCB_SILENT;
3719
3720 if ((tstate->auto_negotiate & mask) != 0) {
3721 scb->flags |= SCB_AUTO_NEGOTIATE;
3722 scb->hscb->control |= MK_MESSAGE;
3723 }
3724
3725 if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
3726 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
3727 int msg_bytes;
3728 uint8_t tag_msgs[2];
3729
3730 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
3731 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
3732 hscb->control |= tag_msgs[0];
3733 if (tag_msgs[0] == MSG_ORDERED_TASK)
3734 dev->commands_since_idle_or_otag = 0;
3735 } else
3736 #endif
3737 if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
3738 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
3739 hscb->control |= MSG_ORDERED_TASK;
3740 dev->commands_since_idle_or_otag = 0;
3741 } else {
3742 hscb->control |= MSG_SIMPLE_TASK;
3743 }
3744 }
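
		/*
		 * Sketch of the tag selection above: a device in
		 * AHC_DEV_Q_TAGGED mode normally gets MSG_SIMPLE_TASK,
		 * but once commands_since_idle_or_otag (incremented
		 * below when AHC_DEV_PERIODIC_OTAG is set) reaches
		 * AHC_OTAG_THRESH, a single MSG_ORDERED_TASK is sent
		 * and the counter resets, bounding how long the
		 * drive's internal reordering can starve any one
		 * command.
		 */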
3745
3746 hscb->cdb_len = cmd->cmd_len;
3747 if (hscb->cdb_len <= 12) {
3748 memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
3749 } else {
3750 memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
3751 scb->flags |= SCB_CDB32_PTR;
3752 }
3753
3754 scb->platform_data->xfer_len = 0;
3755 ahc_set_residual(scb, 0);
3756 ahc_set_sense_residual(scb, 0);
3757 scb->sg_count = 0;
3758 if (cmd->use_sg != 0) {
3759 struct ahc_dma_seg *sg;
3760 struct scatterlist *cur_seg;
3761 struct scatterlist *end_seg;
3762 int nseg;
3763
3764 cur_seg = (struct scatterlist *)cmd->request_buffer;
3765 nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
3766 scsi_to_pci_dma_dir(cmd->sc_data_direction));
3767 end_seg = cur_seg + nseg;
3768 /* Copy the segments into the SG list. */
3769 sg = scb->sg_list;
3770 /*
3771 * The sg_count may be larger than nseg if
3772 * a transfer crosses a 32bit page boundary.
3773 */
3774 while (cur_seg < end_seg) {
3775 bus_addr_t addr;
3776 bus_size_t len;
3777 int consumed;
3778
3779 addr = sg_dma_address(cur_seg);
3780 len = sg_dma_len(cur_seg);
3781 consumed = ahc_linux_map_seg(ahc, scb,
3782 sg, addr, len);
3783 sg += consumed;
3784 scb->sg_count += consumed;
3785 cur_seg++;
3786 }
3787 sg--;
3788 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
3789
3790 /*
3791 * Reset the sg list pointer.
3792 */
3793 scb->hscb->sgptr =
3794 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
3795
3796 /*
3797 * Copy the first SG into the "current"
3798 * data pointer area.
3799 */
3800 scb->hscb->dataptr = scb->sg_list->addr;
3801 scb->hscb->datacnt = scb->sg_list->len;
3802 } else if (cmd->request_bufflen != 0) {
3803 struct ahc_dma_seg *sg;
3804 bus_addr_t addr;
3805
3806 sg = scb->sg_list;
3807 addr = pci_map_single(ahc->dev_softc,
3808 cmd->request_buffer,
3809 cmd->request_bufflen,
3810 scsi_to_pci_dma_dir(cmd->sc_data_direction));
3811 scb->platform_data->buf_busaddr = addr;
3812 scb->sg_count = ahc_linux_map_seg(ahc, scb,
3813 sg, addr,
3814 cmd->request_bufflen);
3815 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
3816
3817 /*
3818 * Reset the sg list pointer.
3819 */
3820 scb->hscb->sgptr =
3821 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
3822
3823 /*
3824 * Copy the first SG into the "current"
3825 * data pointer area.
3826 */
3827 scb->hscb->dataptr = sg->addr;
3828 scb->hscb->datacnt = sg->len;
3829 } else {
3830 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
3831 scb->hscb->dataptr = 0;
3832 scb->hscb->datacnt = 0;
3833 scb->sg_count = 0;
3834 }
3835
3836 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE);
3837 LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
3838 dev->openings--;
3839 dev->active++;
3840 dev->commands_issued++;
3841 if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
3842 dev->commands_since_idle_or_otag++;
3843
3844 /*
3845 * We only allow one untagged transaction
3846 * per target in the initiator role unless
3847 * we are storing a full busy target *lun*
3848 * table in SCB space.
3849 */
3850 if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
3851 && (ahc->features & AHC_SCB_BTT) == 0) {
3852 struct scb_tailq *untagged_q;
3853 int target_offset;
3854
3855 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
3856 untagged_q = &(ahc->untagged_queues[target_offset]);
3857 TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
3858 scb->flags |= SCB_UNTAGGEDQ;
3859 if (TAILQ_FIRST(untagged_q) != scb)
3860 continue;
3861 }
3862 scb->flags |= SCB_ACTIVE;
3863 ahc_queue_scb(ahc, scb);
3864 }
3865 }
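
/*
 * Example of the untagged serialization in the loop above: on a
 * controller without AHC_SCB_BTT, two back-to-back untagged commands to
 * the same target both land on untagged_q, but only the head is handed
 * to ahc_queue_scb(); the second is skipped via the "continue" and is
 * started later by ahc_run_untagged_queue() from ahc_done() once the
 * head completes.
 */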
3866
3867 /*
3868 * SCSI controller interrupt handler.
3869 */
3870 irqreturn_t
3871 ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
3872 {
3873 struct ahc_softc *ahc;
3874 u_long flags;
3875 int ours;
3876
3877 ahc = (struct ahc_softc *) dev_id;
3878 ahc_lock(ahc, &flags);
3879 ours = ahc_intr(ahc);
3880 if (ahc_linux_next_device_to_run(ahc) != NULL)
3881 ahc_schedule_runq(ahc);
3882 ahc_linux_run_complete_queue(ahc);
3883 ahc_unlock(ahc, &flags);
3884 return IRQ_RETVAL(ours);
3885 }
3886
3887 void
3888 ahc_platform_flushwork(struct ahc_softc *ahc)
3889 {
3890
3891 while (ahc_linux_run_complete_queue(ahc) != NULL)
3892 ;
3893 }
3894
3895 static struct ahc_linux_target*
3896 ahc_linux_alloc_target(struct ahc_softc *ahc, u_int channel, u_int target)
3897 {
3898 struct ahc_linux_target *targ;
3899 u_int target_offset;
3900
3901 target_offset = target;
3902 if (channel != 0)
3903 target_offset += 8;
3904
3905 targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
3906 if (targ == NULL)
3907 return (NULL);
3908 memset(targ, 0, sizeof(*targ));
3909 targ->channel = channel;
3910 targ->target = target;
3911 targ->ahc = ahc;
3912 targ->flags = AHC_DV_REQUIRED;
3913 ahc->platform_data->targets[target_offset] = targ;
3914 return (targ);
3915 }
3916
3917 static void
3918 ahc_linux_free_target(struct ahc_softc *ahc, struct ahc_linux_target *targ)
3919 {
3920 struct ahc_devinfo devinfo;
3921 struct ahc_initiator_tinfo *tinfo;
3922 struct ahc_tmode_tstate *tstate;
3923 u_int our_id;
3924 u_int target_offset;
3925 char channel;
3926
3927 /*
3928 * Force a negotiation to async/narrow on any
3929 * future command to this device unless a bus
3930 * reset occurs between now and that command.
3931 */
3932 channel = 'A' + targ->channel;
3933 our_id = ahc->our_id;
3934 target_offset = targ->target;
3935 if (targ->channel != 0) {
3936 target_offset += 8;
3937 our_id = ahc->our_id_b;
3938 }
3939 tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
3940 targ->target, &tstate);
3941 ahc_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
3942 channel, ROLE_INITIATOR);
3943 ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
3944 AHC_TRANS_GOAL, /*paused*/FALSE);
3945 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3946 AHC_TRANS_GOAL, /*paused*/FALSE);
3947 ahc_update_neg_request(ahc, &devinfo, tstate, tinfo, AHC_NEG_ALWAYS);
3948 ahc->platform_data->targets[target_offset] = NULL;
3949 if (targ->inq_data != NULL)
3950 free(targ->inq_data, M_DEVBUF);
3951 if (targ->dv_buffer != NULL)
3952 free(targ->dv_buffer, M_DEVBUF);
3953 if (targ->dv_buffer1 != NULL)
3954 free(targ->dv_buffer1, M_DEVBUF);
3955 free(targ, M_DEVBUF);
3956 }
3957
3958 static struct ahc_linux_device*
3959 ahc_linux_alloc_device(struct ahc_softc *ahc,
3960 struct ahc_linux_target *targ, u_int lun)
3961 {
3962 struct ahc_linux_device *dev;
3963
3964 dev = malloc(sizeof(*dev), M_DEVBUF, M_NOWAIT);
3965 if (dev == NULL)
3966 return (NULL);
3967 memset(dev, 0, sizeof(*dev));
3968 init_timer(&dev->timer);
3969 TAILQ_INIT(&dev->busyq);
3970 dev->flags = AHC_DEV_UNCONFIGURED;
3971 dev->lun = lun;
3972 dev->target = targ;
3973
3974 /*
3975 * We start out life using untagged
3976 * transactions of which we allow one.
3977 */
3978 dev->openings = 1;
3979
3980 /*
3981 * Set maxtags to 0. This will be changed if we
3982 * later determine that we are dealing with
3983 * a tagged queuing capable device.
3984 */
3985 dev->maxtags = 0;
3986
3987 targ->refcount++;
3988 targ->devices[lun] = dev;
3989 return (dev);
3990 }
3991
3992 static void
3993 ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
3994 {
3995 struct ahc_linux_target *targ;
3996
3997 del_timer_sync(&dev->timer);
3998 targ = dev->target;
3999 targ->devices[dev->lun] = NULL;
4000 free(dev, M_DEVBUF);
4001 targ->refcount--;
4002 if (targ->refcount == 0
4003 && (targ->flags & AHC_DV_REQUIRED) == 0)
4004 ahc_linux_free_target(ahc, targ);
4005 }
4006
4007 void
4008 ahc_send_async(struct ahc_softc *ahc, char channel,
4009 u_int target, u_int lun, ac_code code, void *arg)
4010 {
4011 switch (code) {
4012 case AC_TRANSFER_NEG:
4013 {
4014 char buf[80];
4015 struct ahc_linux_target *targ;
4016 struct info_str info;
4017 struct ahc_initiator_tinfo *tinfo;
4018 struct ahc_tmode_tstate *tstate;
4019 int target_offset;
4020
4021 info.buffer = buf;
4022 info.length = sizeof(buf);
4023 info.offset = 0;
4024 info.pos = 0;
4025 tinfo = ahc_fetch_transinfo(ahc, channel,
4026 channel == 'A' ? ahc->our_id
4027 : ahc->our_id_b,
4028 target, &tstate);
4029
4030 /*
4031 * Don't bother reporting results while
4032 * negotiations are still pending.
4033 */
4034 if (tinfo->curr.period != tinfo->goal.period
4035 || tinfo->curr.width != tinfo->goal.width
4036 || tinfo->curr.offset != tinfo->goal.offset
4037 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
4038 if (bootverbose == 0)
4039 break;
4040
4041 /*
4042 * Don't bother reporting results that
4043 * are identical to those last reported.
4044 */
4045 target_offset = target;
4046 if (channel == 'B')
4047 target_offset += 8;
4048 targ = ahc->platform_data->targets[target_offset];
4049 if (targ == NULL)
4050 break;
4051 if (tinfo->curr.period == targ->last_tinfo.period
4052 && tinfo->curr.width == targ->last_tinfo.width
4053 && tinfo->curr.offset == targ->last_tinfo.offset
4054 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
4055 if (bootverbose == 0)
4056 break;
4057
4058 targ->last_tinfo.period = tinfo->curr.period;
4059 targ->last_tinfo.width = tinfo->curr.width;
4060 targ->last_tinfo.offset = tinfo->curr.offset;
4061 targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;
4062
4063 printf("(%s:%c:", ahc_name(ahc), channel);
4064 if (target == CAM_TARGET_WILDCARD)
4065 printf("*): ");
4066 else
4067 printf("%d): ", target);
4068 ahc_format_transinfo(&info, &tinfo->curr);
4069 if (info.pos < info.length)
4070 *info.buffer = '\0';
4071 else
4072 buf[info.length - 1] = '\0';
4073 printf("%s", buf);
4074 break;
4075 }
4076 case AC_SENT_BDR:
4077 {
4078 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
4079 WARN_ON(lun != CAM_LUN_WILDCARD);
4080 scsi_report_device_reset(ahc->platform_data->host,
4081 channel - 'A', target);
4082 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
4083 Scsi_Device *scsi_dev;
4084
4085 /*
4086 * Find the SCSI device associated with this
4087 * request and indicate that a UA is expected.
4088 */
4089 for (scsi_dev = ahc->platform_data->host->host_queue;
4090 scsi_dev != NULL; scsi_dev = scsi_dev->next) {
4091 if (channel - 'A' == scsi_dev->channel
4092 && target == scsi_dev->id
4093 && (lun == CAM_LUN_WILDCARD
4094 || lun == scsi_dev->lun)) {
4095 scsi_dev->was_reset = 1;
4096 scsi_dev->expecting_cc_ua = 1;
4097 }
4098 }
4099 #endif
4100 break;
4101 }
4102 case AC_BUS_RESET:
4103 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
4104 if (ahc->platform_data->host != NULL) {
4105 scsi_report_bus_reset(ahc->platform_data->host,
4106 channel - 'A');
4107 }
4108 #endif
4109 break;
4110 default:
4111 panic("ahc_send_async: Unexpected async event");
4112 }
4113 }
4114
4115 /*
4116 * Calls the higher level scsi done function and frees the scb.
4117 */
4118 void
4119 ahc_done(struct ahc_softc *ahc, struct scb *scb)
4120 {
4121 Scsi_Cmnd *cmd;
4122 struct ahc_linux_device *dev;
4123
4124 LIST_REMOVE(scb, pending_links);
4125 if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
4126 struct scb_tailq *untagged_q;
4127 int target_offset;
4128
4129 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
4130 untagged_q = &(ahc->untagged_queues[target_offset]);
4131 TAILQ_REMOVE(untagged_q, scb, links.tqe);
4132 ahc_run_untagged_queue(ahc, untagged_q);
4133 }
4134
4135 if ((scb->flags & SCB_ACTIVE) == 0) {
4136 printf("SCB %d done'd twice\n", scb->hscb->tag);
4137 ahc_dump_card_state(ahc);
4138 panic("Stopping for safety");
4139 }
4140 cmd = scb->io_ctx;
4141 dev = scb->platform_data->dev;
4142 dev->active--;
4143 dev->openings++;
4144 if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
4145 cmd->result &= ~(CAM_DEV_QFRZN << 16);
4146 dev->qfrozen--;
4147 }
4148 ahc_linux_unmap_scb(ahc, scb);
4149
4150 /*
4151 * Guard against stale sense data.
4152 * The Linux mid-layer assumes that sense
4153 * was retrieved anytime the first byte of
4154 * the sense buffer looks "sane".
4155 */
4156 cmd->sense_buffer[0] = 0;
4157 if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
4158 uint32_t amount_xferred;
4159
4160 amount_xferred =
4161 ahc_get_transfer_length(scb) - ahc_get_residual(scb);
4162 if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
4163 #ifdef AHC_DEBUG
4164 if ((ahc_debug & AHC_SHOW_MISC) != 0) {
4165 ahc_print_path(ahc, scb);
4166 printf("Set CAM_UNCOR_PARITY\n");
4167 }
4168 #endif
4169 ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
4170 } else if (amount_xferred < scb->io_ctx->underflow) {
4171 u_int i;
4172
4173 ahc_print_path(ahc, scb);
4174 printf("CDB:");
4175 for (i = 0; i < scb->io_ctx->cmd_len; i++)
4176 printf(" 0x%x", scb->io_ctx->cmnd[i]);
4177 printf("\n");
4178 ahc_print_path(ahc, scb);
4179 printf("Saw underflow (%ld of %ld bytes). "
4180 "Treated as error\n",
4181 ahc_get_residual(scb),
4182 ahc_get_transfer_length(scb));
4183 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
4184 } else {
4185 ahc_set_transaction_status(scb, CAM_REQ_CMP);
4186 }
4187 } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
4188 ahc_linux_handle_scsi_status(ahc, dev, scb);
4189 } else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
4190 dev->flags |= AHC_DEV_UNCONFIGURED;
4191 if (AHC_DV_CMD(cmd) == FALSE)
4192 dev->target->flags &= ~AHC_DV_REQUIRED;
4193 }
4194 /*
4195 * Start DV for devices that require it assuming the first command
4196 * sent does not result in a selection timeout.
4197 */
4198 if (ahc_get_transaction_status(scb) != CAM_SEL_TIMEOUT
4199 && (dev->target->flags & AHC_DV_REQUIRED) != 0)
4200 ahc_linux_start_dv(ahc);
4201
4202 if (dev->openings == 1
4203 && ahc_get_transaction_status(scb) == CAM_REQ_CMP
4204 && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
4205 dev->tag_success_count++;
4206 /*
4207 * Some devices deal with temporary internal resource
4208 * shortages by returning queue full. When the queue
4209 * full occurs, we throttle back. Slowly try to get
4210 * back to our previous queue depth.
4211 */
4212 if ((dev->openings + dev->active) < dev->maxtags
4213 && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
4214 dev->tag_success_count = 0;
4215 dev->openings++;
4216 }
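
	/*
	 * Illustrative walk-through of the ramp above: a device
	 * throttled by queue fulls sits at a reduced opening count;
	 * each time AHC_TAG_SUCCESS_INTERVAL commands complete
	 * cleanly while the device is down to its last opening, one
	 * opening is restored, so the effective depth creeps back
	 * toward dev->maxtags instead of snapping back and
	 * immediately re-triggering a queue full.
	 */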
4217
4218 if (dev->active == 0)
4219 dev->commands_since_idle_or_otag = 0;
4220
4221 if (TAILQ_EMPTY(&dev->busyq)) {
4222 if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0
4223 && dev->active == 0
4224 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
4225 ahc_linux_free_device(ahc, dev);
4226 } else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
4227 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
4228 dev->flags |= AHC_DEV_ON_RUN_LIST;
4229 }
4230
4231 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
4232 printf("Recovery SCB completes\n");
4233 if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
4234 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
4235 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
4236 if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
4237 ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
4238 up(&ahc->platform_data->eh_sem);
4239 }
4240 }
4241
4242 ahc_free_scb(ahc, scb);
4243 ahc_linux_queue_cmd_complete(ahc, cmd);
4244
4245 if ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_EMPTY) != 0
4246 && LIST_FIRST(&ahc->pending_scbs) == NULL) {
4247 ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_EMPTY;
4248 up(&ahc->platform_data->dv_sem);
4249 }
4250
4251 }
4252
4253 static void
4254 ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
4255 struct ahc_linux_device *dev, struct scb *scb)
4256 {
4257 struct ahc_devinfo devinfo;
4258
4259 ahc_compile_devinfo(&devinfo,
4260 ahc->our_id,
4261 dev->target->target, dev->lun,
4262 dev->target->channel == 0 ? 'A' : 'B',
4263 ROLE_INITIATOR);
4264
4265 /*
4266 * We don't currently trust the mid-layer to
4267 * properly deal with queue full or busy. So,
4268 * when one occurs, we tell the mid-layer to
4269 * unconditionally requeue the command to us
4270 * so that we can retry it ourselves. We also
4271 * implement our own throttling mechanism so
4272 * we don't clobber the device with too many
4273 * commands.
4274 */
4275 switch (ahc_get_scsi_status(scb)) {
4276 default:
4277 break;
4278 case SCSI_STATUS_CHECK_COND:
4279 case SCSI_STATUS_CMD_TERMINATED:
4280 {
4281 Scsi_Cmnd *cmd;
4282
4283 /*
4284 * Copy sense information to the OS's cmd
4285 * structure if it is available.
4286 */
4287 cmd = scb->io_ctx;
4288 if (scb->flags & SCB_SENSE) {
4289 u_int sense_size;
4290
4291 sense_size = MIN(sizeof(struct scsi_sense_data)
4292 - ahc_get_sense_residual(scb),
4293 sizeof(cmd->sense_buffer));
4294 memcpy(cmd->sense_buffer,
4295 ahc_get_sense_buf(ahc, scb), sense_size);
4296 if (sense_size < sizeof(cmd->sense_buffer))
4297 memset(&cmd->sense_buffer[sense_size], 0,
4298 sizeof(cmd->sense_buffer) - sense_size);
4299 cmd->result |= (DRIVER_SENSE << 24);
4300 #ifdef AHC_DEBUG
4301 if (ahc_debug & AHC_SHOW_SENSE) {
4302 int i;
4303
4304 printf("Copied %d bytes of sense data:",
4305 sense_size);
4306 for (i = 0; i < sense_size; i++) {
4307 if ((i & 0xF) == 0)
4308 printf("\n");
4309 printf("0x%x ", cmd->sense_buffer[i]);
4310 }
4311 printf("\n");
4312 }
4313 #endif
4314 }
4315 break;
4316 }
4317 case SCSI_STATUS_QUEUE_FULL:
4318 {
4319 /*
4320 * By the time the core driver has returned this
4321 * command, all other commands that were queued
4322 * to us but not the device have been returned.
4323 * This ensures that dev->active is equal to
4324 * the number of commands actually queued to
4325 * the device.
4326 */
4327 dev->tag_success_count = 0;
4328 if (dev->active != 0) {
4329 /*
4330 * Drop our opening count to the number
4331 * of commands currently outstanding.
4332 */
4333 dev->openings = 0;
4334 /*
4335 ahc_print_path(ahc, scb);
4336 printf("Dropping tag count to %d\n", dev->active);
4337 */
4338 if (dev->active == dev->tags_on_last_queuefull) {
4339
4340 dev->last_queuefull_same_count++;
4341 /*
4342 * If we repeatedly see a queue full
4343 * at the same queue depth, this
4344 * device has a fixed number of tag
4345 * slots. Lock in this tag depth
4346 * so we stop seeing queue fulls from
4347 * this device.
4348 */
4349 if (dev->last_queuefull_same_count
4350 == AHC_LOCK_TAGS_COUNT) {
4351 dev->maxtags = dev->active;
4352 ahc_print_path(ahc, scb);
4353 printf("Locking max tag count at %d\n",
4354 dev->active);
4355 }
4356 } else {
4357 dev->tags_on_last_queuefull = dev->active;
4358 dev->last_queuefull_same_count = 0;
4359 }
4360 ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
4361 ahc_set_scsi_status(scb, SCSI_STATUS_OK);
4362 ahc_platform_set_tags(ahc, &devinfo,
4363 (dev->flags & AHC_DEV_Q_BASIC)
4364 ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
4365 break;
4366 }
4367 /*
4368 * Drop down to a single opening, and treat this
4369 * as if the target returned BUSY SCSI status.
4370 */
4371 dev->openings = 1;
4372 ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
4373 ahc_platform_set_tags(ahc, &devinfo,
4374 (dev->flags & AHC_DEV_Q_BASIC)
4375 ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
4376 /* FALLTHROUGH */
4377 }
4378 case SCSI_STATUS_BUSY:
4379 {
4380 /*
4381 * Set a short timer to defer sending commands for
4382 * a bit since Linux will not delay in this case.
4383 */
4384 if ((dev->flags & AHC_DEV_TIMER_ACTIVE) != 0) {
4385 printf("%s:%c:%d: Device Timer still active during "
4386 "busy processing\n", ahc_name(ahc),
4387 dev->target->channel, dev->target->target);
4388 break;
4389 }
4390 dev->flags |= AHC_DEV_TIMER_ACTIVE;
4391 dev->qfrozen++;
4392 init_timer(&dev->timer);
4393 dev->timer.data = (u_long)dev;
4394 dev->timer.expires = jiffies + (HZ/2);
4395 dev->timer.function = ahc_linux_dev_timed_unfreeze;
4396 add_timer(&dev->timer);
4397 break;
4398 }
4399 }
4400 }
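
/*
 * Example of the queue-full lock-in above (illustrative numbers): a
 * drive that reports QUEUE FULL with exactly 64 commands outstanding,
 * AHC_LOCK_TAGS_COUNT times in a row, is taken to have 64 physical tag
 * slots; dev->maxtags is pinned at 64 so the driver stops probing past
 * the device's limit and the queue fulls cease. A queue full with no
 * commands outstanding instead falls through to the BUSY handling and
 * its half-second unfreeze timer.
 */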
4401
4402 static void
4403 ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
4404 {
4405 /*
4406 * Typically, the complete queue has very few entries
4407 * queued to it before the queue is emptied by
4408 * ahc_linux_run_complete_queue, so sorting the entries
4409 * by generation number should be inexpensive.
4410 * We perform the sort so that commands that complete
4411 * with an error are returned in the order originally
4412 * queued to the controller so that any subsequent retries
4413 * are performed in order. The underlying ahc routines do
4414 * not guarantee the order that aborted commands will be
4415 * returned to us.
4416 */
4417 struct ahc_completeq *completeq;
4418 struct ahc_cmd *list_cmd;
4419 struct ahc_cmd *acmd;
4420
4421 /*
4422 * Map CAM error codes into Linux Error codes. We
4423 * avoid the conversion so that the DV code has the
4424 * full error information available when making
4425 * state change decisions.
4426 */
4427 if (AHC_DV_CMD(cmd) == FALSE) {
4428 u_int new_status;
4429
4430 switch (ahc_cmd_get_transaction_status(cmd)) {
4431 case CAM_REQ_INPROG:
4432 case CAM_REQ_CMP:
4433 case CAM_SCSI_STATUS_ERROR:
4434 new_status = DID_OK;
4435 break;
4436 case CAM_REQ_ABORTED:
4437 new_status = DID_ABORT;
4438 break;
4439 case CAM_BUSY:
4440 new_status = DID_BUS_BUSY;
4441 break;
4442 case CAM_REQ_INVALID:
4443 case CAM_PATH_INVALID:
4444 new_status = DID_BAD_TARGET;
4445 break;
4446 case CAM_SEL_TIMEOUT:
4447 new_status = DID_NO_CONNECT;
4448 break;
4449 case CAM_SCSI_BUS_RESET:
4450 case CAM_BDR_SENT:
4451 new_status = DID_RESET;
4452 break;
4453 case CAM_UNCOR_PARITY:
4454 new_status = DID_PARITY;
4455 break;
4456 case CAM_CMD_TIMEOUT:
4457 new_status = DID_TIME_OUT;
4458 break;
4459 case CAM_UA_ABORT:
4460 case CAM_REQ_CMP_ERR:
4461 case CAM_AUTOSENSE_FAIL:
4462 case CAM_NO_HBA:
4463 case CAM_DATA_RUN_ERR:
4464 case CAM_UNEXP_BUSFREE:
4465 case CAM_SEQUENCE_FAIL:
4466 case CAM_CCB_LEN_ERR:
4467 case CAM_PROVIDE_FAIL:
4468 case CAM_REQ_TERMIO:
4469 case CAM_UNREC_HBA_ERROR:
4470 case CAM_REQ_TOO_BIG:
4471 new_status = DID_ERROR;
4472 break;
4473 case CAM_REQUEUE_REQ:
4474 /*
4475 * If we want the request requeued, make sure there
4476 * are sufficient retries. In the old scsi error code,
4477 * we used to be able to specify a result code that
4478 * bypassed the retry count. Now we must use this
4479 * hack. We also "fake" a check condition with
4480 * a sense code of ABORTED COMMAND. This seems to
4481 * evoke a retry even if this command is being sent
4482 * via the eh thread. Ick! Ick! Ick!
4483 */
4484 if (cmd->retries > 0)
4485 cmd->retries--;
4486 new_status = DID_OK;
4487 ahc_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
4488 cmd->result |= (DRIVER_SENSE << 24);
4489 memset(cmd->sense_buffer, 0,
4490 sizeof(cmd->sense_buffer));
4491 cmd->sense_buffer[0] = SSD_ERRCODE_VALID
4492 | SSD_CURRENT_ERROR;
4493 cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
4494 break;
4495 default:
4496 /* We should never get here */
4497 new_status = DID_ERROR;
4498 break;
4499 }
4500
4501 ahc_cmd_set_transaction_status(cmd, new_status);
4502 }
4503
4504 completeq = &ahc->platform_data->completeq;
4505 list_cmd = TAILQ_FIRST(completeq);
4506 acmd = (struct ahc_cmd *)cmd;
4507 while (list_cmd != NULL
4508 && acmd_scsi_cmd(list_cmd).serial_number
4509 < acmd_scsi_cmd(acmd).serial_number)
4510 list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
4511 if (list_cmd != NULL)
4512 TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
4513 else
4514 TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
4515 }
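
/*
 * Example of the ordering preserved above: if commands with serial
 * numbers 7, 9 and 8 complete in that order, the insertion walk files
 * 8 ahead of 9, so the completeq drains as 7, 8, 9 and any retries of
 * errored commands are re-issued in their original submission order.
 */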
4516
4517 static void
4518 ahc_linux_filter_inquiry(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4519 {
4520 struct scsi_inquiry_data *sid;
4521 struct ahc_initiator_tinfo *tinfo;
4522 struct ahc_transinfo *user;
4523 struct ahc_transinfo *goal;
4524 struct ahc_transinfo *curr;
4525 struct ahc_tmode_tstate *tstate;
4526 struct ahc_syncrate *syncrate;
4527 struct ahc_linux_device *dev;
4528 u_int maxsync;
4529 u_int width;
4530 u_int period;
4531 u_int offset;
4532 u_int ppr_options;
4533 u_int trans_version;
4534 u_int prot_version;
4535
4536 /*
4537 * Determine if this lun actually exists. If so,
4538 * hold on to its corresponding device structure.
4539 * If not, make sure we release the device and
4540 * don't bother processing the rest of this inquiry
4541 * command.
4542 */
4543 dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
4544 devinfo->target, devinfo->lun,
4545 /*alloc*/TRUE);
4546
4547 sid = (struct scsi_inquiry_data *)dev->target->inq_data;
4548 if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
4549
4550 dev->flags &= ~AHC_DEV_UNCONFIGURED;
4551 } else {
4552 dev->flags |= AHC_DEV_UNCONFIGURED;
4553 return;
4554 }
4555
4556 /*
4557 * Update our notion of this device's transfer
4558 * negotiation capabilities.
4559 */
4560 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
4561 devinfo->our_scsiid,
4562 devinfo->target, &tstate);
4563 user = &tinfo->user;
4564 goal = &tinfo->goal;
4565 curr = &tinfo->curr;
4566 width = user->width;
4567 period = user->period;
4568 offset = user->offset;
4569 ppr_options = user->ppr_options;
4570 trans_version = user->transport_version;
4571 prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
4572
4573 /*
4574 * Only attempt SPI3/4 once we've verified that
4575 * the device claims to support SPI3/4 features.
4576 */
4577 if (prot_version < SCSI_REV_2)
4578 trans_version = SID_ANSI_REV(sid);
4579 else
4580 trans_version = SCSI_REV_2;
4581
4582 if ((sid->flags & SID_WBus16) == 0)
4583 width = MSG_EXT_WDTR_BUS_8_BIT;
4584 if ((sid->flags & SID_Sync) == 0) {
4585 period = 0;
4586 offset = 0;
4587 ppr_options = 0;
4588 }
4589 if ((sid->spi3data & SID_SPI_QAS) == 0)
4590 ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
4591 if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
4592 ppr_options &= MSG_EXT_PPR_QAS_REQ;
4593 if ((sid->spi3data & SID_SPI_IUS) == 0)
4594 ppr_options &= (MSG_EXT_PPR_DT_REQ
4595 | MSG_EXT_PPR_QAS_REQ);
4596
4597 if (prot_version > SCSI_REV_2
4598 && ppr_options != 0)
4599 trans_version = user->transport_version;
4600
4601 ahc_validate_width(ahc, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
4602 if ((ahc->features & AHC_ULTRA2) != 0)
4603 maxsync = AHC_SYNCRATE_DT;
4604 else if ((ahc->features & AHC_ULTRA) != 0)
4605 maxsync = AHC_SYNCRATE_ULTRA;
4606 else
4607 maxsync = AHC_SYNCRATE_FAST;
4608
4609 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
4610 ahc_validate_offset(ahc, /*tinfo limit*/NULL, syncrate,
4611 &offset, width, ROLE_UNKNOWN);
4612 if (offset == 0 || period == 0) {
4613 period = 0;
4614 offset = 0;
4615 ppr_options = 0;
4616 }
4617 /* Apply our filtered user settings. */
4618 curr->transport_version = trans_version;
4619 curr->protocol_version = prot_version;
4620 ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, /*paused*/FALSE);
4621 ahc_set_syncrate(ahc, devinfo, syncrate, period,
4622 offset, ppr_options, AHC_TRANS_GOAL,
4623 /*paused*/FALSE);
4624 }
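
/*
 * Worked example of the filtering above: a target whose inquiry data
 * has SID_Sync clear is forced to async (period, offset and ppr_options
 * all zeroed); one that reports SID_SPI_CLOCK_DT but not SID_SPI_IUS
 * keeps MSG_EXT_PPR_DT_REQ while the IU-dependent options are masked
 * off, and the filtered values are installed as the negotiation goal
 * via ahc_set_width()/ahc_set_syncrate().
 */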
4625
4626 static void
4627 ahc_linux_sem_timeout(u_long arg)
4628 {
4629 struct ahc_softc *ahc;
4630 u_long s;
4631
4632 ahc = (struct ahc_softc *)arg;
4633
4634 ahc_lock(ahc, &s);
4635 if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
4636 ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
4637 up(&ahc->platform_data->eh_sem);
4638 }
4639 ahc_unlock(ahc, &s);
4640 }
4641
4642 static void
4643 ahc_linux_freeze_simq(struct ahc_softc *ahc)
4644 {
4645 ahc->platform_data->qfrozen++;
4646 if (ahc->platform_data->qfrozen == 1) {
4647 scsi_block_requests(ahc->platform_data->host);
4648
4649 /* XXX What about Twin channels? */
4650 ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
4651 CAM_LUN_WILDCARD, SCB_LIST_NULL,
4652 ROLE_INITIATOR, CAM_REQUEUE_REQ);
4653 }
4654 }
4655
4656 static void
4657 ahc_linux_release_simq(u_long arg)
4658 {
4659 struct ahc_softc *ahc;
4660 u_long s;
4661 int unblock_reqs;
4662
4663 ahc = (struct ahc_softc *)arg;
4664
4665 unblock_reqs = 0;
4666 ahc_lock(ahc, &s);
4667 if (ahc->platform_data->qfrozen > 0)
4668 ahc->platform_data->qfrozen--;
4669 if (ahc->platform_data->qfrozen == 0)
4670 unblock_reqs = 1;
4671 if (AHC_DV_SIMQ_FROZEN(ahc)
4672 && ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_RELEASE) != 0)) {
4673 ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_RELEASE;
4674 up(&ahc->platform_data->dv_sem);
4675 }
4676 ahc_schedule_runq(ahc);
4677 ahc_unlock(ahc, &s);
4678 /*
4679 * There is still a race here. The mid-layer
4680 * should keep its own freeze count and use
4681 * a bottom half handler to run the queues
4682 * so we can unblock with our own lock held.
4683 */
4684 if (unblock_reqs)
4685 scsi_unblock_requests(ahc->platform_data->host);
4686 }
4687
4688 static void
4689 ahc_linux_dev_timed_unfreeze(u_long arg)
4690 {
4691 struct ahc_linux_device *dev;
4692 struct ahc_softc *ahc;
4693 u_long s;
4694
4695 dev = (struct ahc_linux_device *)arg;
4696 ahc = dev->target->ahc;
4697 ahc_lock(ahc, &s);
4698 dev->flags &= ~AHC_DEV_TIMER_ACTIVE;
4699 if (dev->qfrozen > 0)
4700 dev->qfrozen--;
4701 if (dev->qfrozen == 0
4702 && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0)
4703 ahc_linux_run_device_queue(ahc, dev);
4704 if (TAILQ_EMPTY(&dev->busyq)
4705 && dev->active == 0)
4706 ahc_linux_free_device(ahc, dev);
4707 ahc_unlock(ahc, &s);
4708 }
4709
4710 static int
4711 ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
4712 {
4713 struct ahc_softc *ahc;
4714 struct ahc_cmd *acmd;
4715 struct ahc_cmd *list_acmd;
4716 struct ahc_linux_device *dev;
4717 struct scb *pending_scb;
4718 u_long s;
4719 u_int saved_scbptr;
4720 u_int active_scb_index;
4721 u_int last_phase;
4722 u_int saved_scsiid;
4723 u_int cdb_byte;
4724 int retval;
4725 int was_paused;
4726 int paused;
4727 int wait;
4728 int disconnected;
4729
4730 pending_scb = NULL;
4731 paused = FALSE;
4732 wait = FALSE;
4733 ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
4734 acmd = (struct ahc_cmd *)cmd;
4735
4736 printf("%s:%d:%d:%d: Attempting to queue a%s message\n",
4737 ahc_name(ahc), cmd->device->channel,
4738 cmd->device->id, cmd->device->lun,
4739 flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
4740
4741 printf("CDB:");
4742 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
4743 printf(" 0x%x", cmd->cmnd[cdb_byte]);
4744 printf("\n");
4745
4746 /*
4747 * In all versions of Linux, we have to work around
4748 * a major flaw in how the mid-layer is locked down
4749 * if we are to sleep successfully in our error handler
4750 * while allowing our interrupt handler to run. Since
4751 * the midlayer acquires either the io_request_lock or
4752 * our lock prior to calling us, we must use the
4753 * spin_unlock_irq() method for unlocking our lock.
4754 * This will force interrupts to be enabled on the
4755 * current CPU. Since the EH thread should not have
4756 * been running with CPU interrupts disabled other than
4757 * by acquiring either the io_request_lock or our own
4758 * lock, this *should* be safe.
4759 */
4760 ahc_midlayer_entrypoint_lock(ahc, &s);
4761
4762 /*
4763 * First determine if we currently own this command.
4764 * Start by searching the device queue. If not found
4765 * there, check the pending_scb list. If not found
4766 * at all, and the system wanted us to just abort the
4767 * command, return success.
4768 */
4769 dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
4770 cmd->device->lun, /*alloc*/FALSE);
4771
4772 if (dev == NULL) {
4773 /*
4774 * No target device for this command exists,
4775 * so we must not still own the command.
4776 */
4777 printf("%s:%d:%d:%d: Is not an active device\n",
4778 ahc_name(ahc), cmd->device->channel, cmd->device->id,
4779 cmd->device->lun);
4780 retval = SUCCESS;
4781 goto no_cmd;
4782 }
4783
4784 TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
4785 if (list_acmd == acmd)
4786 break;
4787 }
4788
4789 if (list_acmd != NULL) {
4790 printf("%s:%d:%d:%d: Command found on device queue\n",
4791 ahc_name(ahc), cmd->device->channel, cmd->device->id,
4792 cmd->device->lun);
4793 if (flag == SCB_ABORT) {
4794 TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
4795 cmd->result = DID_ABORT << 16;
4796 ahc_linux_queue_cmd_complete(ahc, cmd);
4797 retval = SUCCESS;
4798 goto done;
4799 }
4800 }
4801
4802 if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
4803 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
4804 cmd->device->channel + 'A',
4805 cmd->device->lun,
4806 CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
4807 printf("%s:%d:%d:%d: Command found on untagged queue\n",
4808 ahc_name(ahc), cmd->device->channel, cmd->device->id,
4809 cmd->device->lun);
4810 retval = SUCCESS;
4811 goto done;
4812 }
4813
4814 /*
4815 * See if we can find a matching cmd in the pending list.
4816 */
4817 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
4818 if (pending_scb->io_ctx == cmd)
4819 break;
4820 }
4821
4822 if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
4823
4824 /* Any SCB for this device will do for a target reset */
4825 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
4826 if (ahc_match_scb(ahc, pending_scb, cmd->device->id,
4827 cmd->device->channel + 'A',
4828 CAM_LUN_WILDCARD,
4829 SCB_LIST_NULL, ROLE_INITIATOR))
4830 break;
4831 }
4832 }
4833
4834 if (pending_scb == NULL) {
4835 printf("%s:%d:%d:%d: Command not found\n",
4836 ahc_name(ahc), cmd->device->channel, cmd->device->id,
4837 cmd->device->lun);
4838 goto no_cmd;
4839 }
4840
4841 if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
4842 /*
4843 * We can't queue two recovery actions using the same SCB
4844 */
4845 retval = FAILED;
4846 goto done;
4847 }
4848
4849 /*
4850 * Ensure that the card doesn't do anything
4851 * behind our back and that we didn't "just" miss
4852 * an interrupt that would affect this cmd.
4853 */
4854 was_paused = ahc_is_paused(ahc);
4855 ahc_pause_and_flushwork(ahc);
4856 paused = TRUE;
4857
4858 if ((pending_scb->flags & SCB_ACTIVE) == 0) {
4859 printf("%s:%d:%d:%d: Command already completed\n",
4860 ahc_name(ahc), cmd->device->channel, cmd->device->id,
4861 cmd->device->lun);
4862 goto no_cmd;
4863 }
4864
4865 printf("%s: At time of recovery, card was %spaused\n",
4866 ahc_name(ahc), was_paused ? "" : "not ");
4867 ahc_dump_card_state(ahc);
4868
4869 disconnected = TRUE;
4870 if (flag == SCB_ABORT) {
4871 if (ahc_search_qinfifo(ahc, cmd->device->id,
4872 cmd->device->channel + 'A',
4873 cmd->device->lun,
4874 pending_scb->hscb->tag,
4875 ROLE_INITIATOR, CAM_REQ_ABORTED,
4876 SEARCH_COMPLETE) > 0) {
4877 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
4878 ahc_name(ahc), cmd->device->channel,
4879 cmd->device->id, cmd->device->lun);
4880 retval = SUCCESS;
4881 goto done;
4882 }
4883 } else if (ahc_search_qinfifo(ahc, cmd->device->id,
4884 cmd->device->channel + 'A',
4885 cmd->device->lun, pending_scb->hscb->tag,
4886 ROLE_INITIATOR, /*status*/0,
4887 SEARCH_COUNT) > 0) {
4888 disconnected = FALSE;
4889 }
4890
4891 if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
4892 struct scb *bus_scb;
4893
4894 bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
4895 if (bus_scb == pending_scb)
4896 disconnected = FALSE;
4897 else if (flag != SCB_ABORT
4898 && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
4899 && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
4900 disconnected = FALSE;
4901 }
4902
4903 /*
4904 * At this point, pending_scb is the scb associated with the
4905 * passed in command. That command is currently active on the
4906 * bus, is in the disconnected state, or we're hoping to find
4907 * a command for the same target active on the bus to abuse to
4908 * send a BDR. Queue the appropriate message based on which of
4909 * these states we are in.
4910 */
4911 last_phase = ahc_inb(ahc, LASTPHASE);
4912 saved_scbptr = ahc_inb(ahc, SCBPTR);
4913 active_scb_index = ahc_inb(ahc, SCB_TAG);
4914 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
4915 if (last_phase != P_BUSFREE
4916 && (pending_scb->hscb->tag == active_scb_index
4917 || (flag == SCB_DEVICE_RESET
4918 && SCSIID_TARGET(ahc, saved_scsiid) == cmd->device->id))) {
4919
4920 /*
4921 * We're active on the bus, so assert ATN
4922 * and hope that the target responds.
4923 */
4924 pending_scb = ahc_lookup_scb(ahc, active_scb_index);
4925 pending_scb->flags |= SCB_RECOVERY_SCB|flag;
4926 ahc_outb(ahc, MSG_OUT, HOST_MSG);
4927 ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
4928 printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
4929 ahc_name(ahc), cmd->device->channel, cmd->device->id,
4930 cmd->device->lun);
4931 wait = TRUE;
4932 } else if (disconnected) {
4933
4934 /*
4935 * Actually re-queue this SCB in an attempt
4936 * to select the device before it reconnects.
4937 * In either case (selection or reselection),
4938 * we will now issue the appropriate message
4939 * to the timed-out device.
4940 *
4941 * Set the MK_MESSAGE control bit indicating
4942 * that we desire to send a message. We
4943 * also set the disconnected flag since
4944 * in the paging case there is no guarantee
4945 * that our SCB control byte matches the
4946 * version on the card. We don't want the
4947 * sequencer to abort the command thinking
4948 * an unsolicited reselection occurred.
4949 */
4950 pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
4951 pending_scb->flags |= SCB_RECOVERY_SCB|flag;
4952
4953 /*
4954 * Remove any cached copy of this SCB in the
4955 * disconnected list in preparation for the
4956 * queuing of our abort SCB. We use the
4957 * same element in the SCB, SCB_NEXT, for
4958 * both the qinfifo and the disconnected list.
4959 */
4960 ahc_search_disc_list(ahc, cmd->device->id,
4961 cmd->device->channel + 'A',
4962 cmd->device->lun, pending_scb->hscb->tag,
4963 /*stop_on_first*/TRUE,
4964 /*remove*/TRUE,
4965 /*save_state*/FALSE);
4966
4967 /*
4968 * In the non-paging case, the sequencer will
4969 * never re-reference the in-core SCB.
4970 * To make sure we are notified during
4971 * reselection, set the MK_MESSAGE flag in
4972 * the card's copy of the SCB.
4973 */
4974 if ((ahc->flags & AHC_PAGESCBS) == 0) {
4975 ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag);
4976 ahc_outb(ahc, SCB_CONTROL,
4977 ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE);
4978 }
4979
4980 /*
4981 * Clear out any entries in the QINFIFO first
4982 * so we are the next SCB for this target
4983 * to run.
4984 */
4985 ahc_search_qinfifo(ahc, cmd->device->id,
4986 cmd->device->channel + 'A',
4987 cmd->device->lun, SCB_LIST_NULL,
4988 ROLE_INITIATOR, CAM_REQUEUE_REQ,
4989 SEARCH_COMPLETE);
4990 ahc_qinfifo_requeue_tail(ahc, pending_scb);
4991 ahc_outb(ahc, SCBPTR, saved_scbptr);
4992 ahc_print_path(ahc, pending_scb);
4993 printf("Device is disconnected, re-queuing SCB\n");
4994 wait = TRUE;
4995 } else {
4996 printf("%s:%d:%d:%d: Unable to deliver message\n",
4997 ahc_name(ahc), cmd->device->channel, cmd->device->id,
4998 cmd->device->lun);
4999 retval = FAILED;
5000 goto done;
5001 }
5002
5003 no_cmd:
5004 /*
5005 * Our assumption is that if we don't have the command, no
5006 * recovery action was required, so we return success. Again,
5007 * the semantics of the mid-layer recovery engine are not
5008 * well defined, so this may change in time.
5009 */
5010 retval = SUCCESS;
5011 done:
5012 if (paused)
5013 ahc_unpause(ahc);
5014 if (wait) {
5015 struct timer_list timer;
5016 int ret;
5017
5018 ahc->platform_data->flags |= AHC_UP_EH_SEMAPHORE;
5019 spin_unlock_irq(&ahc->platform_data->spin_lock);
5020 init_timer(&timer);
5021 timer.data = (u_long)ahc;
5022 timer.expires = jiffies + (5 * HZ);
5023 timer.function = ahc_linux_sem_timeout;
5024 add_timer(&timer);
5025 printf("Recovery code sleeping\n");
5026 down(&ahc->platform_data->eh_sem);
5027 printf("Recovery code awake\n");
5028 ret = del_timer_sync(&timer);
5029 if (ret == 0) {
5030 printf("Timer Expired\n");
5031 retval = FAILED;
5032 }
5033 spin_lock_irq(&ahc->platform_data->spin_lock);
5034 }
5035 ahc_schedule_runq(ahc);
5036 ahc_linux_run_complete_queue(ahc);
5037 ahc_midlayer_entrypoint_unlock(ahc, &s);
5038 return (retval);
5039 }
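
/*
 * Example of the recovery flow above: an ABORT for a command still
 * queued to the driver (busyq) or sitting in the QINFIFO completes
 * immediately with DID_ABORT/CAM_REQ_ABORTED; one for a command active
 * on the bus asserts ATN and, like the disconnected re-queue case,
 * sleeps up to five seconds on eh_sem waiting for ahc_done() to signal
 * that the recovery SCB completed, failing if the timer expires first.
 */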
5040
5041 void
5042 ahc_platform_dump_card_state(struct ahc_softc *ahc)
5043 {
5044 struct ahc_linux_device *dev;
5045 int channel;
5046 int maxchannel;
5047 int target;
5048 int maxtarget;
5049 int lun;
5050 int i;
5051
5052 maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0;
5053 maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7;
5054 for (channel = 0; channel <= maxchannel; channel++) {
5055
5056 for (target = 0; target <= maxtarget; target++) {
5057
5058 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
5059 struct ahc_cmd *acmd;
5060
5061 dev = ahc_linux_get_device(ahc, channel, target,
5062 lun, /*alloc*/FALSE);
5063 if (dev == NULL)
5064 continue;
5065
5066 printf("DevQ(%d:%d:%d): ",
5067 channel, target, lun);
5068 i = 0;
5069 TAILQ_FOREACH(acmd, &dev->busyq,
5070 acmd_links.tqe) {
5071 if (i++ > AHC_SCB_MAX)
5072 break;
5073 }
5074 printf("%d waiting\n", i);
5075 }
5076 }
5077 }
5078 }
5079
5080 static int __init
5081 ahc_linux_init(void)
5082 {
5083 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
5084 return (ahc_linux_detect(&aic7xxx_driver_template) ? 0 : -ENODEV);
5085 #else
5086 scsi_register_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
5087 if (aic7xxx_driver_template.present == 0) {
5088 scsi_unregister_module(MODULE_SCSI_HA,
5089 &aic7xxx_driver_template);
5090 return (-ENODEV);
5091 }
5092
5093 return (0);
5094 #endif
5095 }
5096
5097 static void __exit
5098 ahc_linux_exit(void)
5099 {
5100 struct ahc_softc *ahc;
5101 u_long l;
5102
5103 /*
5104 * Shutdown DV threads before going into the SCSI mid-layer.
5105 * This avoids situations where the mid-layer locks the entire
5106 * kernel so that waiting for our DV threads to exit leads
5107 * to deadlock.
5108 */
5109 ahc_list_lock(&l);
5110 TAILQ_FOREACH(ahc, &ahc_tailq, links) {
5111
5112 ahc_linux_kill_dv_thread(ahc);
5113 }
5114 ahc_list_unlock(&l);
5115
5116 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
5117 /*
5118 * In 2.4 we have to unregister from the PCI core _after_
5119 * unregistering from the scsi midlayer to avoid dangling
5120 * references.
5121 */
5122 scsi_unregister_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
5123 #endif
5124 #ifdef CONFIG_PCI
5125 ahc_linux_pci_exit();
5126 #endif
5127 #ifdef CONFIG_EISA
5128 ahc_linux_eisa_exit();
5129 #endif
5130 }
5131
5132 module_init(ahc_linux_init);
5133 module_exit(ahc_linux_exit);
5134