1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 *
5 * generic mid-level SCSI driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
8 *
9 * <drew@colorado.edu>
10 *
11 * Bug correction thanks go to :
12 * Rik Faith <faith@cs.unc.edu>
13 * Tommy Thorn <tthorn>
14 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
15 *
16 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
17 * add scatter-gather, multiple outstanding request, and other
18 * enhancements.
19 *
20 * Native multichannel, wide scsi, /proc/scsi and hot plugging
21 * support added by Michael Neuffer <mike@i-connect.net>
22 *
23 * Added request_module("scsi_hostadapter") for kerneld:
24 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
25 * Bjorn Ekwall <bj0rn@blox.se>
26 * (changed to kmod)
27 *
28 * Major improvements to the timeout, abort, and reset processing,
29 * as well as performance modifications for large queue depths by
30 * Leonard N. Zubkoff <lnz@dandelion.com>
31 *
32 * Converted cli() code to spinlocks, Ingo Molnar
33 *
34 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
35 *
36 * out_of_space hacks, D. Gilbert (dpg) 990608
37 */
38
39 #define REVISION "Revision: 1.00"
40 #define VERSION "Id: scsi.c 1.00 2000/09/26"
41
42 #include <linux/config.h>
43 #include <linux/module.h>
44
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/string.h>
48 #include <linux/slab.h>
49 #include <linux/ioport.h>
50 #include <linux/kernel.h>
51 #include <linux/stat.h>
52 #include <linux/blk.h>
53 #include <linux/interrupt.h>
54 #include <linux/delay.h>
55 #include <linux/init.h>
56 #include <linux/smp_lock.h>
57 #include <linux/completion.h>
58
59 #define __KERNEL_SYSCALLS__
60
61 #include <linux/unistd.h>
62 #include <linux/spinlock.h>
63
64 #include <asm/system.h>
65 #include <asm/irq.h>
66 #include <asm/dma.h>
67 #include <asm/uaccess.h>
68
69 #include "scsi.h"
70 #include "hosts.h"
71 #include "constants.h"
72
73 #ifdef CONFIG_KMOD
74 #include <linux/kmod.h>
75 #endif
76
77 #undef USE_STATIC_SCSI_MEMORY
78
79 struct proc_dir_entry *proc_scsi;
80
81 #ifdef CONFIG_PROC_FS
82 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
83 static void scsi_dump_status(int level);
84 #endif
85
86 /*
87 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
88 */
89
90 /*
91 * Definitions and constants.
92 */
93
94 #define MIN_RESET_DELAY (2*HZ)
95
96 /* Do not call reset on error if we just did a reset within 15 sec. */
97 #define MIN_RESET_PERIOD (15*HZ)
98
99 /*
100 * Macro to determine the size of SCSI command. This macro takes vendor
101 * unique commands into account. SCSI commands in groups 6 and 7 are
102 * vendor unique and we will depend upon the command length being
103 * supplied correctly in cmd_len.
104 */
105 #define CDB_SIZE(SCpnt) ((((SCpnt->cmnd[0] >> 5) & 7) < 6) ? \
106 COMMAND_SIZE(SCpnt->cmnd[0]) : SCpnt->cmd_len)
107
108 /*
109 * Data declarations.
110 */
111 unsigned long scsi_pid;
112 Scsi_Cmnd *last_cmnd;
113 /* Command group 3 is reserved and should never be used. */
114 const unsigned char scsi_command_size[8] =
115 {
116 6, 10, 10, 12,
117 16, 12, 10, 10
118 };
119 static unsigned long serial_number;
120 static Scsi_Cmnd *scsi_bh_queue_head;
121 static Scsi_Cmnd *scsi_bh_queue_tail;
122
123 /*
124 * Note - the initial logging level can be set here to log events at boot time.
125 * After the system is up, you may enable logging via the /proc interface.
126 */
127 unsigned int scsi_logging_level;
128
129 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
130 {
131 "Direct-Access ",
132 "Sequential-Access",
133 "Printer ",
134 "Processor ",
135 "WORM ",
136 "CD-ROM ",
137 "Scanner ",
138 "Optical Device ",
139 "Medium Changer ",
140 "Communications ",
141 "Unknown ",
142 "Unknown ",
143 "Unknown ",
144 "Enclosure ",
145 };
146
147 /*
148 * Function prototypes.
149 */
150 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
151 void scsi_build_commandblocks(Scsi_Device * SDpnt);
152
153 /*
154 * These are the interface to the old error handling code. It should go away
155 * someday soon.
156 */
157 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
158 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
159 extern int scsi_old_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
160
161 /*
162 * Private interface into the new error handling code.
163 */
164 extern int scsi_new_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
165
166 /*
167 * Function: scsi_initialize_queue()
168 *
169 * Purpose: Selects queue handler function for a device.
170 *
171 * Arguments: SDpnt - device for which we need a handler function.
172 *
173 * Returns: Nothing
174 *
175 * Lock status: No locking assumed or required.
176 *
177 * Notes: Most devices will end up using scsi_request_fn for the
178 * handler function (at least as things are done now).
179 * The "block" feature basically ensures that only one of
180 * the blocked hosts is active at one time, mainly to work around
181 * buggy DMA chipsets where the memory gets starved.
182 * For this case, we have a special handler function, which
183 * does some checks and ultimately calls scsi_request_fn.
184 *
185 * The single_lun feature is a similar special case.
186 *
187 * We handle these things by stacking the handlers. The
188 * special case handlers simply check a few conditions,
189 * and return if they are not supposed to do anything.
190 * In the event that things are OK, then they call the next
191 * handler in the list - ultimately they call scsi_request_fn
192 * to do the dirty deed.
193 */
void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
{
	request_queue_t *queue = &SDpnt->request_queue;

	/*
	 * Install scsi_request_fn as the request handler.  The stacked
	 * special-case handlers (blocked hosts, single_lun) described
	 * above wrap this one when they are needed.
	 */
	blk_init_queue(queue, scsi_request_fn);
	blk_queue_headactive(queue, 0);
	blk_queue_throttle_sectors(queue, 1);

	/* Let the request function recover the device from the queue. */
	queue->queuedata = (void *) SDpnt;
}
203
204 #ifdef MODULE
205 MODULE_PARM(scsi_logging_level, "i");
206 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
207
208 #else
209
scsi_logging_setup(char * str)210 static int __init scsi_logging_setup(char *str)
211 {
212 int tmp;
213
214 if (get_option(&str, &tmp) == 1) {
215 scsi_logging_level = (tmp ? ~0 : 0);
216 return 1;
217 } else {
218 printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
219 "(n should be 0 or non-zero)\n");
220 return 0;
221 }
222 }
223
224 __setup("scsi_logging=", scsi_logging_setup);
225
226 #endif
227
228 /*
229 * Issue a command and wait for it to complete
230 */
231
/*
 * Completion callback used by scsi_wait_req(): mark the request done
 * and wake the originator sleeping in wait_for_completion(), if any.
 */
static void scsi_wait_done(Scsi_Cmnd * SCpnt)
{
	struct request *rq = &SCpnt->request;

	rq->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */

	if (rq->waiting != NULL)
		complete(rq->waiting);
}
243
244 /*
245 * This lock protects the freelist for all devices on the system.
246 * We could make this finer grained by having a single lock per
247 * device if it is ever found that there is excessive contention
248 * on this lock.
249 */
250 static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
251
252 /*
253 * Used to protect insertion into and removal from the queue of
254 * commands to be processed by the bottom half handler.
255 */
256 static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
257
258 /*
259 * Function: scsi_allocate_request
260 *
261 * Purpose: Allocate a request descriptor.
262 *
263 * Arguments: device - device for which we want a request
264 *
265 * Lock status: No locks assumed to be held. This function is SMP-safe.
266 *
267 * Returns: Pointer to request block.
268 *
269 * Notes: With the new queueing code, it becomes important
270 * to track the difference between a command and a
271 * request. A request is a pending item in the queue that
272 * has not yet reached the top of the queue.
273 */
274
scsi_allocate_request(Scsi_Device * device)275 Scsi_Request *scsi_allocate_request(Scsi_Device * device)
276 {
277 Scsi_Request *SRpnt = NULL;
278
279 if (!device)
280 panic("No device passed to scsi_allocate_request().\n");
281
282 SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
283 if( SRpnt == NULL )
284 {
285 return NULL;
286 }
287
288 memset(SRpnt, 0, sizeof(Scsi_Request));
289 SRpnt->sr_device = device;
290 SRpnt->sr_host = device->host;
291 SRpnt->sr_magic = SCSI_REQ_MAGIC;
292 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
293
294 return SRpnt;
295 }
296
297 /*
298 * Function: scsi_release_request
299 *
300 * Purpose: Release a request descriptor.
301 *
302 * Arguments: device - device for which we want a request
303 *
304 * Lock status: No locks assumed to be held. This function is SMP-safe.
305 *
306 * Returns: Pointer to request block.
307 *
308 * Notes: With the new queueing code, it becomes important
309 * to track the difference between a command and a
310 * request. A request is a pending item in the queue that
311 * has not yet reached the top of the queue. We still need
312 * to free a request when we are done with it, of course.
313 */
void scsi_release_request(Scsi_Request * req)
{
	/*
	 * A command block may still be attached from the last time the
	 * request was executed; return it to the device freelist before
	 * freeing the request itself.
	 */
	if (req->sr_command) {
		scsi_release_command(req->sr_command);
		req->sr_command = NULL;
	}

	kfree(req);
}
324
325 /*
326 * Function: scsi_allocate_device
327 *
328 * Purpose: Allocate a command descriptor.
329 *
330 * Arguments: device - device for which we want a command descriptor
331 * wait - 1 if we should wait in the event that none
332 * are available.
333 * interruptible - 1 if we should unblock and return NULL
334 * in the event that we must wait, and a signal
335 * arrives.
336 *
337 * Lock status: No locks assumed to be held. This function is SMP-safe.
338 *
339 * Returns: Pointer to command descriptor.
340 *
341 * Notes: Prior to the new queue code, this function was not SMP-safe.
342 *
343 * If the wait flag is true, and we are waiting for a free
344 * command block, this function will interrupt and return
345 * NULL in the event that a signal arrives that needs to
346 * be handled.
347 *
348 * This function is deprecated, and drivers should be
349 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
350 */
351
Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
				int interruptable)
{
	struct Scsi_Host *host;
	Scsi_Cmnd *SCpnt = NULL;
	Scsi_Device *SDpnt;
	unsigned long flags;

	if (!device)
		panic("No device passed to scsi_allocate_device().\n");

	host = device->host;

	/* The command-block freelists of all devices are protected by
	 * this single global lock. */
	spin_lock_irqsave(&device_request_lock, flags);

	/* Retry loop: look for a free command block; if none is found
	 * and the caller asked to wait, sleep on the device's wait
	 * queue and try again after being woken. */
	while (1 == 1) {
		SCpnt = NULL;
		if (!device->device_blocked) {
			if (device->single_lun) {
				/*
				 * FIXME(eric) - this is not at all optimal.  Given that
				 * single lun devices are rare and usually slow
				 * (i.e. CD changers), this is good enough for now, but
				 * we may want to come back and optimize this later.
				 *
				 * Scan through all of the devices attached to this
				 * host, and see if any are active or not.  If so,
				 * we need to defer this command.
				 *
				 * We really need a busy counter per device.  This would
				 * allow us to more easily figure out whether we should
				 * do anything here or not.
				 */
				for (SDpnt = host->host_queue;
				     SDpnt;
				     SDpnt = SDpnt->next) {
					/*
					 * Only look for other devices on the same bus
					 * with the same target ID.
					 */
					if (SDpnt->channel != device->channel
					    || SDpnt->id != device->id
					    || SDpnt == device) {
						continue;
					}
					if( atomic_read(&SDpnt->device_active) != 0)
					{
						break;
					}
				}
				if (SDpnt) {
					/*
					 * Some other device in this cluster is busy.
					 * If asked to wait, we need to wait, otherwise
					 * return NULL.
					 */
					SCpnt = NULL;
					goto busy;
				}
			}
			/*
			 * Now we can check for a free command block for this device.
			 */
			for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
				if (SCpnt->request.rq_status == RQ_INACTIVE)
					break;
			}
		}
		/*
		 * If we couldn't find a free command block, and we have been
		 * asked to wait, then do so.
		 */
		if (SCpnt) {
			break;
		}
	      busy:
		/*
		 * If we have been asked to wait for a free block, then
		 * wait here.
		 */
		if (wait) {
			/* NOTE(review): this local deliberately shadows the
			 * "wait" flag parameter for the rest of this scope. */
			DECLARE_WAITQUEUE(wait, current);

			/*
			 * We need to wait for a free commandblock.  We need to
			 * insert ourselves into the list before we release the
			 * lock.  This way if a block were released the same
			 * microsecond that we released the lock, the call
			 * to schedule() wouldn't block (well, it might switch,
			 * but the current task will still be schedulable).
			 */
			add_wait_queue(&device->scpnt_wait, &wait);
			if( interruptable ) {
				set_current_state(TASK_INTERRUPTIBLE);
			} else {
				set_current_state(TASK_UNINTERRUPTIBLE);
			}

			spin_unlock_irqrestore(&device_request_lock, flags);

			/*
			 * This should block until a device command block
			 * becomes available.
			 */
			schedule();

			spin_lock_irqsave(&device_request_lock, flags);

			remove_wait_queue(&device->scpnt_wait, &wait);
			/*
			 * FIXME - Isn't this redundant??  Someone
			 * else will have forced the state back to running.
			 */
			set_current_state(TASK_RUNNING);
			/*
			 * In the event that a signal has arrived that we need
			 * to consider, then simply return NULL.  Everyone
			 * that calls us should be prepared for this
			 * possibility, and pass the appropriate code back
			 * to the user.
			 */
			if( interruptable ) {
				if (signal_pending(current)) {
					spin_unlock_irqrestore(&device_request_lock, flags);
					return NULL;
				}
			}
		} else {
			/* Non-blocking caller and nothing free: give up. */
			spin_unlock_irqrestore(&device_request_lock, flags);
			return NULL;
		}
	}

	/* Claim the block and reset it to a pristine state while the
	 * freelist lock is still held. */
	SCpnt->request.rq_status = RQ_SCSI_BUSY;
	SCpnt->request.waiting = NULL;	/* And no one is waiting for this
					 * to complete */
	atomic_inc(&SCpnt->host->host_active);
	atomic_inc(&SCpnt->device->device_active);

	SCpnt->buffer = NULL;
	SCpnt->bufflen = 0;
	SCpnt->request_buffer = NULL;
	SCpnt->request_bufflen = 0;

	SCpnt->use_sg = 0;	/* Reset the scatter-gather flag */
	SCpnt->old_use_sg = 0;
	SCpnt->transfersize = 0;	/* No default transfer size */
	SCpnt->cmd_len = 0;

	SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
	SCpnt->sc_request = NULL;
	SCpnt->sc_magic = SCSI_CMND_MAGIC;

	SCpnt->result = 0;
	SCpnt->underflow = 0;	/* Do not flag underflow conditions */
	SCpnt->old_underflow = 0;
	SCpnt->resid = 0;
	SCpnt->state = SCSI_STATE_INITIALIZING;
	SCpnt->owner = SCSI_OWNER_HIGHLEVEL;

	spin_unlock_irqrestore(&device_request_lock, flags);

	SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
				   SCpnt->target,
				   atomic_read(&SCpnt->host->host_active)));

	return SCpnt;
}
520
/*
 * Return a command block to the freelist (RQ_INACTIVE), drop the
 * host/device active counts, possibly wake the error handler, and
 * finally wake anyone sleeping in scsi_allocate_device() for this
 * device.  Does NOT kick the request queue - see scsi_release_command().
 */
inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
{
	unsigned long flags;
	Scsi_Device * SDpnt;

	spin_lock_irqsave(&device_request_lock, flags);

	SDpnt = SCpnt->device;

	SCpnt->request.rq_status = RQ_INACTIVE;
	SCpnt->state = SCSI_STATE_UNUSED;
	SCpnt->owner = SCSI_OWNER_NOBODY;
	atomic_dec(&SCpnt->host->host_active);
	atomic_dec(&SDpnt->device_active);

	SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
				   SCpnt->target,
				   atomic_read(&SCpnt->host->host_active),
				   SCpnt->host->host_failed));
	if (SCpnt->host->host_failed != 0) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
						  SCpnt->host->in_recovery,
						  SCpnt->host->eh_active));
	}
	/*
	 * If the host is having troubles, then look to see if this was the last
	 * command that might have failed.  If so, wake up the error handler.
	 */
	if (SCpnt->host->in_recovery
	    && !SCpnt->host->eh_active
	    && SCpnt->host->host_busy == SCpnt->host->host_failed) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
						  atomic_read(&SCpnt->host->eh_wait->count)));
		up(SCpnt->host->eh_wait);
	}

	spin_unlock_irqrestore(&device_request_lock, flags);

	/*
	 * Wake up anyone waiting for this device.  Do this after we
	 * have released the lock, as they will need it as soon as
	 * they wake up.
	 */
	wake_up(&SDpnt->scpnt_wait);
}
566
567 /*
568 * Function: scsi_release_command
569 *
570 * Purpose: Release a command block.
571 *
572 * Arguments: SCpnt - command block we are releasing.
573 *
574 * Notes: The command block can no longer be used by the caller once
 *		this function is called.  This is in effect the inverse
576 * of scsi_allocate_device. Note that we also must perform
577 * a couple of additional tasks. We must first wake up any
578 * processes that might have blocked waiting for a command
579 * block, and secondly we must hit the queue handler function
580 * to make sure that the device is busy. Note - there is an
581 * option to not do this - there were instances where we could
582 * recurse too deeply and blow the stack if this happened
583 * when we were indirectly called from the request function
584 * itself.
585 *
586 * The idea is that a lot of the mid-level internals gunk
587 * gets hidden in this function. Upper level drivers don't
588 * have any chickens to wave in the air to get things to
589 * work reliably.
590 *
591 * This function is deprecated, and drivers should be
592 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
593 */
void scsi_release_command(Scsi_Cmnd * SCpnt)
{
	Scsi_Device *SDpnt = SCpnt->device;

	/* Put the block back on the freelist and wake any waiters. */
	__scsi_release_command(SCpnt);

	/*
	 * Finally, hit the queue request function to make sure that
	 * the device is actually busy if there are requests present.
	 * This won't block - if the device cannot take any more, life
	 * will go on.
	 */
	scsi_queue_next_request(&SDpnt->request_queue, NULL);
}
612
613 /*
614 * Function: scsi_dispatch_command
615 *
616 * Purpose: Dispatch a command to the low-level driver.
617 *
618 * Arguments: SCpnt - command block we are dispatching.
619 *
620 * Notes:
621 */
int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
{
#ifdef DEBUG_DELAY
	unsigned long clock;
#endif
	struct Scsi_Host *host;
	int rtn = 0;
	unsigned long flags = 0;
	unsigned long timeout;

	/* Caller must NOT hold io_request_lock; we take it ourselves
	 * around each low-level driver entry point below. */
	ASSERT_LOCK(&io_request_lock, 0);

#if DEBUG
	unsigned long *ret = 0;
#ifdef __mips__
	__asm__ __volatile__("move\t%0,$31":"=r"(ret));
#else
	ret = __builtin_return_address(0);
#endif
#endif

	host = SCpnt->host;

	/* Assign a unique nonzero serial_number. */
	if (++serial_number == 0)
		serial_number = 1;
	SCpnt->serial_number = serial_number;
	SCpnt->pid = scsi_pid++;

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * sti() here will not cause another call to the SCSI host's
		 * interrupt handler (assuming there is one irq-level per
		 * host).
		 */
		/* Busy-wait out the remainder of the reset settle time. */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);
		host->resetting = 0;
	}
	/* Arm the command timer; the timeout routine depends on whether
	 * the host driver uses the new error-handling code. */
	if (host->hostt->use_new_eh_code) {
		scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
	} else {
		scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
			       scsi_old_times_out);
	}

	/*
	 * We will use a queued command if possible, otherwise we will emulate the
	 * queuing and calling of completion function ourselves.
	 */
	SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
				   "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
				   SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
				   SCpnt->buffer, SCpnt->bufflen, SCpnt->done));

	SCpnt->state = SCSI_STATE_QUEUED;
	SCpnt->owner = SCSI_OWNER_LOWLEVEL;
	if (host->can_queue) {
		SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
					   host->hostt->queuecommand));
		/*
		 * Use the old error handling code if we haven't converted the driver
		 * to use the new one yet.  Note - only the new queuecommand variant
		 * passes a meaningful return value.
		 */
		if (host->hostt->use_new_eh_code) {
			/*
			 * Before we queue this command, check if the command
			 * length exceeds what the host adapter can handle.
			 */
			if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
				spin_lock_irqsave(&io_request_lock, flags);
				rtn = host->hostt->queuecommand(SCpnt, scsi_done);
				spin_unlock_irqrestore(&io_request_lock, flags);
				if (rtn != 0) {
					/* Host rejected the command: cancel
					 * the timer and requeue for retry. */
					scsi_delete_timer(SCpnt);
					scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
					SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
				}
			} else {
				/* CDB too long for this adapter: fail it
				 * immediately with DID_ABORT. */
				SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
				SCpnt->result = (DID_ABORT << 16);
				spin_lock_irqsave(&io_request_lock, flags);
				scsi_done(SCpnt);
				spin_unlock_irqrestore(&io_request_lock, flags);
				rtn = 1;
			}
		} else {
			/*
			 * Before we queue this command, check if the command
			 * length exceeds what the host adapter can handle.
			 */
			if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
				spin_lock_irqsave(&io_request_lock, flags);
				host->hostt->queuecommand(SCpnt, scsi_old_done);
				spin_unlock_irqrestore(&io_request_lock, flags);
			} else {
				SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
				SCpnt->result = (DID_ABORT << 16);
				spin_lock_irqsave(&io_request_lock, flags);
				scsi_old_done(SCpnt);
				spin_unlock_irqrestore(&io_request_lock, flags);
				rtn = 1;
			}
		}
	} else {
		/* Host cannot queue commands: issue it synchronously via
		 * command() and run the completion routine ourselves. */
		int temp;

		SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command));
		spin_lock_irqsave(&io_request_lock, flags);
		temp = host->hostt->command(SCpnt);
		SCpnt->result = temp;
#ifdef DEBUG_DELAY
		/* Debug aid: stall 4 seconds before completing. */
		spin_unlock_irqrestore(&io_request_lock, flags);
		clock = jiffies + 4 * HZ;
		while (time_before(jiffies, clock)) {
			barrier();
			cpu_relax();
		}
		printk("done(host = %d, result = %04x) : routine at %p\n",
		       host->host_no, temp, host->hostt->command);
		spin_lock_irqsave(&io_request_lock, flags);
#endif
		if (host->hostt->use_new_eh_code) {
			scsi_done(SCpnt);
		} else {
			scsi_old_done(SCpnt);
		}
		spin_unlock_irqrestore(&io_request_lock, flags);
	}
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}
766
767 devfs_handle_t scsi_devfs_handle;
768
769 /*
770 * scsi_do_cmd sends all the commands out to the low-level driver. It
771 * handles the specifics required for each low level driver - ie queued
772 * or non queued. It also prevents conflicts when different high level
773 * drivers go for the same host at the same time.
774 */
775
/*
 * Issue the request via scsi_do_req() with scsi_wait_done as the
 * completion routine, then sleep until it finishes.  The command
 * block is released before returning so the request can be reused.
 */
void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
 		  void *buffer, unsigned bufflen,
 		  int timeout, int retries)
{
	DECLARE_COMPLETION(done);
	request_queue_t *queue = &SRpnt->sr_device->request_queue;

	SRpnt->sr_request.waiting = &done;
	SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
	scsi_do_req(SRpnt, (void *) cmnd, buffer, bufflen,
		    scsi_wait_done, timeout, retries);

	/* Make sure the queue actually runs, then sleep until done. */
	generic_unplug_device(queue);
	wait_for_completion(&done);
	SRpnt->sr_request.waiting = NULL;

	if (SRpnt->sr_command) {
		scsi_release_command(SRpnt->sr_command);
		SRpnt->sr_command = NULL;
	}
}
797
798 /*
799 * Function: scsi_do_req
800 *
801 * Purpose: Queue a SCSI request
802 *
803 * Arguments: SRpnt - command descriptor.
804 * cmnd - actual SCSI command to be performed.
805 * buffer - data buffer.
806 * bufflen - size of data buffer.
807 * done - completion function to be run.
808 * timeout - how long to let it run before timeout.
809 * retries - number of retries we allow.
810 *
811 * Lock status: With the new queueing code, this is SMP-safe, and no locks
812 * need be held upon entry. The old queueing code the lock was
813 * assumed to be held upon entry.
814 *
815 * Returns: Nothing.
816 *
817 * Notes: Prior to the new queue code, this function was not SMP-safe.
818 * Also, this function is now only used for queueing requests
819 * for things like ioctls and character device requests - this
820 * is because we essentially just inject a request into the
821 * queue for the device. Normal block device handling manipulates
822 * the queue directly.
823 */
void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
		 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
		 int timeout, int retries)
{
	Scsi_Device *device = SRpnt->sr_device;
	struct Scsi_Host *shost = device->host;

	/* Caller must not hold io_request_lock. */
	ASSERT_LOCK(&io_request_lock, 0);

	SCSI_LOG_MLQUEUE(4,
			 {
			 int i;
			 int target = device->id;
			 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
			 printk("scsi_do_req (host = %d, channel = %d target = %d, "
				"buffer =%p, bufflen = %d, done = %p, timeout = %d, "
				"retries = %d)\n"
				"command : ", shost->host_no, device->channel, target, buffer,
				bufflen, done, timeout, retries);
			 for (i = 0; i < size; ++i)
				printk("%02x ", ((unsigned char *) cmnd)[i]);
			 printk("\n");
			 });

	if (!shost) {
		panic("Invalid or not present host.\n");
	}

	/*
	 * If the upper level driver is reusing this request, drop the
	 * command block left over from the previous use now; a fresh
	 * one is allocated when the request is queued.
	 */
	if (SRpnt->sr_command != NULL) {
		scsi_release_command(SRpnt->sr_command);
		SRpnt->sr_command = NULL;
	}

	/*
	 * Copy the caller's parameters into the request.  The low-level
	 * driver's completion path (via the done callback) will run the
	 * high-level completion function; reentrancy into the host
	 * driver is prevented further down in the dispatch path.
	 */
	memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
	       sizeof(SRpnt->sr_cmnd));
	SRpnt->sr_bufflen = bufflen;
	SRpnt->sr_buffer = buffer;
	SRpnt->sr_allowed = retries;
	SRpnt->sr_done = done;
	SRpnt->sr_timeout_per_command = timeout;

	/* Derive the CDB length from the opcode unless already set. */
	if (SRpnt->sr_cmd_len == 0)
		SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);

	/*
	 * Inject the request into the device's normal request queue;
	 * it will be dispatched once it reaches the head.
	 */
	scsi_insert_special_req(SRpnt, 0);

	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
}
899
900 /*
901 * Function: scsi_init_cmd_from_req
902 *
 * Purpose:	Initialize a Scsi_Cmnd from a Scsi_Request
905 *
906 * Arguments: SCpnt - command descriptor.
907 * SRpnt - Request from the queue.
908 *
909 * Lock status: None needed.
910 *
911 * Returns: Nothing.
912 *
913 * Notes: Mainly transfer data from the request structure to the
914 * command structure. The request structure is allocated
915 * using the normal memory allocator, and requests can pile
916 * up to more or less any depth. The command structure represents
917 * a consumable resource, as these are allocated into a pool
918 * when the SCSI subsystem initializes. The preallocation is
919 * required so that in low-memory situations a disk I/O request
920 * won't cause the memory manager to try and write out a page.
921 * The request structure is generally used by ioctls and character
922 * devices.
923 */
void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
{
	struct Scsi_Host *host = SCpnt->host;

	/* Caller must not hold io_request_lock. */
	ASSERT_LOCK(&io_request_lock, 0);

	SCpnt->owner = SCSI_OWNER_MIDLEVEL;
	/* Link the two objects both ways (see sc_request below). */
	SRpnt->sr_command = SCpnt;

	if (!host) {
		panic("Invalid or not present host.\n");
	}

	SCpnt->cmd_len = SRpnt->sr_cmd_len;
	SCpnt->use_sg = SRpnt->sr_use_sg;

	/* Copy the block-layer request and the CDB into the command. */
	memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
	       sizeof(SRpnt->sr_request));
	memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
	       sizeof(SCpnt->data_cmnd));
	SCpnt->reset_chain = NULL;
	SCpnt->serial_number = 0;
	SCpnt->serial_number_at_timeout = 0;
	SCpnt->bufflen = SRpnt->sr_bufflen;
	SCpnt->buffer = SRpnt->sr_buffer;
	SCpnt->flags = 0;
	SCpnt->retries = 0;
	SCpnt->allowed = SRpnt->sr_allowed;
	SCpnt->done = SRpnt->sr_done;
	SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;

	SCpnt->sc_data_direction = SRpnt->sr_data_direction;

	SCpnt->sglist_len = SRpnt->sr_sglist_len;
	SCpnt->underflow = SRpnt->sr_underflow;

	SCpnt->sc_request = SRpnt;

	memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
	       sizeof(SCpnt->cmnd));
	/* Zero the sense buffer.  Some host adapters automatically request
	 * sense on error.  0 is not a valid sense code.
	 */
	memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
	SCpnt->request_buffer = SRpnt->sr_buffer;
	SCpnt->request_bufflen = SRpnt->sr_bufflen;
	/* Snapshot the old_* fields so error handling can restore the
	 * command to its original state after a retry. */
	SCpnt->old_use_sg = SCpnt->use_sg;
	if (SCpnt->cmd_len == 0)
		SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
	SCpnt->old_cmd_len = SCpnt->cmd_len;
	SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
	SCpnt->old_underflow = SCpnt->underflow;

	/* Start the timer ticking.  */

	SCpnt->internal_timeout = NORMAL_TIMEOUT;
	SCpnt->abort_reason = 0;
	SCpnt->result = 0;

	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}
985
/*
 * Function:    scsi_do_cmd
 *
 * Purpose:     Queue a SCSI command
 *
 * Arguments:   SCpnt     - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: With the new queueing code, this is SMP-safe, and no locks
 *              need be held upon entry.  The old queueing code the lock was
 *              assumed to be held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       Prior to the new queue code, this function was not SMP-safe.
 *              Also, this function is now only used for queueing requests
 *              for things like ioctls and character device requests - this
 *              is because we essentially just inject a request into the
 *              queue for the device. Normal block device handling manipulates
 *              the queue directly.
 */
void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
		 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
		 int timeout, int retries)
{
	struct Scsi_Host *host = SCpnt->host;

	/* Caller must NOT hold io_request_lock. */
	ASSERT_LOCK(&io_request_lock, 0);

	/* Tag the command with a fresh mid-level id and mark it owned by us. */
	SCpnt->pid = scsi_pid++;
	SCpnt->owner = SCSI_OWNER_MIDLEVEL;

	SCSI_LOG_MLQUEUE(4,
			 {
			 int i;
			 int target = SCpnt->target;
			 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
			 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
				"buffer =%p, bufflen = %d, done = %p, timeout = %d, "
				"retries = %d)\n"
				"command : ", host->host_no, SCpnt->channel, target, buffer,
				bufflen, done, timeout, retries);
			 for (i = 0; i < size; ++i)
				printk("%02x ", ((unsigned char *) cmnd)[i]);
			 printk("\n");
			 });

	if (!host) {
		panic("Invalid or not present host.\n");
	}
	/*
	 * We must prevent reentrancy to the lowlevel host driver.  This prevents
	 * it - we enter a loop until the host we want to talk to is not busy.
	 * Race conditions are prevented, as interrupts are disabled in between the
	 * time we check for the host being not busy, and the time we mark it busy
	 * ourselves.
	 */


	/*
	 * Our own function scsi_done (which marks the host as not busy, disables
	 * the timeout counter, etc) will be called by us or by the
	 * scsi_hosts[host].queuecommand() function needs to also call
	 * the completion function for the high level driver.
	 */

	/* Keep an unmodified copy of the CDB so a retry can restore it
	 * (see scsi_retry_command). */
	memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd,
	       sizeof(SCpnt->data_cmnd));
	SCpnt->reset_chain = NULL;
	SCpnt->serial_number = 0;
	SCpnt->serial_number_at_timeout = 0;
	SCpnt->bufflen = bufflen;
	SCpnt->buffer = buffer;
	SCpnt->flags = 0;
	SCpnt->retries = 0;
	SCpnt->allowed = retries;
	SCpnt->done = done;
	SCpnt->timeout_per_command = timeout;

	memcpy((void *) SCpnt->cmnd, (const void *) cmnd,
	       sizeof(SCpnt->cmnd));
	/* Zero the sense buffer.  Some host adapters automatically request
	 * sense on error.  0 is not a valid sense code.
	 */
	memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
	SCpnt->request_buffer = buffer;
	SCpnt->request_bufflen = bufflen;
	SCpnt->old_use_sg = SCpnt->use_sg;
	/* Derive the CDB length from the opcode's command group when the
	 * caller did not set one explicitly. */
	if (SCpnt->cmd_len == 0)
		SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
	SCpnt->old_cmd_len = SCpnt->cmd_len;
	SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
	SCpnt->old_underflow = SCpnt->underflow;

	/* Start the timer ticking.  */

	SCpnt->internal_timeout = NORMAL_TIMEOUT;
	SCpnt->abort_reason = 0;
	SCpnt->result = 0;

	/*
	 * At this point, we merely set up the command, stick it in the normal
	 * request queue, and return.  Eventually that request will come to the
	 * top of the list, and will be dispatched.
	 */
	scsi_insert_special_cmd(SCpnt, 0);

	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
}
1100
/*
 * This function is the mid-level interrupt routine, which decides how
 *  to handle error conditions.  Each invocation of this function must
 *  do one and *only* one of the following:
 *
 *      1) Insert command in BH queue.
 *      2) Activate error handler for host.
 *
 * FIXME(eric) - I am concerned about stack overflow (still).  An
 * interrupt could come while we are processing the bottom queue,
 * which would cause another command to be stuffed onto the bottom
 * queue, and it would in turn be processed as that interrupt handler
 * is returning.  Given a sufficiently steady rate of returning
 * commands, this could cause the stack to overflow.  I am not sure
 * what is the most appropriate solution here - we should probably
 * keep a depth count, and not process any commands while we still
 * have a bottom handler active higher in the stack.
 *
 * There is currently code in the bottom half handler to monitor
 * recursion in the bottom handler and report if it ever happens.  If
 * this becomes a problem, it won't be hard to engineer something to
 * deal with it so that only the outer layer ever does any real
 * processing.
 */
void scsi_done(Scsi_Cmnd * SCpnt)
{
	unsigned long flags;
	int tstatus;

	/*
	 * We don't have to worry about this one timing out any more.
	 */
	tstatus = scsi_delete_timer(SCpnt);

	/*
	 * If we are unable to remove the timer, it means that the command
	 * has already timed out.  In this case, we have no choice but to
	 * let the timeout function run, as we have no idea where in fact
	 * that function could really be.  It might be on another processor,
	 * etc, etc.
	 */
	if (!tstatus) {
		/* Record that completion arrived after the timeout fired;
		 * the timeout path owns the command now. */
		SCpnt->done_late = 1;
		return;
	}
	/* Set the serial numbers back to zero */
	SCpnt->serial_number = 0;

	/*
	 * First, see whether this command already timed out.  If so, we ignore
	 * the response.  We treat it as if the command never finished.
	 *
	 * Since serial_number is now 0, the error handler cound detect this
	 * situation and avoid to call the low level driver abort routine.
	 * (DB)
	 *
	 * FIXME(eric) - I believe that this test is now redundant, due to
	 * the test of the return status of del_timer().
	 */
	if (SCpnt->state == SCSI_STATE_TIMEOUT) {
		SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
		return;
	}
	spin_lock_irqsave(&scsi_bhqueue_lock, flags);

	SCpnt->serial_number_at_timeout = 0;
	SCpnt->state = SCSI_STATE_BHQUEUE;
	SCpnt->owner = SCSI_OWNER_BH_HANDLER;
	SCpnt->bh_next = NULL;

	/*
	 * Next, put this command in the BH queue.
	 *
	 * We need a spinlock here, or compare and exchange if we can reorder incoming
	 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
	 * before bh is serviced. -jj
	 *
	 * We already have the io_request_lock here, since we are called from the
	 * interrupt handler or the error handler. (DB)
	 *
	 * This may be true at the moment, but I would like to wean all of the low
	 * level drivers away from using io_request_lock.   Technically they should
	 * all use their own locking.  I am adding a small spinlock to protect
	 * this datastructure to make it safe for that day.  (ERY)
	 */
	if (!scsi_bh_queue_head) {
		scsi_bh_queue_head = SCpnt;
		scsi_bh_queue_tail = SCpnt;
	} else {
		/* Append to tail: preserves completion order for the BH. */
		scsi_bh_queue_tail->bh_next = SCpnt;
		scsi_bh_queue_tail = SCpnt;
	}

	spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
	/*
	 * Mark the bottom half handler to be run.
	 */
	mark_bh(SCSI_BH);
}
1200
/*
 * Procedure:   scsi_bottom_half_handler
 *
 * Purpose:     Called after we have finished processing interrupts, it
 *              performs post-interrupt handling for commands that may
 *              have completed.
 *
 * Notes:       This is called with all interrupts enabled.  This should reduce
 *              interrupt latency, stack depth, and reentrancy of the low-level
 *              drivers.
 *
 * The io_request_lock is required in all the routine. There was a subtle
 * race condition when scsi_done is called after a command has already
 * timed out but before the time out is processed by the error handler.
 * (DB)
 *
 * I believe I have corrected this.  We simply monitor the return status of
 * del_timer() - if this comes back as 0, it means that the timer has fired
 * and that a timeout is in progress.   I have modified scsi_done() such
 * that in this instance the command is never inserted in the bottom
 * half queue.  Thus the only time we hold the lock here is when
 * we wish to atomically remove the contents of the queue.
 */
void scsi_bottom_half_handler(void)
{
	Scsi_Cmnd *SCpnt;
	Scsi_Cmnd *SCnext;
	unsigned long flags;


	while (1 == 1) {
		/* Atomically detach the whole pending list; new completions
		 * can then queue up while we work on this batch. */
		spin_lock_irqsave(&scsi_bhqueue_lock, flags);
		SCpnt = scsi_bh_queue_head;
		scsi_bh_queue_head = NULL;
		spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);

		if (SCpnt == NULL) {
			return;
		}
		SCnext = SCpnt->bh_next;

		for (; SCpnt; SCpnt = SCnext) {
			/* Save the link first: the disposition handlers may
			 * requeue or free SCpnt. */
			SCnext = SCpnt->bh_next;

			switch (scsi_decide_disposition(SCpnt)) {
			case SUCCESS:
				/*
				 * Add to BH queue.
				 */
				SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
						SCpnt->host->host_failed,
							 SCpnt->result));

				scsi_finish_command(SCpnt);
				break;
			case NEEDS_RETRY:
				/*
				 * We only come in here if we want to retry a command.  The
				 * test to see whether the command should be retried should be
				 * keeping track of the number of tries, so we don't end up looping,
				 * of course.
				 */
				SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
				SCpnt->host->host_failed, SCpnt->result));

				scsi_retry_command(SCpnt);
				break;
			case ADD_TO_MLQUEUE:
				/*
				 * This typically happens for a QUEUE_FULL message -
				 * typically only when the queue depth is only
				 * approximate for a given device.  Adding a command
				 * to the queue for the device will prevent further commands
				 * from being sent to the device, so we shouldn't end up
				 * with tons of things being sent down that shouldn't be.
				 */
				SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as device queue full, put on ml queue %p\n",
							      SCpnt));
				scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
				break;
			default:
				/*
				 * Here we have a fatal error of some sort.  Turn it over to
				 * the error handler.
				 */
				SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
						    SCpnt, SCpnt->result,
				  atomic_read(&SCpnt->host->host_active),
						  SCpnt->host->host_busy,
					      SCpnt->host->host_failed));

				/*
				 * Dump the sense information too.
				 *
				 * NOTE(review): status_byte() is tested with '&' against
				 * CHECK_CONDITION, which is a status value rather than a
				 * bit mask - this also matches any other odd status byte.
				 * Only affects debug logging; verify before changing.
				 */
				if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
					SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
				}
				if (SCpnt->host->eh_wait != NULL) {
					SCpnt->host->host_failed++;
					SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
					SCpnt->state = SCSI_STATE_FAILED;
					SCpnt->host->in_recovery = 1;
					/*
					 * If the host is having troubles, then look to see if this was the last
					 * command that might have failed.  If so, wake up the error handler.
					 */
					if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
						SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
						atomic_read(&SCpnt->host->eh_wait->count)));
						up(SCpnt->host->eh_wait);
					}
				} else {
					/*
					 * We only get here if the error recovery thread has died.
					 */
					scsi_finish_command(SCpnt);
				}
			}
		}		/* for(; SCpnt...) */

	}			/* while(1==1) */

}
1324
1325 /*
1326 * Function: scsi_retry_command
1327 *
1328 * Purpose: Send a command back to the low level to be retried.
1329 *
1330 * Notes: This command is always executed in the context of the
1331 * bottom half handler, or the error handler thread. Low
1332 * level drivers should not become re-entrant as a result of
1333 * this.
1334 */
scsi_retry_command(Scsi_Cmnd * SCpnt)1335 int scsi_retry_command(Scsi_Cmnd * SCpnt)
1336 {
1337 memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
1338 sizeof(SCpnt->data_cmnd));
1339 SCpnt->request_buffer = SCpnt->buffer;
1340 SCpnt->request_bufflen = SCpnt->bufflen;
1341 SCpnt->use_sg = SCpnt->old_use_sg;
1342 SCpnt->cmd_len = SCpnt->old_cmd_len;
1343 SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
1344 SCpnt->underflow = SCpnt->old_underflow;
1345
1346 /*
1347 * Zero the sense information from the last time we tried
1348 * this command.
1349 */
1350 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1351
1352 return scsi_dispatch_cmd(SCpnt);
1353 }
1354
/*
 * Function:    scsi_finish_command
 *
 * Purpose:     Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(Scsi_Cmnd * SCpnt)
{
	struct Scsi_Host *host;
	Scsi_Device *device;
	Scsi_Request * SRpnt;
	unsigned long flags;

	/* Caller must NOT hold io_request_lock; we take it ourselves below. */
	ASSERT_LOCK(&io_request_lock, 0);

	host = SCpnt->host;
	device = SCpnt->device;

	/*
	 * We need to protect the decrement, as otherwise a race condition
	 * would exist.  Fiddling with SCpnt isn't a problem as the
	 * design only allows a single SCpnt to be active in only
	 * one execution context, but the device and host structures are
	 * shared.
	 */
	spin_lock_irqsave(&io_request_lock, flags);
	host->host_busy--;	/* Indicate that we are free */
	device->device_busy--;	/* Decrement device usage counter. */
	spin_unlock_irqrestore(&io_request_lock, flags);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 */
	host->host_blocked = FALSE;
	device->device_blocked = FALSE;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (scsi_sense_valid(SCpnt)) {
		SCpnt->result |= (DRIVER_SENSE << 24);
	}
	SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
				      SCpnt->device->id, SCpnt->result));

	SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
	SCpnt->state = SCSI_STATE_FINISHED;

	/* We can get here with use_sg=0, causing a panic in the upper level (DB) */
	SCpnt->use_sg = SCpnt->old_use_sg;

	/*
	 * If there is an associated request structure, copy the data over before we call the
	 * completion function.
	 */
	SRpnt = SCpnt->sc_request;
	if( SRpnt != NULL ) {
		SRpnt->sr_result = SRpnt->sr_command->result;
		/* Only copy sense data on failure; 0 means success and the
		 * sense buffer was zeroed before dispatch. */
		if( SRpnt->sr_result != 0 ) {
			memcpy(SRpnt->sr_sense_buffer,
			       SRpnt->sr_command->sense_buffer,
			       sizeof(SRpnt->sr_sense_buffer));
		}
	}

	/* Invoke the upper-layer completion callback last; it may free or
	 * reuse the command. */
	SCpnt->done(SCpnt);
}
1427
1428 static int scsi_register_host(Scsi_Host_Template *);
1429 static int scsi_unregister_host(Scsi_Host_Template *);
1430
1431 /*
1432 * Function: scsi_release_commandblocks()
1433 *
1434 * Purpose: Release command blocks associated with a device.
1435 *
1436 * Arguments: SDpnt - device
1437 *
1438 * Returns: Nothing
1439 *
1440 * Lock status: No locking assumed or required.
1441 *
1442 * Notes:
1443 */
scsi_release_commandblocks(Scsi_Device * SDpnt)1444 void scsi_release_commandblocks(Scsi_Device * SDpnt)
1445 {
1446 Scsi_Cmnd *SCpnt, *SCnext;
1447 unsigned long flags;
1448
1449 spin_lock_irqsave(&device_request_lock, flags);
1450 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
1451 SDpnt->device_queue = SCnext = SCpnt->next;
1452 kfree((char *) SCpnt);
1453 }
1454 SDpnt->has_cmdblocks = 0;
1455 SDpnt->queue_depth = 0;
1456 spin_unlock_irqrestore(&device_request_lock, flags);
1457 }
1458
/*
 * Function:    scsi_build_commandblocks()
 *
 * Purpose:     Allocate command blocks associated with a device.
 *
 * Arguments:   SDpnt   - device
 *
 * Returns:     Nothing
 *
 * Lock status: No locking assumed or required.
 *
 * Notes:       Allocates up to queue_depth Scsi_Cmnd structures onto the
 *              device's free list.  On partial allocation failure the
 *              queue depth is trimmed to what was actually obtained.
 */
void scsi_build_commandblocks(Scsi_Device * SDpnt)
{
	unsigned long flags;
	struct Scsi_Host *host = SDpnt->host;
	int j;
	Scsi_Cmnd *SCpnt;

	spin_lock_irqsave(&device_request_lock, flags);

	if (SDpnt->queue_depth == 0)
	{
		SDpnt->queue_depth = host->cmd_per_lun;
		if (SDpnt->queue_depth == 0)
			SDpnt->queue_depth = 1; /* live to fight another day */
	}
	SDpnt->device_queue = NULL;

	for (j = 0; j < SDpnt->queue_depth; j++) {
		/* GFP_ATOMIC: we hold a spinlock; GFP_DMA when the host can
		 * only address ISA DMA memory. */
		SCpnt = (Scsi_Cmnd *)
		    kmalloc(sizeof(Scsi_Cmnd),
				     GFP_ATOMIC |
				(host->unchecked_isa_dma ? GFP_DMA : 0));
		if (NULL == SCpnt)
			break;	/* If not, the next line will oops ... */
		memset(SCpnt, 0, sizeof(Scsi_Cmnd));
		SCpnt->host = host;
		SCpnt->device = SDpnt;
		SCpnt->target = SDpnt->id;
		SCpnt->lun = SDpnt->lun;
		SCpnt->channel = SDpnt->channel;
		SCpnt->request.rq_status = RQ_INACTIVE;
		SCpnt->use_sg = 0;
		SCpnt->old_use_sg = 0;
		SCpnt->old_cmd_len = 0;
		SCpnt->underflow = 0;
		SCpnt->old_underflow = 0;
		SCpnt->transfersize = 0;
		SCpnt->resid = 0;
		SCpnt->serial_number = 0;
		SCpnt->serial_number_at_timeout = 0;
		SCpnt->host_scribble = NULL;
		/* Push onto the head of the device's free list. */
		SCpnt->next = SDpnt->device_queue;
		SDpnt->device_queue = SCpnt;
		SCpnt->state = SCSI_STATE_UNUSED;
		SCpnt->owner = SCSI_OWNER_NOBODY;
	}
	if (j < SDpnt->queue_depth) {	/* low on space (D.Gilbert 990424) */
		printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n",
		       SDpnt->queue_depth, j);
		SDpnt->queue_depth = j;
		SDpnt->has_cmdblocks = (0 != j);
	} else {
		SDpnt->has_cmdblocks = 1;
	}
	spin_unlock_irqrestore(&device_request_lock, flags);
}
1528
scsi_host_no_insert(char * str,int n)1529 void __init scsi_host_no_insert(char *str, int n)
1530 {
1531 Scsi_Host_Name *shn, *shn2;
1532 int len;
1533
1534 len = strlen(str);
1535 if (len && (shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1536 if ((shn->name = kmalloc(len+1, GFP_ATOMIC))) {
1537 strncpy(shn->name, str, len);
1538 shn->name[len] = 0;
1539 shn->host_no = n;
1540 shn->host_registered = 0;
1541 shn->loaded_as_module = 1; /* numbers shouldn't be freed in any case */
1542 shn->next = NULL;
1543 if (scsi_host_no_list) {
1544 for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1545 ;
1546 shn2->next = shn;
1547 }
1548 else
1549 scsi_host_no_list = shn;
1550 max_scsi_hosts = n+1;
1551 }
1552 else
1553 kfree((char *) shn);
1554 }
1555 }
1556
1557
1558 static DECLARE_MUTEX(scsi_host_internals_lock);
1559 /*
1560 * Function: scsi_add_single_device()
1561 *
1562 * Purpose: Support for hotplugging SCSI devices. This function
1563 * implements the actual functionality for
1564 * echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1565 *
1566 * Arguments: shpnt - pointer to the SCSI host structure
1567 * channel - channel of the device to add
1568 * id - id of the device to add
1569 * lun - lun of the device to add
1570 *
1571 * Returns: 0 on success or an error code
1572 *
1573 * Lock status: None needed.
1574 *
1575 * Notes: This feature is probably unsafe for standard SCSI devices,
1576 * but is perfectly normal for things like ieee1394 or USB
1577 * drives since these busses are designed for hotplugging.
1578 * Use at your own risk....
1579 */
scsi_add_single_device(struct Scsi_Host * shpnt,int channel,int id,int lun)1580 int scsi_add_single_device(struct Scsi_Host *shpnt, int channel,
1581 int id, int lun)
1582 {
1583 Scsi_Device *scd;
1584
1585 /* Do a bit of sanity checking */
1586 if (shpnt==NULL) {
1587 return -ENXIO;
1588 }
1589
1590 /* We call functions that can sleep, so use a semaphore to
1591 * avoid racing with scsi_remove_single_device(). We probably
1592 * need to also apply this lock to scsi_register*(),
1593 * scsi_unregister*(), sd_open(), sd_release() and anything
1594 * else that might be messing with with the Scsi_Host or other
1595 * fundamental data structures. */
1596 down(&scsi_host_internals_lock);
1597
1598 /* Check if they asked us to add an already existing device.
1599 * If so, ignore their misguided efforts. */
1600 for (scd = shpnt->host_queue; scd; scd = scd->next) {
1601 if ((scd->channel == channel && scd->id == id && scd->lun == lun)) {
1602 break;
1603 }
1604 }
1605 if (scd) {
1606 up(&scsi_host_internals_lock);
1607 return -ENOSYS;
1608 }
1609
1610 scan_scsis(shpnt, 1, channel, id, lun);
1611 up(&scsi_host_internals_lock);
1612 return 0;
1613 }
1614
/*
 * Function:    scsi_remove_single_device()
 *
 * Purpose:     Support for hot-unplugging SCSI devices.  This function
 *              implements the actual functionality for
 *              echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
 *
 * Arguments:   shpnt   - pointer to the SCSI host structure
 *              channel - channel of the device to add
 *              id      - id of the device to add
 *              lun     - lun of the device to add
 *
 * Returns:     0 on success, -ENODEV if the host or device is not
 *              present, -EBUSY if the device is still open.
 *
 * Lock status: None needed.
 *
 * Notes:       This feature is probably unsafe for standard SCSI devices,
 *              but is perfectly normal for things like ieee1394 or USB
 *              drives since these busses are designed for hotplugging.
 *              Use at your own risk....
 */
int scsi_remove_single_device(struct Scsi_Host *shpnt, int channel,
			      int id, int lun)
{
	Scsi_Device *scd;
	struct Scsi_Device_Template *SDTpnt;

	/* Do a bit of sanity checking */
	if (shpnt==NULL) {
		return -ENODEV;
	}

	/* We call functions that can sleep, so use a semaphore to
	 * avoid racing with scsi_add_single_device().  We probably
	 * need to also apply this lock to scsi_register*(),
	 * scsi_unregister*(), sd_open(), sd_release() and anything
	 * else that might be messing with with the Scsi_Host or other
	 * fundamental data structures.  */
	down(&scsi_host_internals_lock);

	/* Make sure the specified device is in fact present */
	for (scd = shpnt->host_queue; scd; scd = scd->next) {
		if ((scd->channel == channel && scd->id == id && scd->lun == lun)) {
			break;
		}
	}
	if (scd==NULL) {
		up(&scsi_host_internals_lock);
		return -ENODEV;
	}

	/* See if the specified device is busy.  Doesn't this race with
	 * sd_open(), sd_release() and similar?  Why don't they lock
	 * things when they increment/decrement the access_count? */
	if (scd->access_count) {
		up(&scsi_host_internals_lock);
		return -EBUSY;
	}

	/* Give every upper-level driver (sd, sr, st, sg) a chance to
	 * detach from this device before we tear it down. */
	SDTpnt = scsi_devicelist;
	while (SDTpnt != NULL) {
		if (SDTpnt->detach)
			(*SDTpnt->detach) (scd);
		SDTpnt = SDTpnt->next;
	}

	if (scd->attached == 0) {
		/* Nobody is using this device, so we
		 * can now free all command structures. */
		if (shpnt->hostt->revoke)
			shpnt->hostt->revoke(scd);
		devfs_unregister (scd->de);
		scsi_release_commandblocks(scd);

		/* Now we can remove the device structure */
		/* Unlink from the doubly-linked host_queue list. */
		if (scd->next != NULL)
			scd->next->prev = scd->prev;

		if (scd->prev != NULL)
			scd->prev->next = scd->next;

		if (shpnt->host_queue == scd) {
			shpnt->host_queue = scd->next;
		}
		blk_cleanup_queue(&scd->request_queue);
		kfree((char *) scd);
	}

	up(&scsi_host_internals_lock);
	return 0;
}
1706
1707 #ifdef CONFIG_PROC_FS
/*
 * Function:    scsi_proc_info
 *
 * Purpose:     read_proc handler for /proc/scsi/scsi.  Emits the
 *              "Attached devices:" header followed by one entry per
 *              device (via proc_print_scsidevice), using the classic
 *              procfs begin/pos windowing idiom to honour offset/length.
 */
static int scsi_proc_info(char *buffer, char **start, off_t offset, int length)
{
	Scsi_Device *scd;
	struct Scsi_Host *HBA_ptr;
	int size, len = 0;
	off_t begin = 0;
	off_t pos = 0;

	/*
	 * First, see if there are any attached devices or not.
	 */
	for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
		if (HBA_ptr->host_queue != NULL) {
			break;
		}
	}
	size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
	len += size;
	pos = begin + len;
	for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
#if 0
		size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
				HBA_ptr->hostt->procname);
		len += size;
		pos = begin + len;
#endif
		for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
			/* proc_print_scsidevice stores the entry's length in
			 * 'size' via pointer. */
			proc_print_scsidevice(scd, buffer, &size, len);
			len += size;
			pos = begin + len;

			/* Discard data entirely before the requested offset. */
			if (pos < offset) {
				len = 0;
				begin = pos;
			}
			/* Stop once we have filled the requested window. */
			if (pos > offset + length)
				goto stop_output;
		}
	}

stop_output:
	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);	/* Start slop */
	if (len > length)
		len = length;	/* Ending slop */
	return (len);
}
1755
proc_scsi_gen_write(struct file * file,const char * buf,unsigned long length,void * data)1756 static int proc_scsi_gen_write(struct file * file, const char * buf,
1757 unsigned long length, void *data)
1758 {
1759 struct Scsi_Host *HBA_ptr;
1760 char *p;
1761 int host, channel, id, lun;
1762 char * buffer;
1763 int err;
1764
1765 if (!buf || length>PAGE_SIZE)
1766 return -EINVAL;
1767
1768 if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
1769 return -ENOMEM;
1770 if(copy_from_user(buffer, buf, length))
1771 {
1772 err =-EFAULT;
1773 goto out;
1774 }
1775
1776 err = -EINVAL;
1777
1778 if (length < PAGE_SIZE)
1779 buffer[length] = '\0';
1780 else if (buffer[PAGE_SIZE-1])
1781 goto out;
1782
1783 if (length < 11 || strncmp("scsi", buffer, 4))
1784 goto out;
1785
1786 /*
1787 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1788 * to dump status of all scsi commands. The number is used to specify the level
1789 * of detail in the dump.
1790 */
1791 if (!strncmp("dump", buffer + 5, 4)) {
1792 unsigned int level;
1793
1794 p = buffer + 10;
1795
1796 if (*p == '\0')
1797 goto out;
1798
1799 level = simple_strtoul(p, NULL, 0);
1800 scsi_dump_status(level);
1801 }
1802 /*
1803 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1804 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1805 * llcomplete,hlqueue,hlcomplete]
1806 */
1807 #ifdef CONFIG_SCSI_LOGGING /* { */
1808
1809 if (!strncmp("log", buffer + 5, 3)) {
1810 char *token;
1811 unsigned int level;
1812
1813 p = buffer + 9;
1814 token = p;
1815 while (*p != ' ' && *p != '\t' && *p != '\0') {
1816 p++;
1817 }
1818
1819 if (*p == '\0') {
1820 if (strncmp(token, "all", 3) == 0) {
1821 /*
1822 * Turn on absolutely everything.
1823 */
1824 scsi_logging_level = ~0;
1825 } else if (strncmp(token, "none", 4) == 0) {
1826 /*
1827 * Turn off absolutely everything.
1828 */
1829 scsi_logging_level = 0;
1830 } else {
1831 goto out;
1832 }
1833 } else {
1834 *p++ = '\0';
1835
1836 level = simple_strtoul(p, NULL, 0);
1837
1838 /*
1839 * Now figure out what to do with it.
1840 */
1841 if (strcmp(token, "error") == 0) {
1842 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1843 } else if (strcmp(token, "timeout") == 0) {
1844 SCSI_SET_TIMEOUT_LOGGING(level);
1845 } else if (strcmp(token, "scan") == 0) {
1846 SCSI_SET_SCAN_BUS_LOGGING(level);
1847 } else if (strcmp(token, "mlqueue") == 0) {
1848 SCSI_SET_MLQUEUE_LOGGING(level);
1849 } else if (strcmp(token, "mlcomplete") == 0) {
1850 SCSI_SET_MLCOMPLETE_LOGGING(level);
1851 } else if (strcmp(token, "llqueue") == 0) {
1852 SCSI_SET_LLQUEUE_LOGGING(level);
1853 } else if (strcmp(token, "llcomplete") == 0) {
1854 SCSI_SET_LLCOMPLETE_LOGGING(level);
1855 } else if (strcmp(token, "hlqueue") == 0) {
1856 SCSI_SET_HLQUEUE_LOGGING(level);
1857 } else if (strcmp(token, "hlcomplete") == 0) {
1858 SCSI_SET_HLCOMPLETE_LOGGING(level);
1859 } else if (strcmp(token, "ioctl") == 0) {
1860 SCSI_SET_IOCTL_LOGGING(level);
1861 } else {
1862 goto out;
1863 }
1864 }
1865
1866 printk(KERN_INFO "scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1867 }
1868 #endif /* CONFIG_SCSI_LOGGING */ /* } */
1869
1870 /*
1871 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1872 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1873 *
1874 * Consider this feature pre-BETA.
1875 *
1876 * CAUTION: This is not for hotplugging your peripherals. As
1877 * SCSI was not designed for this you could damage your
1878 * hardware and thoroughly confuse the SCSI subsystem.
1879 */
1880 if (!strncmp("add-single-device", buffer + 5, 17)) {
1881 p = buffer + 23;
1882
1883 host = simple_strtoul(p, &p, 0);
1884 channel = simple_strtoul(p + 1, &p, 0);
1885 id = simple_strtoul(p + 1, &p, 0);
1886 lun = simple_strtoul(p + 1, &p, 0);
1887
1888 printk(KERN_INFO "scsi add-single-device %d %d %d %d\n", host, channel,
1889 id, lun);
1890
1891 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1892 if (HBA_ptr->host_no == host) {
1893 break;
1894 }
1895 }
1896 if ((err=scsi_add_single_device(HBA_ptr, channel, id, lun))==0)
1897 err = length;
1898 goto out;
1899 }
1900
1901 /*
1902 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1903 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1904 *
1905 * Consider this feature pre-BETA.
1906 *
1907 * CAUTION: This is not for hotplugging your peripherals. As
1908 * SCSI was not designed for this you could damage your
1909 * hardware and thoroughly confuse the SCSI subsystem.
1910 */
1911 else if (!strncmp("remove-single-device", buffer + 5, 20)) {
1912 p = buffer + 26;
1913
1914 host = simple_strtoul(p, &p, 0);
1915 channel = simple_strtoul(p + 1, &p, 0);
1916 id = simple_strtoul(p + 1, &p, 0);
1917 lun = simple_strtoul(p + 1, &p, 0);
1918
1919 printk(KERN_INFO "scsi remove-single-device %d %d %d %d\n", host, channel,
1920 id, lun);
1921
1922 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1923 if (HBA_ptr->host_no == host) {
1924 break;
1925 }
1926 }
1927 if ((err=scsi_remove_single_device(HBA_ptr, channel, id, lun))==0)
1928 err = length;
1929 goto out;
1930 }
1931 out:
1932
1933 free_page((unsigned long) buffer);
1934 return err;
1935 }
1936 #endif
1937
1938 /*
1939 * This entry point should be called by a driver if it is trying
1940 * to add a low level scsi driver to the system.
1941 */
/*
 * scsi_register_host - add a low-level host adapter driver to the mid-layer.
 *
 * tpnt: the driver's host template; must have a detect() routine and must
 *       not already be linked into the scsi_hosts list.
 *
 * Returns 0 on success, 1 on any failure (already registered, no detect
 * routine, registration failure, or out of command-block memory).
 *
 * On success the template is linked into scsi_hosts, /proc/scsi entries
 * are created, an error-handler kernel thread is started per host (new-EH
 * drivers only), the bus is scanned, upper-level drivers are attached and
 * command blocks are built for each attached device.
 */
static int scsi_register_host(Scsi_Host_Template * tpnt)
{
	int pcount;
	struct Scsi_Host *shpnt;
	Scsi_Device *SDpnt;
	struct Scsi_Device_Template *sdtpnt;
	const char *name;
	unsigned long flags;
	int out_of_space = 0;

	if (tpnt->next || !tpnt->detect)
		return 1;	/* Must be already loaded, or
				 * no detect routine available
				 */

	/* If max_sectors isn't set, default to max */
	if (!tpnt->max_sectors)
		tpnt->max_sectors = MAX_SECTORS;

	/* Snapshot the global host count: if detect() registers hosts
	 * itself, next_scsi_host will have advanced past this value. */
	pcount = next_scsi_host;

	MOD_INC_USE_COUNT;

	/* The detect routine must carefully spinunlock/spinlock if
	   it enables interrupts, since all interrupt handlers do
	   spinlock as well.
	   All lame drivers are going to fail due to the following
	   spinlock. For the time beeing let's use it only for drivers
	   using the new scsi code. NOTE: the detect routine could
	   redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */

	if (tpnt->use_new_eh_code) {
		spin_lock_irqsave(&io_request_lock, flags);
		tpnt->present = tpnt->detect(tpnt);
		spin_unlock_irqrestore(&io_request_lock, flags);
	} else
		tpnt->present = tpnt->detect(tpnt);

	if (tpnt->present) {
		/* detect() claimed hosts are present but never called
		 * scsi_register() for them (host count unchanged). */
		if (pcount == next_scsi_host) {
			if (tpnt->present > 1) {
				/* More than one host with no registration is
				 * unrecoverable: we cannot invent them here. */
				printk(KERN_ERR "scsi: Failure to register low-level scsi driver");
				scsi_unregister_host(tpnt);
				return 1;
			}
			/*
			 * The low-level driver failed to register a driver.
			 * We can do this now.
			 */
			if(scsi_register(tpnt, 0)==NULL)
			{
				printk(KERN_ERR "scsi: register failed.\n");
				scsi_unregister_host(tpnt);
				return 1;
			}
		}
		tpnt->next = scsi_hosts;	/* Add to the linked list */
		scsi_hosts = tpnt;

		/* Add the new driver to /proc/scsi */
#ifdef CONFIG_PROC_FS
		build_proc_dir_entries(tpnt);
#endif


		/*
		 * Add the kernel threads for each host adapter that will
		 * handle error correction.
		 */
		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
			if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
				DECLARE_MUTEX_LOCKED(sem);

				shpnt->eh_notify = &sem;
				kernel_thread((int (*)(void *)) scsi_error_handler,
					      (void *) shpnt, 0);

				/*
				 * Now wait for the kernel error thread to initialize itself
				 * as it might be needed when we scan the bus.
				 */
				down(&sem);
				shpnt->eh_notify = NULL;
			}
		}

		/* Announce each host belonging to this template. */
		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
			if (shpnt->hostt == tpnt) {
				if (tpnt->info) {
					name = tpnt->info(shpnt);
				} else {
					name = tpnt->name;
				}
				printk(KERN_INFO "scsi%d : %s\n",	/* And print a little message */
				       shpnt->host_no, name);
			}
		}

		/* The next step is to call scan_scsis here. This generates the
		 * Scsi_Devices entries
		 */
		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
			if (shpnt->hostt == tpnt) {
				scan_scsis(shpnt, 0, 0, 0, 0);
				if (shpnt->select_queue_depths != NULL) {
					(shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
				}
			}
		}

		/* Give each upper-level driver (sd, st, sr, ...) that noticed
		 * devices a chance to initialize before attach. */
		for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
			if (sdtpnt->init && sdtpnt->dev_noticed)
				(*sdtpnt->init) ();
		}

		/*
		 * Next we create the Scsi_Cmnd structures for this host
		 */
		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
			for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
				if (SDpnt->host->hostt == tpnt) {
					for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
						if (sdtpnt->attach)
							(*sdtpnt->attach) (SDpnt);
					if (SDpnt->attached) {
						scsi_build_commandblocks(SDpnt);
						/* has_cmdblocks == 0 here means the
						 * allocation failed; remember it so we
						 * can back the whole registration out. */
						if (0 == SDpnt->has_cmdblocks)
							out_of_space = 1;
					}
				}
		}

		/*
		 * Now that we have all of the devices, resize the DMA pool,
		 * as required. */
		if (!out_of_space)
			scsi_resize_dma_pool();


		/* This does any final handling that is required. */
		for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
			if (sdtpnt->finish && sdtpnt->nr_dev) {
				(*sdtpnt->finish) ();
			}
		}
	}
#if defined(USE_STATIC_SCSI_MEMORY)
	printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
	       (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
	       (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
	       (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
#endif

	if (out_of_space) {
		scsi_unregister_host(tpnt);	/* easiest way to clean up?? */
		return 1;
	} else
		return 0;
}
2101
2102 /*
2103 * Similarly, this entry point should be called by a loadable module if it
2104 * is trying to remove a low level scsi driver from the system.
2105 */
/*
 * scsi_unregister_host - remove a low-level host adapter driver.
 *
 * tpnt: the host template previously registered via scsi_register_host().
 *
 * Returns 0 on success, -1 if the driver is busy (module in use, a command
 * still active, or an upper-level driver still attached to one of its
 * devices).
 *
 * Teardown proceeds in phases, each a full pass over scsi_hostlist:
 * busy check, force devices offline, mark all commands disconnecting,
 * detach upper-level drivers, kill error-handler threads, free command
 * blocks and Scsi_Device structures, release host resources, and finally
 * unlink the template.  The big kernel lock is held throughout to avoid
 * racing with open().
 */
static int scsi_unregister_host(Scsi_Host_Template * tpnt)
{
	int online_status;
	int pcount0, pcount;
	Scsi_Cmnd *SCpnt;
	Scsi_Device *SDpnt;
	Scsi_Device *SDpnt1;
	struct Scsi_Device_Template *sdtpnt;
	struct Scsi_Host *sh1;
	struct Scsi_Host *shpnt;
	char name[10];	/* host_no>=10^9? I don't think so. */

	/* get the big kernel lock, so we don't race with open() */
	lock_kernel();

	/*
	 * First verify that this host adapter is completely free with no pending
	 * commands
	 */
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = SDpnt->next) {
			if (SDpnt->host->hostt == tpnt
			    && SDpnt->host->hostt->module
			    && GET_USE_COUNT(SDpnt->host->hostt->module))
				goto err_out;
			/*
			 * FIXME(eric) - We need to find a way to notify the
			 * low level driver that we are shutting down - via the
			 * special device entry that still needs to get added.
			 *
			 * Is detach interface below good enough for this?
			 */
		}
	}

	/*
	 * FIXME(eric) put a spinlock on this.  We force all of the devices offline
	 * to help prevent race conditions where other hosts/processors could try and
	 * get in and queue a command.
	 */
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = SDpnt->next) {
			if (SDpnt->host->hostt == tpnt)
				SDpnt->online = FALSE;

		}
	}

	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		if (shpnt->hostt != tpnt) {
			continue;
		}
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = SDpnt->next) {
			/*
			 * Loop over all of the commands associated with the device.  If any of
			 * them are busy, then set the state back to inactive and bail.
			 */
			for (SCpnt = SDpnt->device_queue; SCpnt;
			     SCpnt = SCpnt->next) {
				online_status = SDpnt->online;
				SDpnt->online = FALSE;
				if (SCpnt->request.rq_status != RQ_INACTIVE) {
					printk(KERN_ERR "SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
					       SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
					       SCpnt->state, SCpnt->owner);
					/* Roll back: any command we already marked
					 * RQ_SCSI_DISCONNECTING on this host is returned
					 * to RQ_INACTIVE before we bail out.  Note this
					 * reuses SCpnt, which is fine since we goto
					 * err_out immediately afterwards. */
					for (SDpnt1 = shpnt->host_queue; SDpnt1;
					     SDpnt1 = SDpnt1->next) {
						for (SCpnt = SDpnt1->device_queue; SCpnt;
						     SCpnt = SCpnt->next)
							if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
								SCpnt->request.rq_status = RQ_INACTIVE;
					}
					SDpnt->online = online_status;
					printk(KERN_ERR "Device busy???\n");
					goto err_out;
				}
				/*
				 * No, this device is really free.  Mark it as such, and
				 * continue on.
				 */
				SCpnt->state = SCSI_STATE_DISCONNECTING;
				SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING;	/* Mark as busy */
			}
		}
	}
	/* Next we detach the high level drivers from the Scsi_Device structures */

	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		if (shpnt->hostt != tpnt) {
			continue;
		}
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = SDpnt->next) {
			for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
				if (sdtpnt->detach)
					(*sdtpnt->detach) (SDpnt);

			/* If something still attached, punt */
			if (SDpnt->attached) {
				printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
				goto err_out;
			}
			devfs_unregister (SDpnt->de);
		}
	}

	/*
	 * Next, kill the kernel error recovery thread for this host.
	 */
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		if (shpnt->hostt == tpnt
		    && shpnt->hostt->use_new_eh_code
		    && shpnt->ehandler != NULL) {
			DECLARE_MUTEX_LOCKED(sem);

			/* SIGHUP tells the error handler thread to exit;
			 * wait on sem until it has acknowledged. */
			shpnt->eh_notify = &sem;
			send_sig(SIGHUP, shpnt->ehandler, 1);
			down(&sem);
			shpnt->eh_notify = NULL;
		}
	}

	/* Next we free up the Scsi_Cmnd structures for this host */

	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		if (shpnt->hostt != tpnt) {
			continue;
		}
		/* Always re-read the list head: each iteration frees the
		 * current node, so SDpnt->next would be use-after-free. */
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = shpnt->host_queue) {
			scsi_release_commandblocks(SDpnt);

			blk_cleanup_queue(&SDpnt->request_queue);
			/* Next free up the Scsi_Device structures for this host */
			shpnt->host_queue = SDpnt->next;
			kfree((char *) SDpnt);

		}
	}

	/* Next we go through and remove the instances of the individual hosts
	 * that were detected */

	pcount0 = next_scsi_host;
	for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
		sh1 = shpnt->next;
		if (shpnt->hostt != tpnt)
			continue;
		pcount = next_scsi_host;
		/* Remove the /proc/scsi directory entry */
		sprintf(name,"%d",shpnt->host_no);
		remove_proc_entry(name, tpnt->proc_dir);
		if (tpnt->release)
			(*tpnt->release) (shpnt);
		else {
			/* This is the default case for the release function.
			 * It should do the right thing for most correctly
			 * written host adapters.
			 */
			if (shpnt->irq)
				free_irq(shpnt->irq, NULL);
			if (shpnt->dma_channel != 0xff)
				free_dma(shpnt->dma_channel);
			if (shpnt->io_port && shpnt->n_io_port)
				release_region(shpnt->io_port, shpnt->n_io_port);
		}
		/* Only unregister here if release() did not do it itself
		 * (host count unchanged). */
		if (pcount == next_scsi_host)
			scsi_unregister(shpnt);
		tpnt->present--;
	}

	/*
	 * If there are absolutely no more hosts left, it is safe
	 * to completely nuke the DMA pool.  The resize operation will
	 * do the right thing and free everything.
	 */
	if (!scsi_hosts)
		scsi_resize_dma_pool();

	if (pcount0 != next_scsi_host)
		printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
		       (next_scsi_host == 1) ? "" : "s");

#if defined(USE_STATIC_SCSI_MEMORY)
	printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
	       (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
	       (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
	       (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
#endif

	/*
	 * Remove it from the linked list and /proc if all
	 * hosts were successfully removed (ie preset == 0)
	 */
	if (!tpnt->present) {
		Scsi_Host_Template **SHTp = &scsi_hosts;
		Scsi_Host_Template *SHT;

		while ((SHT = *SHTp) != NULL) {
			if (SHT == tpnt) {
				*SHTp = SHT->next;
				remove_proc_entry(tpnt->proc_name, proc_scsi);
				break;
			}
			SHTp = &SHT->next;
		}
	}
	MOD_DEC_USE_COUNT;

	unlock_kernel();
	return 0;

err_out:
	unlock_kernel();
	return -1;
}
2325
2326 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
2327
2328 /*
2329 * This entry point should be called by a loadable module if it is trying
2330 * add a high level scsi driver to the system.
2331 */
/*
 * scsi_register_device_module - add an upper-level (device class) driver.
 *
 * tpnt: the device template (e.g. for sd, st, sr, sg); must not already
 *       be linked into the template list.
 *
 * Returns 0 on success, 1 on failure (already registered, init() failed,
 * or out of command-block memory).
 *
 * Walks every known Scsi_Device, lets the new driver detect()/attach()
 * the ones it handles, allocates command blocks for newly-attached
 * devices and resizes the DMA pool.
 */
static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
{
	Scsi_Device *SDpnt;
	struct Scsi_Host *shpnt;
	int out_of_space = 0;

	if (tpnt->next)
		return 1;

	scsi_register_device(tpnt);
	/*
	 * First scan the devices that we know about, and see if we notice them.
	 */

	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = SDpnt->next) {
			if (tpnt->detect)
				SDpnt->detected = (*tpnt->detect) (SDpnt);
		}
	}

	/*
	 * If any of the devices would match this driver, then perform the
	 * init function.
	 */
	if (tpnt->init && tpnt->dev_noticed) {
		if ((*tpnt->init) ()) {
			/* init() failed: clear every detected flag we set
			 * above and unlink the template again. */
			for (shpnt = scsi_hostlist; shpnt;
			     shpnt = shpnt->next) {
				for (SDpnt = shpnt->host_queue; SDpnt;
				     SDpnt = SDpnt->next) {
					SDpnt->detected = 0;
				}
			}
			scsi_deregister_device(tpnt);
			return 1;
		}
	}

	/*
	 * Now actually connect the devices to the new driver.
	 */
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = SDpnt->next) {
			SDpnt->attached += SDpnt->detected;
			SDpnt->detected = 0;
			if (tpnt->attach)
				(*tpnt->attach) (SDpnt);
			/*
			 * If this driver attached to the device, and don't have any
			 * command blocks for this device, allocate some.
			 */
			if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
				SDpnt->online = TRUE;
				scsi_build_commandblocks(SDpnt);
				/* has_cmdblocks still 0 means allocation failed. */
				if (0 == SDpnt->has_cmdblocks)
					out_of_space = 1;
			}
		}
	}

	/*
	 * This does any final handling that is required.
	 */
	if (tpnt->finish && tpnt->nr_dev)
		(*tpnt->finish) ();
	if (!out_of_space)
		scsi_resize_dma_pool();
	MOD_INC_USE_COUNT;

	if (out_of_space) {
		scsi_unregister_device(tpnt);	/* easiest way to clean up?? */
		return 1;
	} else
		return 0;
}
2410
/*
 * scsi_unregister_device - remove an upper-level (device class) driver.
 *
 * tpnt: the device template previously registered via
 *       scsi_register_device_module().
 *
 * Returns 0 on success, -1 if the driver's module is still in use.
 *
 * Detaches the driver from every device, releases command blocks of
 * devices that end up with no attachments, and unlinks the template.
 * Holds the big kernel lock to avoid racing with open().
 */
static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
{
	Scsi_Device *SDpnt;
	struct Scsi_Host *shpnt;

	lock_kernel();
	/*
	 * If we are busy, this is not going to fly.
	 */
	if (GET_USE_COUNT(tpnt->module) != 0)
		goto error_out;

	/*
	 * Next, detach the devices from the driver.
	 */

	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt;
		     SDpnt = SDpnt->next) {
			if (tpnt->detach)
				(*tpnt->detach) (SDpnt);
			if (SDpnt->attached == 0) {
				/* No driver left on this device; take it
				 * offline before freeing its commands. */
				SDpnt->online = FALSE;

				/*
				 * Nobody is using this device any more.  Free all of the
				 * command structures.
				 */
				scsi_release_commandblocks(SDpnt);
			}
		}
	}
	/*
	 * Extract the template from the linked list.
	 */
	scsi_deregister_device(tpnt);

	MOD_DEC_USE_COUNT;
	unlock_kernel();
	/*
	 * Final cleanup for the driver is done in the driver sources in the
	 * cleanup function.
	 */
	return 0;
error_out:
	unlock_kernel();
	return -1;
}
2459
2460
2461 /* This function should be called by drivers which needs to register
2462 * with the midlevel scsi system. As of 2.4.0-test9pre3 this is our
2463 * main device/hosts register function /mathiasen
2464 */
scsi_register_module(int module_type,void * ptr)2465 int scsi_register_module(int module_type, void *ptr)
2466 {
2467 switch (module_type) {
2468 case MODULE_SCSI_HA:
2469 return scsi_register_host((Scsi_Host_Template *) ptr);
2470
2471 /* Load upper level device handler of some kind */
2472 case MODULE_SCSI_DEV:
2473 #ifdef CONFIG_KMOD
2474 if (scsi_hosts == NULL)
2475 request_module("scsi_hostadapter");
2476 #endif
2477 return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
2478 /* The rest of these are not yet implemented */
2479
2480 /* Load constants.o */
2481 case MODULE_SCSI_CONST:
2482
2483 /* Load specialized ioctl handler for some device. Intended for
2484 * cdroms that have non-SCSI2 audio command sets. */
2485 case MODULE_SCSI_IOCTL:
2486
2487 default:
2488 return 1;
2489 }
2490 }
2491
2492 /* Reverse the actions taken above
2493 */
scsi_unregister_module(int module_type,void * ptr)2494 int scsi_unregister_module(int module_type, void *ptr)
2495 {
2496 int retval = 0;
2497
2498 switch (module_type) {
2499 case MODULE_SCSI_HA:
2500 retval = scsi_unregister_host((Scsi_Host_Template *) ptr);
2501 break;
2502 case MODULE_SCSI_DEV:
2503 retval = scsi_unregister_device((struct Scsi_Device_Template *)ptr);
2504 break;
2505 /* The rest of these are not yet implemented. */
2506 case MODULE_SCSI_CONST:
2507 case MODULE_SCSI_IOCTL:
2508 break;
2509 default:;
2510 }
2511 return retval;
2512 }
2513
2514 #ifdef CONFIG_PROC_FS
2515 /*
2516 * Function: scsi_dump_status
2517 *
2518 * Purpose: Brain dump of scsi system, used for problem solving.
2519 *
2520 * Arguments: level - used to indicate level of detail.
2521 *
2522 * Notes: The level isn't used at all yet, but we need to find some way
2523 * of sensibly logging varying degrees of information. A quick one-line
2524 * display of each command, plus the status would be most useful.
2525 *
2526 * This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
2527 * it all off if the user wants a lean and mean kernel. It would probably
2528 * also be useful to allow the user to specify one single host to be dumped.
2529 * A second argument to the function would be useful for that purpose.
2530 *
2531 * FIXME - some formatting of the output into tables would be very handy.
2532 */
/*
 * scsi_dump_status - dump SCSI subsystem state to the kernel log.
 *
 * level: intended detail selector; currently unused (see the FIXME in the
 *        comment block above this function).
 *
 * Prints, for every host: the error/busy/active/blocked counters; then one
 * line per Scsi_Cmnd with its addressing, request, retry, timeout, command
 * and result fields; then the pending block-device request lists.
 * Compiled to an empty function unless CONFIG_SCSI_LOGGING is set.
 */
static void scsi_dump_status(int level)
{
#ifdef CONFIG_SCSI_LOGGING	/* { */
	int i;
	struct Scsi_Host *shpnt;
	Scsi_Cmnd *SCpnt;
	Scsi_Device *SDpnt;
	printk(KERN_INFO "Dump of scsi host parameters:\n");
	i = 0;
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		printk(KERN_INFO " %d %d %d : %d %d\n",
		       shpnt->host_failed,
		       shpnt->host_busy,
		       atomic_read(&shpnt->host_active),
		       shpnt->host_blocked,
		       shpnt->host_self_blocked);
	}

	printk(KERN_INFO "\n\n");
	printk(KERN_INFO "Dump of scsi command parameters:\n");
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		printk(KERN_INFO "h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
		for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
				/* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
				printk(KERN_INFO "(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
				       i++,

				       SCpnt->host->host_no,
				       SCpnt->channel,
				       SCpnt->target,
				       SCpnt->lun,

				       kdevname(SCpnt->request.rq_dev),
				       SCpnt->request.sector,
				       SCpnt->request.nr_sectors,
				       SCpnt->request.current_nr_sectors,
				       SCpnt->request.rq_status,
				       SCpnt->use_sg,

				       SCpnt->retries,
				       SCpnt->allowed,
				       SCpnt->flags,

				       SCpnt->timeout_per_command,
				       SCpnt->timeout,
				       SCpnt->internal_timeout,

				       SCpnt->cmnd[0],
				       SCpnt->sense_buffer[2],
				       SCpnt->result);
			}
		}
	}

	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			/* Now dump the request lists for each block device */
			printk(KERN_INFO "Dump of pending block device requests\n");
			for (i = 0; i < MAX_BLKDEV; i++) {
				struct list_head * queue_head;

				queue_head = &blk_dev[i].request_queue.queue_head;
				if (!list_empty(queue_head)) {
					struct request *req;
					struct list_head * entry;

					printk(KERN_INFO "%d: ", i);
					entry = queue_head->next;
					do {
						req = blkdev_entry_to_request(entry);
						printk("(%s %d %ld %ld %ld) ",
						       kdevname(req->rq_dev),
						       req->cmd,
						       req->sector,
						       req->nr_sectors,
						       req->current_nr_sectors);
					} while ((entry = entry->next) != queue_head);
					printk("\n");
				}
			}
		}
	}
#endif	/* CONFIG_SCSI_LOGGING */ /* } */
}
2618 #endif /* CONFIG_PROC_FS */
2619
scsi_host_no_init(char * str)2620 static int __init scsi_host_no_init (char *str)
2621 {
2622 static int next_no = 0;
2623 char *temp;
2624
2625 while (str) {
2626 temp = str;
2627 while (*temp && (*temp != ':') && (*temp != ','))
2628 temp++;
2629 if (!*temp)
2630 temp = NULL;
2631 else
2632 *temp++ = 0;
2633 scsi_host_no_insert(str, next_no);
2634 str = temp;
2635 next_no++;
2636 }
2637 return 1;
2638 }
2639
/* Raw value of the "scsihosts" module/boot parameter: an ordered list of
 * host adapter driver names used to fix host number assignment. */
static char *scsihosts;

MODULE_PARM(scsihosts, "s");
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");
2645
#ifndef MODULE
/*
 * scsi_setup - handler for the "scsihosts=" kernel boot option.
 *
 * str: the option value; just recorded here and parsed later by
 *      scsi_host_no_init() from init_scsi().
 *
 * Returns 1 to mark the option as consumed.
 */
int __init scsi_setup(char *str)
{
	scsihosts = str;
	return 1;
}

__setup("scsihosts=", scsi_setup);
#endif
2655
/*
 * init_scsi - initialize the SCSI mid-layer at boot / module load.
 *
 * Sets up the minimal DMA pool, the /proc/scsi hierarchy, the devfs
 * directory, host number reservations from "scsihosts=", and the bottom
 * half used for command completion.
 *
 * Returns 0 on success; -ENOMEM on /proc failures.
 * NOTE(review): the DMA-pool failure path returns 1 rather than a negative
 * errno like the other error paths - inconsistent, but callers of
 * module_init only test for non-zero; left as-is.
 */
static int __init init_scsi(void)
{
	struct proc_dir_entry *generic;

	printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");

	if( scsi_init_minimal_dma_pool() != 0 )
	{
		return 1;
	}

	/*
	 * This makes /proc/scsi and /proc/scsi/scsi visible.
	 */
#ifdef CONFIG_PROC_FS
	proc_scsi = proc_mkdir("scsi", 0);
	if (!proc_scsi) {
		printk (KERN_ERR "cannot init /proc/scsi\n");
		return -ENOMEM;
	}
	generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
	if (!generic) {
		printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
		/* Undo the directory we just created. */
		remove_proc_entry("scsi", 0);
		return -ENOMEM;
	}
	generic->write_proc = proc_scsi_gen_write;
#endif

	scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
	if (scsihosts)
		printk(KERN_INFO "scsi: host order: %s\n", scsihosts);
	scsi_host_no_init (scsihosts);
	/*
	 * This is where the processing takes place for most everything
	 * when commands are completed.
	 */
	init_bh(SCSI_BH, scsi_bottom_half_handler);

	return 0;
}
2697
/*
 * exit_scsi - tear down the SCSI mid-layer at module unload.
 *
 * Removes the completion bottom half, the devfs directory, the reserved
 * host-number list, the /proc/scsi entries, and frees the DMA pool.
 */
static void __exit exit_scsi(void)
{
	Scsi_Host_Name *shn, *shn2 = NULL;

	remove_bh(SCSI_BH);

	devfs_unregister (scsi_devfs_handle);
	/* Free the host-number list one node behind the cursor: shn2 is
	 * only freed after shn has advanced past it, so shn->next is never
	 * read from freed memory.  The final node is freed after the loop. */
	for (shn = scsi_host_no_list;shn;shn = shn->next) {
		if (shn->name)
			kfree(shn->name);
		if (shn2)
			kfree (shn2);
		shn2 = shn;
	}
	if (shn2)
		kfree (shn2);

#ifdef CONFIG_PROC_FS
	/* No, we're not here anymore. Don't show the /proc/scsi files. */
	remove_proc_entry ("scsi/scsi", 0);
	remove_proc_entry ("scsi", 0);
#endif

	/*
	 * Free up the DMA pool.
	 */
	scsi_resize_dma_pool();

}
2727
/* Entry and exit points for both built-in and modular builds. */
module_init(init_scsi);
module_exit(exit_scsi);
2730
2731 /*
2732 * Function: scsi_get_host_dev()
2733 *
2734 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2735 *
2736 * Arguments: SHpnt - Host that needs a Scsi_Device
2737 *
2738 * Lock status: None assumed.
2739 *
2740 * Returns: The Scsi_Device or NULL
2741 *
2742 * Notes:
2743 */
/*
 * scsi_get_host_dev - allocate a pseudo Scsi_Device representing the host
 * adapter itself (see the comment block above for the rationale).
 *
 * SHpnt: host that needs the pseudo-device.
 *
 * Returns the new Scsi_Device, or NULL if the kmalloc fails.
 *
 * NOTE(review): the scsi_build_commandblocks() result is not checked here;
 * if it fails, has_cmdblocks stays 0 - presumably callers cope, but
 * confirm against the users of this function.
 */
Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
{
	Scsi_Device * SDpnt;

	/*
	 * Attach a single Scsi_Device to the Scsi_Host - this should
	 * be made to look like a "pseudo-device" that points to the
	 * HA itself.  For the moment, we include it at the head of
	 * the host_queue itself - I don't think we want to show this
	 * to the HA in select_queue_depths(), as this would probably confuse
	 * matters.
	 * Note - this device is not accessible from any high-level
	 * drivers (including generics), which is probably not
	 * optimal.  We can add hooks later to attach
	 */
	SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device),
					GFP_ATOMIC);
	if(SDpnt == NULL)
		return NULL;

	memset(SDpnt, 0, sizeof(Scsi_Device));

	/* Give the pseudo-device the host's own target id so it is
	 * distinguishable (see the check in scsi_free_host_dev). */
	SDpnt->host = SHpnt;
	SDpnt->id = SHpnt->this_id;
	SDpnt->type = -1;
	SDpnt->queue_depth = 1;

	scsi_build_commandblocks(SDpnt);

	scsi_initialize_queue(SDpnt, SHpnt);

	SDpnt->online = TRUE;

	/*
	 * Initialize the object that we will use to wait for command blocks.
	 */
	init_waitqueue_head(&SDpnt->scpnt_wait);
	return SDpnt;
}
2783
2784 /*
2785 * Function: scsi_free_host_dev()
2786 *
2787 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2788 *
2789 * Arguments: SHpnt - Host that needs a Scsi_Device
2790 *
2791 * Lock status: None assumed.
2792 *
2793 * Returns: Nothing
2794 *
2795 * Notes:
2796 */
/*
 * scsi_free_host_dev - release a pseudo-device created by
 * scsi_get_host_dev().
 *
 * SDpnt: the pseudo-device; its id must equal its host's this_id, which is
 *        how scsi_get_host_dev() marked it.  Anything else is a caller bug
 *        and triggers a panic rather than silently freeing a real device.
 */
void scsi_free_host_dev(Scsi_Device * SDpnt)
{
	if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
	{
		panic("Attempt to delete wrong device\n");
	}

	blk_cleanup_queue(&SDpnt->request_queue);

	/*
	 * We only have a single SCpnt attached to this device.  Free
	 * it now.
	 */
	scsi_release_commandblocks(SDpnt);
	kfree(SDpnt);
}
2813
2814 /*
2815 * Function: scsi_reset_provider_done_command
2816 *
2817 * Purpose: Dummy done routine.
2818 *
2819 * Notes: Some low level drivers will call scsi_done and end up here,
2820 * others won't bother.
2821 * We don't want the bogus command used for the bus/device
2822 * reset to find its way into the mid-layer so we intercept
2823 * it here.
2824 */
static void
scsi_reset_provider_done_command(Scsi_Cmnd *SCpnt)
{
	/* Intentionally empty: swallows completion of the internal reset
	 * command so it never reaches the mid-layer (see comment above). */
}
2829
2830 /*
2831 * Function: scsi_reset_provider
2832 *
2833 * Purpose: Send requested reset to a bus or device at any phase.
2834 *
2835 * Arguments: device - device to send reset to
2836 * flag - reset type (see scsi.h)
2837 *
2838 * Returns: SUCCESS/FAILURE.
2839 *
2840 * Notes: This is used by the SCSI Generic driver to provide
2841 * Bus/Device reset capability.
2842 */
/*
 * scsi_reset_provider - send a bus/device reset on behalf of a client
 * (used by the SCSI generic driver; see comment block above).
 *
 * dev:  device to send the reset to.
 * flag: reset type (see scsi.h).
 *
 * Returns SUCCESS/FAILURE from the old or new error-handling reset path.
 *
 * Builds a fully zero/explicitly initialized Scsi_Cmnd on the stack so the
 * reset never touches the device's real command blocks.  Every field below
 * is set deliberately; do not reorder or drop assignments casually.
 */
int
scsi_reset_provider(Scsi_Device *dev, int flag)
{
	Scsi_Cmnd SC, *SCpnt = &SC;
	int rtn;

	memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
	SCpnt->host = dev->host;
	SCpnt->device = dev;
	SCpnt->target = dev->id;
	SCpnt->lun = dev->lun;
	SCpnt->channel = dev->channel;
	SCpnt->request.rq_status = RQ_SCSI_BUSY;
	SCpnt->request.waiting = NULL;
	SCpnt->use_sg = 0;
	SCpnt->old_use_sg = 0;
	SCpnt->old_cmd_len = 0;
	SCpnt->underflow = 0;
	SCpnt->transfersize = 0;
	SCpnt->resid = 0;
	SCpnt->serial_number = 0;
	SCpnt->serial_number_at_timeout = 0;
	SCpnt->host_scribble = NULL;
	SCpnt->next = NULL;
	SCpnt->state = SCSI_STATE_INITIALIZING;
	SCpnt->owner = SCSI_OWNER_MIDLEVEL;

	memset(&SCpnt->cmnd, '\0', sizeof(SCpnt->cmnd));

	/* Dummy completion handler: intercepts scsi_done so this bogus
	 * command never enters the mid-layer completion path. */
	SCpnt->scsi_done = scsi_reset_provider_done_command;
	SCpnt->done = NULL;
	SCpnt->reset_chain = NULL;

	SCpnt->buffer = NULL;
	SCpnt->bufflen = 0;
	SCpnt->request_buffer = NULL;
	SCpnt->request_bufflen = 0;

	SCpnt->internal_timeout = NORMAL_TIMEOUT;
	SCpnt->abort_reason = DID_ABORT;

	SCpnt->cmd_len = 0;

	SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
	SCpnt->sc_request = NULL;
	SCpnt->sc_magic = SCSI_CMND_MAGIC;

	/*
	 * Sometimes the command can get back into the timer chain,
	 * so use the pid as an identifier.
	 */
	SCpnt->pid = 0;

	/* New-EH drivers take their own locks; the old reset path must run
	 * under io_request_lock. */
	if (dev->host->hostt->use_new_eh_code) {
		rtn = scsi_new_reset(SCpnt, flag);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&io_request_lock, flags);
		rtn = scsi_old_reset(SCpnt, flag);
		spin_unlock_irqrestore(&io_request_lock, flags);
	}

	/* Make sure no timer added during the reset outlives this stack
	 * frame (the command is about to go out of scope). */
	scsi_delete_timer(SCpnt);
	return rtn;
}
2909
2910 /*
2911 * Overrides for Emacs so that we follow Linus's tabbing style.
2912 * Emacs will notice this stuff at the end of the file and automatically
2913 * adjust the settings for this buffer only. This must remain at the end
2914 * of the file.
2915 * ---------------------------------------------------------------------------
2916 * Local variables:
2917 * c-indent-level: 4
2918 * c-brace-imaginary-offset: 0
2919 * c-brace-offset: -4
2920 * c-argdecl-indent: 4
2921 * c-label-offset: -4
2922 * c-continued-statement-offset: 4
2923 * c-continued-brace-offset: 0
2924 * indent-tabs-mode: nil
2925 * tab-width: 8
2926 * End:
2927 */
2928