/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

/*
 * The fundamental purpose of this file is to contain a library of utility
 * routines that can be used by low-level drivers.   Ultimately the idea
 * is that there should be a sufficiently rich set of functions that it
 * would be possible for a driver author to fashion a queueing function for
 * a low-level driver if they wished.   Note however that this file also
 * contains the "default" versions of these functions, as we don't want to
 * go through and retrofit queueing functions into all 30 some-odd drivers.
 */

#define __NO_VERSION__
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/blk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>


#define __KERNEL_SYSCALLS__

#include <linux/unistd.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <asm/dma.h>

#include "scsi.h"
#include "hosts.h"
#include "constants.h"
#include <scsi/scsi_ioctl.h>

/*
 * This entire source file deals with the new queueing code.
 */

/*
 * Function:	__scsi_insert_special()
 *
 * Purpose:	worker for scsi_insert_special_*()
 *
 * Arguments:	q - request queue where request should be inserted
 *		rq - request to be inserted
 * 		data - private data
 *		at_head - insert request at head or tail of queue
 *
 * Lock status:	Assumed that io_request_lock is not held upon entry.
 *
 * Returns:	Nothing
 */
static void __scsi_insert_special(request_queue_t *q, struct request *rq,
				  void *data, int at_head)
{
	unsigned long flags;

	ASSERT_LOCK(&io_request_lock, 0);

	rq->cmd = SPECIAL;
	rq->special = data;
	rq->q = NULL;
	rq->nr_segments = 0;
	rq->elevator_sequence = 0;

	/*
	 * We have the option of inserting at the head or the tail of the
	 * queue.  Typically we use the tail for new ioctls and so forth.
	 * We use the head of the queue for things like a QUEUE_FULL message
	 * from a device, or a host that is unable to accept a particular
	 * command.
	 */
	spin_lock_irqsave(&io_request_lock, flags);

	if (at_head)
		list_add(&rq->queue, &q->queue_head);
	else
		list_add_tail(&rq->queue, &q->queue_head);

	q->request_fn(q);
	spin_unlock_irqrestore(&io_request_lock, flags);
}


/*
 * Function:    scsi_insert_special_cmd()
 *
 * Purpose:     Insert pre-formed command into request queue.
 *
 * Arguments:   SCpnt   - command that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     0 - always.
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.   The idea is that
 *              we merely inject the command into the queue (at the head
 *              or the tail, as requested), and then call the queue request
 *              function to actually process it.
 */
int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
{
	request_queue_t *q = &SCpnt->device->request_queue;

	__scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
	return 0;
}

/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   SRpnt   - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     0 - always.
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI request needs to be issued.   The idea is that
 *              we merely inject the request into the queue (at the head
 *              or the tail, as requested), and then call the queue request
 *              function to actually process it.
 */
int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
{
	request_queue_t *q = &SRpnt->sr_device->request_queue;

	__scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
	return 0;
}
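
/*
 * Example: a minimal, hypothetical sketch of a pass-through caller.
 * The buffer, timeout and status setup that a real Scsi_Request needs
 * is omitted here; only the queueing step itself is shown:
 *
 *	Scsi_Request *SRpnt = scsi_allocate_request(SDpnt);
 *
 *	if (SRpnt) {
 *		memcpy(SRpnt->sr_cmnd, cmnd, COMMAND_SIZE(cmnd[0]));
 *		scsi_insert_special_req(SRpnt, 0);
 *	}
 *
 * New ioctls normally go at the tail (at_head == 0); a command bounced
 * back by a QUEUE_FULL or a busy host goes back at the head
 * (at_head == 1), as the worker above explains.
 */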

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize SCpnt fields related to error handling.
 *
 * Arguments:   SCpnt   - command that is ready to be queued.
 *
 * Returns:     1 - always.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
{
	ASSERT_LOCK(&io_request_lock, 0);

	SCpnt->owner = SCSI_OWNER_MIDLEVEL;
	SCpnt->reset_chain = NULL;
	SCpnt->serial_number = 0;
	SCpnt->serial_number_at_timeout = 0;
	SCpnt->flags = 0;
	SCpnt->retries = 0;

	SCpnt->abort_reason = 0;

	memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);

	if (SCpnt->cmd_len == 0)
		SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	SCpnt->old_use_sg = SCpnt->use_sg;
	SCpnt->old_cmd_len = SCpnt->cmd_len;
	SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
	SCpnt->old_underflow = SCpnt->underflow;
	memcpy((void *) SCpnt->data_cmnd,
	       (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));
	SCpnt->buffer = SCpnt->request_buffer;
	SCpnt->bufflen = SCpnt->request_bufflen;

	SCpnt->internal_timeout = NORMAL_TIMEOUT;

	return 1;
}

/*
 * Function:    scsi_queue_next_request()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   SCpnt   - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              that a medium error occurred, and the sectors after
 *              the bad block need to be re-read.
 *
 *              If SCpnt is NULL, it means that the previous command
 *              was completely finished, and we should simply start
 *              a new command, if possible.
 *
 *		This is where a lot of special case code has begun to
 *		accumulate.  It doesn't really affect readability or
 *		anything, but it might be considered architecturally
 *		inelegant.  If more of these special cases start to
 *		accumulate, I am thinking along the lines of implementing
 *		an atexit() like technology that gets run when commands
 *		complete.  I am not convinced that it is worth the
 *		added overhead, however.  Right now as things stand,
 *		there are simple conditional checks, and most hosts
 *		would skip right past them.
 *
 *		Another possible solution would be to tailor different
 *		handler functions, sort of like what we did in scsi_merge.c.
 *		This is probably a better solution, but the number of different
 *		permutations grows as 2**N, and if too many more special cases
 *		get added, we start to get screwed.
 */
void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
{
	int all_clear;
	unsigned long flags;
	Scsi_Device *SDpnt;
	struct Scsi_Host *SHpnt;

	ASSERT_LOCK(&io_request_lock, 0);

	spin_lock_irqsave(&io_request_lock, flags);
	if (SCpnt != NULL) {

		/*
		 * For some reason, we are not done with this request.
		 * This happens for I/O errors in the middle of the request,
		 * in which case we need to request the blocks that come after
		 * the bad sector.
		 */
		SCpnt->request.special = (void *) SCpnt;
		list_add(&SCpnt->request.queue, &q->queue_head);
	}

	/*
	 * Just hit the requeue function for the queue.
	 */
	q->request_fn(q);

	SDpnt = (Scsi_Device *) q->queuedata;
	SHpnt = SDpnt->host;

	/*
	 * If this is a single-lun device, and we are currently finished
	 * with this device, then see if we need to get another device
	 * started.  FIXME(eric) - if this function gets too cluttered
	 * with special case code, then spin off separate versions and
	 * use function pointers to pick the right one.
	 */
	if (SDpnt->single_lun
	    && list_empty(&q->queue_head)
	    && SDpnt->device_busy == 0) {
		request_queue_t *q;

		for (SDpnt = SHpnt->host_queue;
		     SDpnt;
		     SDpnt = SDpnt->next) {
			if (((SHpnt->can_queue > 0)
			     && (SHpnt->host_busy >= SHpnt->can_queue))
			    || (SHpnt->host_blocked)
			    || (SHpnt->host_self_blocked)
			    || (SDpnt->device_blocked)) {
				break;
			}
			q = &SDpnt->request_queue;
			q->request_fn(q);
		}
	}

	/*
	 * Now see whether there are other devices on the bus which
	 * might be starved.  If so, hit the request function.  If we
	 * don't find any, then it is safe to reset the flag.  If we
	 * find any device that is starved, it isn't safe to reset the
	 * flag as the queue function releases the lock and thus some
	 * other device might have become starved along the way.
	 */
	all_clear = 1;
	if (SHpnt->some_device_starved) {
		for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			request_queue_t *q;
			if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
			    || (SHpnt->host_blocked)
			    || (SHpnt->host_self_blocked)) {
				break;
			}
			if (SDpnt->device_blocked || !SDpnt->starved) {
				continue;
			}
			q = &SDpnt->request_queue;
			q->request_fn(q);
			all_clear = 0;
		}
		if (SDpnt == NULL && all_clear) {
			SHpnt->some_device_starved = 0;
		}
	}
	spin_unlock_irqrestore(&io_request_lock, flags);
}

/*
 * Function:    __scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands called from interrupt
 *              handler or a bottom-half handler.
 *
 * Arguments:   SCpnt    - command that is complete.
 *              uptodate - 1 if I/O indicates success, 0 for I/O error.
 *              sectors  - number of sectors we want to mark.
 *		requeue  - indicates whether we should requeue leftovers.
 *		frequeue - indicates that the queue request function
 *			   should be called if we release the command block.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     SCpnt if there are leftover blocks, NULL if the request
 *		was fully completed.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
				     int uptodate,
				     int sectors,
				     int requeue,
				     int frequeue)
{
	request_queue_t *q = &SCpnt->device->request_queue;
	struct request *req;
	struct buffer_head *bh;
	unsigned long flags;
	int nsect;

	ASSERT_LOCK(&io_request_lock, 0);

	req = &SCpnt->request;
	req->errors = 0;
	if (!uptodate) {
		printk(" I/O error: dev %s, sector %lu\n",
		       kdevname(req->rq_dev), req->sector);
	}
	do {
		if ((bh = req->bh) != NULL) {
			nsect = bh->b_size >> 9;
			blk_finished_io(nsect);
			blk_finished_sectors(req, nsect);
			req->bh = bh->b_reqnext;
			bh->b_reqnext = NULL;
			sectors -= nsect;
			bh->b_end_io(bh, uptodate);
			if ((bh = req->bh) != NULL) {
				req->hard_sector += nsect;
				req->hard_nr_sectors -= nsect;
				req->sector += nsect;
				req->nr_sectors -= nsect;

				req->current_nr_sectors = bh->b_size >> 9;
				req->hard_cur_sectors = req->current_nr_sectors;
				if (req->nr_sectors < req->current_nr_sectors) {
					req->nr_sectors = req->current_nr_sectors;
					printk("scsi_end_request: buffer-list destroyed\n");
				}
			}
		}
	} while (sectors && bh);

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (req->bh) {
		/*
		 * Recount segments whether we are immediately going to
		 * requeue the command or not; other code might requeue
		 * it later, and since we changed the segment count above,
		 * we need it updated.
		 */
		recount_segments(SCpnt);

		/*
		 * Bleah.  Leftovers again.  Stick the leftovers in
		 * the front of the queue, and goose the queue again.
		 */
		if (requeue)
			scsi_queue_next_request(q, SCpnt);

		return SCpnt;
	}

	spin_lock_irqsave(&io_request_lock, flags);
	req_finished_io(req);
	spin_unlock_irqrestore(&io_request_lock, flags);

	add_blkdev_randomness(MAJOR(req->rq_dev));

	/*
	 * This request is done.  If there is someone blocked waiting for this
	 * request, wake them up.  Do this last, as 'req' might be on the stack
	 * and thus not valid right after the complete() call if the task
	 * exits.
	 */
	if (req->waiting)
		complete(req->waiting);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_command(SCpnt);

	if (frequeue)
		scsi_queue_next_request(q, NULL);

	return NULL;
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands called from interrupt
 *              handler or a bottom-half handler.
 *
 * Arguments:   SCpnt    - command that is complete.
 *              uptodate - 1 if I/O indicates success, 0 for I/O error.
 *              sectors  - number of sectors we want to mark.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     SCpnt if there are leftover blocks, NULL if the request
 *		was fully completed.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
{
	return __scsi_end_request(SCpnt, uptodate, sectors, 1, 1);
}
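
/*
 * Example: an upper-level driver's completion routine would typically
 * compute how many sectors completed successfully and then let
 * scsi_end_request() finish them and requeue any remainder.  A minimal
 * sketch (example_rw_intr() is a made-up name; real callers such as
 * the disk driver's rw_intr also inspect the result and sense data
 * before deciding on good_sectors):
 *
 *	static void example_rw_intr(Scsi_Cmnd *SCpnt)
 *	{
 *		int good_sectors = SCpnt->bufflen >> 9;
 *
 *		SCpnt = scsi_end_request(SCpnt, 1, good_sectors);
 *	}
 *
 * If scsi_end_request() returns non-NULL, the leftover sectors were
 * requeued and the command block is still live; the caller must not
 * release it.
 */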

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated to a command that is being bailed.
 *
 * Arguments:   SCpnt   - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(Scsi_Cmnd * SCpnt)
{
	ASSERT_LOCK(&io_request_lock, 0);

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (SCpnt->use_sg) {
		struct scatterlist *sgpnt;
		void **bbpnt;
		int i;

		sgpnt = (struct scatterlist *) SCpnt->request_buffer;
		bbpnt = SCpnt->bounce_buffers;

		if (bbpnt) {
			for (i = 0; i < SCpnt->use_sg; i++) {
				if (bbpnt[i])
					scsi_free(sgpnt[i].address, sgpnt[i].length);
			}
		}
		scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
	} else {
		if (SCpnt->request_buffer != SCpnt->request.buffer) {
			scsi_free(SCpnt->request_buffer, SCpnt->request_bufflen);
		}
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	SCpnt->buffer  = NULL;
	SCpnt->bufflen = 0;
	SCpnt->request_buffer = NULL;
	SCpnt->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   SCpnt         - command that is finished.
 *              good_sectors  - number of sectors that completed successfully.
 *              block_sectors - number of sectors to fail in the event of
 *                              an unrecovered medium error.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 */
void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
			int block_sectors)
{
	int result = SCpnt->result;
	int this_count = SCpnt->bufflen >> 9;
	request_queue_t *q = &SCpnt->device->request_queue;
	struct request *req = &SCpnt->request;

	/*
	 * We must do one of several things here:
	 *
	 *	Call scsi_end_request.  This will finish off the specified
	 *	number of sectors.  If we are done, the command block will
	 *	be released, and the queue function will be goosed.  If we
	 *	are not done, then scsi_end_request will directly goose
	 *	the queue.
	 *
	 *	We can just use scsi_queue_next_request() here.  This
	 *	would be used if we just wanted to retry, for example.
	 *
	 */
	ASSERT_LOCK(&io_request_lock, 0);

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (SCpnt->use_sg) {
		struct scatterlist *sgpnt;
		void **bbpnt;
		int i;

		sgpnt = (struct scatterlist *) SCpnt->buffer;
		bbpnt = SCpnt->bounce_buffers;

		if (bbpnt) {
			for (i = 0; i < SCpnt->use_sg; i++) {
				if (bbpnt[i]) {
					if (req->cmd == READ) {
						memcpy(bbpnt[i],
						       sgpnt[i].address,
						       sgpnt[i].length);
					}
					scsi_free(sgpnt[i].address, sgpnt[i].length);
				}
			}
		}
		scsi_free(SCpnt->buffer, SCpnt->sglist_len);
	} else {
		if (SCpnt->buffer != req->buffer) {
			if (PageHighMem(req->bh->b_page))
				BUG();
			if (req->cmd == READ)
				memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen);
			scsi_free(SCpnt->buffer, SCpnt->bufflen);
		}
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	SCpnt->buffer  = NULL;
	SCpnt->bufflen = 0;
	SCpnt->request_buffer = NULL;
	SCpnt->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_sectors > 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n",
					      SCpnt->request.nr_sectors,
					      good_sectors));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", SCpnt->use_sg));

		req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_sectors != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
		SCpnt = __scsi_end_request(SCpnt,
					   1,
					   good_sectors,
					   result == 0,
					   1);

		/*
		 * If the command completed without error, then either finish off the
		 * rest of the command, or start a new one.
		 */
		if (result == 0 || SCpnt == NULL) {
			return;
		}
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (driver_byte(result) != 0) {
		if (suggestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
			/*
			 * Not yet implemented.  A read will fail after being remapped,
			 * a write will call the strategy routine again.
			 */
			if (SCpnt->device->remap) {
				result = 0;
			}
#endif
		}
		if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (SCpnt->sense_buffer[12] == 0x04 &&
			    SCpnt->sense_buffer[13] == 0x01) {
				scsi_queue_next_request(q, SCpnt);
				return;
			}
			if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
				if (SCpnt->device->removable) {
					/*
					 * Detected disc change.  Set a bit
					 * and quietly refuse further access.
					 */
					SCpnt->device->changed = 1;
					SCpnt = scsi_end_request(SCpnt, 0, this_count);
					return;
				} else {
					/*
					 * Must have been a power glitch, or a
					 * bus reset.  Could not have been a
					 * media change, so we just retry the
					 * request and see what happens.
					 */
					scsi_queue_next_request(q, SCpnt);
					return;
				}
			}
		}
		/* If we had an ILLEGAL REQUEST returned, then we may have
		 * performed an unsupported command.  Typically this means a
		 * ten byte read was issued where only a six byte read is
		 * supported.  Also, on a system where READ CAPACITY failed,
		 * we may have read past the end of the disk.
		 */

		switch (SCpnt->sense_buffer[2]) {
		case RECOVERED_ERROR: /* Added, KG, 2003-01-20 */
			return;
		case ILLEGAL_REQUEST:
			if (SCpnt->device->ten && SCSI_RETRY_10(SCpnt->cmnd[0])) {
				SCpnt->device->ten = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_queue_next_request(q, SCpnt);
				result = 0;
			} else {
				SCpnt = scsi_end_request(SCpnt, 0, this_count);
				return;
			}
			break;
		case NOT_READY:
			printk(KERN_INFO "Device %s not ready.\n",
			       kdevname(SCpnt->request.rq_dev));
			SCpnt = scsi_end_request(SCpnt, 0, this_count);
			return;
		case MEDIUM_ERROR:
		case VOLUME_OVERFLOW:
			printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
			       SCpnt->host->host_no, (int) SCpnt->channel,
			       (int) SCpnt->target, (int) SCpnt->lun);
			print_command(SCpnt->cmnd);
			print_sense("sd", SCpnt);
			SCpnt = scsi_end_request(SCpnt, 0, block_sectors);
			return;
		default:
			break;
		}
	}			/* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_queue_next_request(q, SCpnt);
		return;
	}
	if (result) {
		struct Scsi_Device_Template *STpnt;

		STpnt = scsi_get_request_dev(&SCpnt->request);
		printk("SCSI %s error : host %d channel %d id %d lun %d return code = %x\n",
		       (STpnt ? STpnt->name : "device"),
		       SCpnt->device->host->host_no,
		       SCpnt->device->channel,
		       SCpnt->device->id,
		       SCpnt->device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			print_sense("sd", SCpnt);
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
		return;
	}
}
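
/*
 * For reference, the sense-buffer bytes tested above follow the fixed
 * sense data format of the SCSI-2 specification:
 *
 *	sense_buffer[0] & 0x7f	response code (0x70 = current error)
 *	sense_buffer[2] & 0x0f	sense key (NOT_READY, MEDIUM_ERROR, ...)
 *	sense_buffer[12]	additional sense code (ASC)
 *	sense_buffer[13]	additional sense code qualifier (ASCQ)
 *
 * so the 0x04/0x01 check above matches the ASC/ASCQ pair for "logical
 * unit is in the process of becoming ready".
 */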

/*
 * Function:    scsi_get_request_dev()
 *
 * Purpose:     Find the upper-level driver that is responsible for this
 *              request
 *
 * Arguments:   request   - I/O request we are preparing to queue.
 *
 * Lock status: No locks assumed to be held, but as it happens the
 *              io_request_lock is held when this is called.
 *
 * Returns:     Pointer to the matching Scsi_Device_Template, or NULL
 *              if no upper-level driver claims the request's major.
 *
 * Notes:       The requests in the request queue may have originated
 *              from any block device driver.  We need to find out which
 *              one so that we can later form the appropriate command.
 */
struct Scsi_Device_Template *scsi_get_request_dev(struct request *req)
{
	struct Scsi_Device_Template *spnt;
	kdev_t dev = req->rq_dev;
	int major = MAJOR(dev);

	ASSERT_LOCK(&io_request_lock, 1);

	for (spnt = scsi_devicelist; spnt; spnt = spnt->next) {
		/*
		 * Search for a block device driver that supports this
		 * major.
		 */
		if (spnt->blk && spnt->major == major) {
			return spnt;
		}
		/*
		 * I am still not entirely satisfied with this solution,
		 * but it is good enough for now.  Disks have a number of
		 * major numbers associated with them - the primary
		 * major (8), which we test above, and a secondary range
		 * of seven consecutive major numbers.  If this ever
		 * becomes insufficient, then we could add another function
		 * to the structure, and generalize this completely.
		 */
		if( spnt->min_major != 0
		    && spnt->max_major != 0
		    && major >= spnt->min_major
		    && major <= spnt->max_major )
		{
			return spnt;
		}
	}
	return NULL;
}
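
/*
 * Example: a hypothetical upper-level driver template that the loop
 * above would match.  Only the fields consulted here are shown, with
 * illustrative values loosely modelled on the disk driver:
 *
 *	struct Scsi_Device_Template example_template = {
 *		name:		"disk",
 *		blk:		1,
 *		major:		SCSI_DISK0_MAJOR,
 *		min_major:	SCSI_DISK1_MAJOR,
 *		max_major:	SCSI_DISK7_MAJOR,
 *	};
 *
 * A request whose MAJOR(rq_dev) equals major, or falls within
 * [min_major, max_major], is handed to this template.
 */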

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Generic version of request function for SCSI hosts.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 *
 * Notes:       The theory is that this function is something which individual
 *              drivers could also supply if they wished to.   The problem
 *              is that we have 30 some odd low-level drivers in the kernel
 *              tree already, and it would be most difficult to retrofit
 *              this crap into all of them.   Thus this function has the job
 *              of acting as a generic queue manager for all of those existing
 *              drivers.
 */
void scsi_request_fn(request_queue_t * q)
{
	struct request *req;
	Scsi_Cmnd *SCpnt;
	Scsi_Request *SRpnt;
	Scsi_Device *SDpnt;
	struct Scsi_Host *SHpnt;
	struct Scsi_Device_Template *STpnt;

	ASSERT_LOCK(&io_request_lock, 1);

	SDpnt = (Scsi_Device *) q->queuedata;
	if (!SDpnt) {
		panic("Missing device");
	}
	SHpnt = SDpnt->host;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	while (1 == 1) {
		/*
		 * Check this again - each time we loop through we will have
		 * released the lock and grabbed it again, so each time
		 * we need to check to see if the queue is plugged or not.
		 */
		if (SHpnt->in_recovery || q->plugged)
			return;

		/*
		 * If the device cannot accept another request, then quit.
		 */
		if (SDpnt->device_blocked) {
			break;
		}
		if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
		    || (SHpnt->host_blocked)
		    || (SHpnt->host_self_blocked)) {
			/*
			 * If we are unable to process any commands at all for
			 * this device, then we consider it to be starved.
			 * What this means is that there are no outstanding
			 * commands for this device and hence we need a
			 * little help getting it started again
			 * once the host isn't quite so busy.
			 */
			if (SDpnt->device_busy == 0) {
				SDpnt->starved = 1;
				SHpnt->some_device_starved = 1;
			}
			break;
		} else {
			SDpnt->starved = 0;
		}

		/*
		 * FIXME(eric)
		 * I am not sure where the best place to do this is.  We need
		 * to hook in at a place we are likely to reach when running
		 * in user context.   Technically the error handling thread
		 * should be doing this crap, but the error handler isn't
		 * used by most hosts.
		 */
		if (SDpnt->was_reset) {
			/*
			 * We need to relock the door, but we might
			 * be in an interrupt handler.  Only do this
			 * from user space, since we do not want to
			 * sleep from an interrupt.
			 *
			 * FIXME(eric) - have the error handler thread do
			 * this work.
			 */
			SDpnt->was_reset = 0;
			if (SDpnt->removable && !in_interrupt()) {
				spin_unlock_irq(&io_request_lock);
				scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
				spin_lock_irq(&io_request_lock);
				continue;
			}
		}

		/*
		 * If we couldn't find a request that could be queued, then we
		 * can also quit.
		 */
		if (list_empty(&q->queue_head))
			break;

		/*
		 * Loop through all of the requests in this queue, and find
		 * one that is queueable.
		 */
		req = blkdev_entry_next_request(&q->queue_head);

		/*
		 * Find the actual device driver associated with this command.
		 * The SPECIAL requests are things like character device or
		 * ioctls, which did not originate from ll_rw_blk.  Note that
		 * the special field is also used to indicate the SCpnt for
		 * the remainder of a partially fulfilled request that can
		 * come up when there is a medium error.  We have to treat
		 * these two cases differently.  We differentiate by looking
		 * at request.cmd, as this tells us the real story.
		 */
		if (req->cmd == SPECIAL) {
			STpnt = NULL;
			SCpnt = (Scsi_Cmnd *) req->special;
			SRpnt = (Scsi_Request *) req->special;

			if( SRpnt->sr_magic == SCSI_REQ_MAGIC ) {
				SCpnt = scsi_allocate_device(SRpnt->sr_device,
							     FALSE, FALSE);
				if( !SCpnt ) {
					break;
				}
				scsi_init_cmd_from_req(SCpnt, SRpnt);
			}

		} else {
			SRpnt = NULL;
			STpnt = scsi_get_request_dev(req);
			if (!STpnt) {
				panic("Unable to find device associated with request");
			}
			/*
			 * Now try and find a command block that we can use.
			 */
			if( req->special != NULL ) {
				SCpnt = (Scsi_Cmnd *) req->special;
			} else {
				SCpnt = scsi_allocate_device(SDpnt, FALSE, FALSE);
			}
			/*
			 * If so, we are ready to do something.  Bump the count
			 * while the queue is locked and then break out of the
			 * loop. Otherwise loop around and try another request.
			 */
			if (!SCpnt) {
				break;
			}
		}

		/*
		 * Now bump the usage count for both the host and the
		 * device.
		 */
		SHpnt->host_busy++;
		SDpnt->device_busy++;

		/*
		 * Finally, before we release the lock, we copy the
		 * request to the command block, and remove the
		 * request from the request list.   Note that we always
		 * operate on the queue head - there is absolutely no
		 * reason to search the list, because all of the commands
		 * in this queue are for the same device.
		 */
		blkdev_dequeue_request(req);

		if (req != &SCpnt->request && req != &SRpnt->sr_request ) {
			memcpy(&SCpnt->request, req, sizeof(struct request));

			/*
			 * We have copied the data out of the request block -
			 * it is now in a field in SCpnt.  Release the request
			 * block.
			 */
			blkdev_release_request(req);
		}
		/*
		 * Now it is finally safe to release the lock.  We are
		 * not going to noodle the request list until this
		 * request has been queued and we loop back to queue
		 * another.
		 */
		req = NULL;
		spin_unlock_irq(&io_request_lock);

		if (SCpnt->request.cmd != SPECIAL) {
			/*
			 * This will do a couple of things:
			 *  1) Fill in the actual SCSI command.
			 *  2) Fill in any other upper-level specific fields
			 * (timeout).
			 *
			 * If this returns 0, it means that the request failed
			 * (reading past end of disk, reading offline device,
			 * etc).   This won't actually talk to the device, but
			 * some kinds of consistency checking may cause the
			 * request to be rejected immediately.
			 */
			if (STpnt == NULL) {
				STpnt = scsi_get_request_dev(req);
			}
			/*
			 * This sets up the scatter-gather table (allocating if
			 * required).  Hosts that need bounce buffers will also
			 * get those allocated here.
			 */
			if (!SDpnt->scsi_init_io_fn(SCpnt)) {
				/*
				 * Probably we ran out of sgtable memory, or
				 * __init_io() wanted to revert to a single
				 * segment request.  This would require bouncing
				 * on highmem i/o, so mark the device as
				 * starved and continue later instead.
				 */
				spin_lock_irq(&io_request_lock);
				SHpnt->host_busy--;
				SDpnt->device_busy--;
				if (SDpnt->device_busy == 0) {
					SDpnt->starved = 1;
					SHpnt->some_device_starved = 1;
				}
				SCpnt->request.special = SCpnt;
				list_add(&SCpnt->request.queue, &q->queue_head);
				break;
			}

			/*
			 * Initialize the actual SCSI command for this request.
			 */
			if (!STpnt->init_command(SCpnt)) {
				scsi_release_buffers(SCpnt);
				SCpnt = __scsi_end_request(SCpnt, 0,
							   SCpnt->request.nr_sectors, 0, 0);
				if( SCpnt != NULL )
				{
					panic("Should not have leftover blocks\n");
				}
				spin_lock_irq(&io_request_lock);
				SHpnt->host_busy--;
				SDpnt->device_busy--;
				continue;
			}
		}
		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(SCpnt);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		scsi_dispatch_cmd(SCpnt);

		/*
		 * Now we need to grab the lock again.  We are about to mess
		 * with the request queue and try to find another command.
		 */
		spin_lock_irq(&io_request_lock);
	}
}
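
/*
 * For context: the mid-layer attaches this function to each device's
 * queue at initialization time, roughly as follows (a sketch; the real
 * setup lives elsewhere in the mid-layer, in scsi.c's
 * scsi_initialize_queue() in contemporary kernels, and also installs
 * the merge and init_io functions):
 *
 *	request_queue_t *q = &SDpnt->request_queue;
 *
 *	blk_init_queue(q, scsi_request_fn);
 *	q->queuedata = (void *) SDpnt;
 *
 * which is why q->queuedata can be cast back to the Scsi_Device at the
 * top of this function.
 */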

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   SHpnt       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host * SHpnt)
{
	SHpnt->host_self_blocked = TRUE;
}
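
/*
 * Example: a low-level driver typically brackets an operation during
 * which it cannot accept commands, such as a firmware download or an
 * internal adapter reset.  A sketch, with made-up driver helpers:
 *
 *	static void example_download_firmware(struct Scsi_Host *SHpnt)
 *	{
 *		scsi_block_requests(SHpnt);
 *		example_feed_firmware_to_adapter(SHpnt);
 *		scsi_unblock_requests(SHpnt);
 *	}
 *
 * Nothing unblocks the host automatically, so a driver that blocks and
 * then fails must still call scsi_unblock_requests() on its error path.
 */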

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   SHpnt       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host * SHpnt)
{
	Scsi_Device *SDloop;

	SHpnt->host_self_blocked = FALSE;
	/* Now that we are unblocked, try to start the queues. */
	for (SDloop = SHpnt->host_queue; SDloop; SDloop = SDloop->next)
		scsi_queue_next_request(&SDloop->request_queue, NULL);
}

/*
 * Function:    scsi_report_bus_reset()
 *
 * Purpose:     Utility function used by low-level drivers to report that
 *		they have observed a bus reset on the bus being handled.
 *
 * Arguments:   SHpnt       - Host in question
 *		channel     - channel on which reset was observed.
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a CHECK_CONDITION
 *		is properly treated.
 */
void scsi_report_bus_reset(struct Scsi_Host * SHpnt, int channel)
{
	Scsi_Device *SDloop;
	for (SDloop = SHpnt->host_queue; SDloop; SDloop = SDloop->next) {
		if (channel == SDloop->channel) {
			SDloop->was_reset = 1;
			SDloop->expecting_cc_ua = 1;
		}
	}
}
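
/*
 * Example: a low-level driver's interrupt handler might report an
 * externally generated reset like this (a sketch; the status check is
 * of course adapter specific, and adapter_saw_bus_reset() is a made-up
 * stand-in for it):
 *
 *	static void example_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct Scsi_Host *SHpnt = (struct Scsi_Host *) dev_id;
 *
 *		if (adapter_saw_bus_reset(SHpnt))
 *			scsi_report_bus_reset(SHpnt, 0);
 *	}
 *
 * where 0 is the channel on which the reset was seen.
 */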

/*
 * FIXME(eric) - these are empty stubs for the moment.  I need to re-implement
 * host blocking from scratch. The theory is that hosts that wish to block
 * will register/deregister using these functions instead of the old way
 * of setting the wish_block flag.
 *
 * The details of the implementation remain to be settled, however the
 * stubs are here now so that the actual drivers will properly compile.
 */
void scsi_register_blocked_host(struct Scsi_Host * SHpnt)
{
}

void scsi_deregister_blocked_host(struct Scsi_Host * SHpnt)
{
}