// SPDX-License-Identifier: GPL-2.0-only
/*
 *  hosts.c Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
 *          Copyright (C) 2002-2003 Christoph Hellwig
 *
 *  mid to lowlevel SCSI driver interface
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *  Added QLOGIC QLA1280 SCSI controller kernel host support.
 *     August 4, 1999 Fred Lewis, Intel DuPont
 *
 *  Updated to reflect the new initialization scheme for the higher
 *  level of scsi drivers (sd/sr/st)
 *  September 17, 2000 Torben Mathiasen <tmm@image.dk>
 *
 *  Restructured scsi_host lists and associated functions.
 *  September 04, 2002 Mike Anderson (andmike@us.ibm.com)
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


static int shost_eh_deadline = -1;

module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
		 "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");

static DEFINE_IDA(host_index_ida);


static void scsi_host_cls_release(struct device *dev)
{
	put_device(&class_to_shost(dev)->shost_gendev);
}

static struct class shost_class = {
	.name		= "scsi_host",
	.dev_release	= scsi_host_cls_release,
	.dev_groups	= scsi_shost_groups,
};

/**
 * scsi_host_set_state - Take the given host through the host state model.
 * @shost:	scsi host to change the state of.
 * @state:	state to change to.
 *
 * Returns zero if successful or an error if the requested
 * transition is illegal.
 **/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{
	enum scsi_host_state oldstate = shost->shost_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SHOST_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SHOST_RUNNING:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_RECOVERY:
		switch (oldstate) {
		case SHOST_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RUNNING:
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_DEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;
	}
	shost->shost_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_ERR, shost,
					     "Illegal host state transition "
					     "%s->%s\n",
					     scsi_host_state_name(oldstate),
					     scsi_host_state_name(state)));
	return -EINVAL;
}

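/*
 * Example (illustrative sketch, not part of this file): the SCSI error
 * handler is the typical caller that moves a host back out of recovery.
 * shost_state is protected by host_lock, so a caller would wrap the
 * transition like this:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	if (scsi_host_set_state(shost, SHOST_RUNNING))
 *		shost_printk(KERN_WARNING, shost,
 *			     "unable to return host to SHOST_RUNNING\n");
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */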
/**
 * scsi_remove_host - remove a scsi host
 * @shost:	a pointer to a scsi host to remove
 **/
void scsi_remove_host(struct Scsi_Host *shost)
{
	unsigned long flags;

	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_CANCEL))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
			spin_unlock_irqrestore(shost->host_lock, flags);
			mutex_unlock(&shost->scan_mutex);
			return;
		}
	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_autopm_get_host(shost);
	flush_workqueue(shost->tmf_work_q);
	scsi_forget_host(shost);
	mutex_unlock(&shost->scan_mutex);
	scsi_proc_host_rm(shost);

	/*
	 * New SCSI devices cannot be attached anymore because of the SCSI host
	 * state so drop the tag set refcnt. Wait until the tag set refcnt drops
	 * to zero because .exit_cmd_priv implementations may need the host
	 * pointer.
	 */
	kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
	wait_for_completion(&shost->tagset_freed);

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_DEL))
		BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);

	transport_unregister_device(&shost->shost_gendev);
	device_unregister(&shost->shost_dev);
	device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);

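/*
 * Example (hypothetical PCI LLD teardown, a minimal sketch): a driver's
 * .remove() callback normally pairs scsi_remove_host() with a final
 * scsi_host_put() that drops the reference taken by scsi_host_alloc():
 *
 *	static void my_pci_remove(struct pci_dev *pdev)
 *	{
 *		struct Scsi_Host *shost = pci_get_drvdata(pdev);
 *
 *		scsi_remove_host(shost);
 *		scsi_host_put(shost);
 *	}
 */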
/**
 * scsi_add_host_with_dma - add a scsi host with dma device
 * @shost:	scsi host pointer to add
 * @dev:	a struct device of type scsi class
 * @dma_dev:	dma device for the host
 *
 * Note: You rarely need to worry about this unless you're in a
 * virtualised host environment, so use the simpler scsi_add_host()
 * function instead.
 *
 * Return value:
 * 	0 on success / != 0 for error
 **/
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
			   struct device *dma_dev)
{
	struct scsi_host_template *sht = shost->hostt;
	int error = -EINVAL;

	shost_printk(KERN_INFO, shost, "%s\n",
			sht->info ? sht->info(shost) : sht->name);

	if (!shost->can_queue) {
		shost_printk(KERN_ERR, shost,
			     "can_queue = 0 no longer supported\n");
		goto fail;
	}

	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
				   shost->can_queue);

	error = scsi_init_sense_cache(shost);
	if (error)
		goto fail;

	if (!shost->shost_gendev.parent)
		shost->shost_gendev.parent = dev ? dev : &platform_bus;
	if (!dma_dev)
		dma_dev = shost->shost_gendev.parent;

	shost->dma_dev = dma_dev;

	if (dma_dev->dma_mask) {
		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
	}

	error = scsi_mq_setup_tags(shost);
	if (error)
		goto fail;

	kref_init(&shost->tagset_refcnt);
	init_completion(&shost->tagset_freed);

	/*
	 * Increase usage count temporarily here so that calling
	 * scsi_autopm_put_host() will trigger runtime idle if there is
	 * nothing else preventing suspending the device.
	 */
	pm_runtime_get_noresume(&shost->shost_gendev);
	pm_runtime_set_active(&shost->shost_gendev);
	pm_runtime_enable(&shost->shost_gendev);
	device_enable_async_suspend(&shost->shost_gendev);

	error = device_add(&shost->shost_gendev);
	if (error)
		goto out_disable_runtime_pm;

	scsi_host_set_state(shost, SHOST_RUNNING);
	get_device(shost->shost_gendev.parent);

	device_enable_async_suspend(&shost->shost_dev);

	get_device(&shost->shost_gendev);
	error = device_add(&shost->shost_dev);
	if (error)
		goto out_del_gendev;

	if (shost->transportt->host_size) {
		shost->shost_data = kzalloc(shost->transportt->host_size,
					    GFP_KERNEL);
		if (shost->shost_data == NULL) {
			error = -ENOMEM;
			goto out_del_dev;
		}
	}

	if (shost->transportt->create_work_queue) {
		snprintf(shost->work_q_name, sizeof(shost->work_q_name),
			 "scsi_wq_%d", shost->host_no);
		shost->work_q = alloc_workqueue("%s",
			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
			1, shost->work_q_name);

		if (!shost->work_q) {
			error = -EINVAL;
			goto out_del_dev;
		}
	}

	error = scsi_sysfs_add_host(shost);
	if (error)
		goto out_del_dev;

	scsi_proc_host_add(shost);
	scsi_autopm_put_host(shost);
	return error;

	/*
	 * Any host allocation in this function will be freed in
	 * scsi_host_dev_release().
	 */
 out_del_dev:
	device_del(&shost->shost_dev);
 out_del_gendev:
	/*
	 * Host state is SHOST_RUNNING so we have to explicitly release
	 * ->shost_dev.
	 */
	put_device(&shost->shost_dev);
	device_del(&shost->shost_gendev);
 out_disable_runtime_pm:
	device_disable_async_suspend(&shost->shost_gendev);
	pm_runtime_disable(&shost->shost_gendev);
	pm_runtime_set_suspended(&shost->shost_gendev);
	pm_runtime_put_noidle(&shost->shost_gendev);
	kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
 fail:
	return error;
}
EXPORT_SYMBOL(scsi_add_host_with_dma);

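/*
 * Example (hypothetical LLD probe, a minimal sketch): most drivers go
 * through scsi_add_host(), which passes dma_dev == NULL so that the
 * sysfs parent also does the DMA mapping. A distinct dma_dev is only
 * needed when the device that actually masters DMA differs from the
 * device shown in sysfs:
 *
 *	error = scsi_add_host_with_dma(shost, &vdev->dev, &pdev->dev);
 *	if (error) {
 *		scsi_host_put(shost);
 *		return error;
 *	}
 *	scsi_scan_host(shost);
 *
 * Here vdev (the virtual/sysfs parent) and pdev (the DMA-capable
 * device) are hypothetical.
 */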
static void scsi_host_dev_release(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct device *parent = dev->parent;

	scsi_proc_hostdir_rm(shost->hostt);

	/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
	rcu_barrier();

	if (shost->tmf_work_q)
		destroy_workqueue(shost->tmf_work_q);
	if (shost->ehandler)
		kthread_stop(shost->ehandler);
	if (shost->work_q)
		destroy_workqueue(shost->work_q);

	if (shost->shost_state == SHOST_CREATED) {
		/*
		 * Free the shost_dev device name here if scsi_host_alloc()
		 * and scsi_host_put() have been called but neither
		 * scsi_add_host() nor scsi_remove_host() has been called.
		 * This avoids leaking the memory allocated for the
		 * shost_dev name.
		 */
		kfree(dev_name(&shost->shost_dev));
	}

	kfree(shost->shost_data);

	ida_free(&host_index_ida, shost->host_no);

	if (shost->shost_state != SHOST_CREATED)
		put_device(parent);
	kfree(shost);
}

static struct device_type scsi_host_type = {
	.name =		"scsi_host",
	.release =	scsi_host_dev_release,
};

/**
 * scsi_host_alloc - register a scsi host adapter instance.
 * @sht:	pointer to scsi host template
 * @privsize:	extra bytes to allocate for driver
 *
 * Note:
 * 	Allocate a new Scsi_Host and perform basic initialization.
 * 	The host is not published to the scsi midlayer until scsi_add_host
 *	is called.
 *
 * Return value:
 * 	Pointer to a new Scsi_Host
 **/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
	struct Scsi_Host *shost;
	int index;

	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
	if (!shost)
		return NULL;

	shost->host_lock = &shost->default_lock;
	spin_lock_init(shost->host_lock);
	shost->shost_state = SHOST_CREATED;
	INIT_LIST_HEAD(&shost->__devices);
	INIT_LIST_HEAD(&shost->__targets);
	INIT_LIST_HEAD(&shost->eh_abort_list);
	INIT_LIST_HEAD(&shost->eh_cmd_q);
	INIT_LIST_HEAD(&shost->starved_list);
	init_waitqueue_head(&shost->host_wait);
	mutex_init(&shost->scan_mutex);

	index = ida_alloc(&host_index_ida, GFP_KERNEL);
	if (index < 0) {
		kfree(shost);
		return NULL;
	}
	shost->host_no = index;

	shost->dma_channel = 0xff;

	/* These three are default values which can be overridden */
	shost->max_channel = 0;
	shost->max_id = 8;
	shost->max_lun = 8;

	/* Give each shost a default transportt */
	shost->transportt = &blank_transport_template;

	/*
	 * All drivers right now should be able to handle 12 byte
	 * commands.  Every so often there are requests for 16 byte
	 * commands, but individual low-level drivers need to certify that
	 * they actually do something sensible with such commands.
	 */
	shost->max_cmd_len = 12;
	shost->hostt = sht;
	shost->this_id = sht->this_id;
	shost->can_queue = sht->can_queue;
	shost->sg_tablesize = sht->sg_tablesize;
	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
	shost->cmd_per_lun = sht->cmd_per_lun;
	shost->no_write_same = sht->no_write_same;
	shost->host_tagset = sht->host_tagset;

	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
		shost->eh_deadline = -1;
	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
		shost_printk(KERN_WARNING, shost,
			     "eh_deadline %u too large, setting to %u\n",
			     shost_eh_deadline, INT_MAX / HZ);
		shost->eh_deadline = INT_MAX;
	} else
		shost->eh_deadline = shost_eh_deadline * HZ;

	if (sht->supported_mode == MODE_UNKNOWN)
		/* means we didn't set it ... default to INITIATOR */
		shost->active_mode = MODE_INITIATOR;
	else
		shost->active_mode = sht->supported_mode;

	if (sht->max_host_blocked)
		shost->max_host_blocked = sht->max_host_blocked;
	else
		shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

	/*
	 * If the driver imposes no hard sector transfer limit, start at
	 * machine infinity initially.
	 */
	if (sht->max_sectors)
		shost->max_sectors = sht->max_sectors;
	else
		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

	if (sht->max_segment_size)
		shost->max_segment_size = sht->max_segment_size;
	else
		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;

	/*
	 * assume a 4GB boundary, if not set
	 */
	if (sht->dma_boundary)
		shost->dma_boundary = sht->dma_boundary;
	else
		shost->dma_boundary = 0xffffffff;

	if (sht->virt_boundary_mask)
		shost->virt_boundary_mask = sht->virt_boundary_mask;

	device_initialize(&shost->shost_gendev);
	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
	shost->shost_gendev.bus = &scsi_bus_type;
	shost->shost_gendev.type = &scsi_host_type;
	scsi_enable_async_suspend(&shost->shost_gendev);

	device_initialize(&shost->shost_dev);
	shost->shost_dev.parent = &shost->shost_gendev;
	shost->shost_dev.class = &shost_class;
	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
	shost->shost_dev.groups = sht->shost_groups;

	shost->ehandler = kthread_run(scsi_error_handler, shost,
			"scsi_eh_%d", shost->host_no);
	if (IS_ERR(shost->ehandler)) {
		shost_printk(KERN_WARNING, shost,
			"error handler thread failed to spawn, error = %ld\n",
			PTR_ERR(shost->ehandler));
		shost->ehandler = NULL;
		goto fail;
	}

	shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
					WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
					1, shost->host_no);
	if (!shost->tmf_work_q) {
		shost_printk(KERN_WARNING, shost,
			     "failed to create tmf workq\n");
		goto fail;
	}
	scsi_proc_hostdir_add(shost->hostt);
	return shost;
 fail:
	/*
	 * Host state is still SHOST_CREATED and that is enough to release
	 * ->shost_gendev. scsi_host_dev_release() will free
	 * dev_name(&shost->shost_dev).
	 */
	put_device(&shost->shost_gendev);

	return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);

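/*
 * Example (hypothetical LLD, a minimal sketch): the privsize bytes are
 * reserved for driver state, reachable through shost_priv(). Defaults
 * set above (max_id, max_lun, ...) can be overridden before the host
 * is added:
 *
 *	struct my_hba {
 *		void __iomem *regs;
 *	};
 *
 *	shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	hba = shost_priv(shost);
 *	shost->max_id = 16;
 *	shost->max_lun = 256;
 *
 * struct my_hba and my_template are hypothetical.
 */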
static int __scsi_host_match(struct device *dev, const void *data)
{
	struct Scsi_Host *p;
	const unsigned short *hostnum = data;

	p = class_to_shost(dev);
	return p->host_no == *hostnum;
}

/**
 * scsi_host_lookup - get a reference to a Scsi_Host by host no
 * @hostnum:	host number to locate
 *
 * Return value:
 *	A pointer to located Scsi_Host or NULL.
 *
 * The caller must do a scsi_host_put() to drop the reference
 * that scsi_host_get() took. The put_device() below dropped
 * the reference from class_find_device().
 **/
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
	struct device *cdev;
	struct Scsi_Host *shost = NULL;

	cdev = class_find_device(&shost_class, NULL, &hostnum,
				 __scsi_host_match);
	if (cdev) {
		shost = scsi_host_get(class_to_shost(cdev));
		put_device(cdev);
	}
	return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);

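/*
 * Example (illustrative sketch): an ioctl or management path that needs
 * a host by number must balance the reference this function takes:
 *
 *	shost = scsi_host_lookup(host_no);
 *	if (!shost)
 *		return -ENODEV;
 *	... use shost ...
 *	scsi_host_put(shost);
 */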
/**
 * scsi_host_get - inc a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to inc.
 **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
	if ((shost->shost_state == SHOST_DEL) ||
		!get_device(&shost->shost_gendev))
		return NULL;
	return shost;
}
EXPORT_SYMBOL(scsi_host_get);

static bool scsi_host_check_in_flight(struct request *rq, void *data)
{
	int *count = data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		(*count)++;

	return true;
}

/**
 * scsi_host_busy - Return the host busy counter
 * @shost:	Pointer to Scsi_Host to check.
 **/
int scsi_host_busy(struct Scsi_Host *shost)
{
	int cnt = 0;

	blk_mq_tagset_busy_iter(&shost->tag_set,
				scsi_host_check_in_flight, &cnt);
	return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);

/**
 * scsi_host_put - dec a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to dec.
 **/
void scsi_host_put(struct Scsi_Host *shost)
{
	put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);

int scsi_init_hosts(void)
{
	return class_register(&shost_class);
}

void scsi_exit_hosts(void)
{
	class_unregister(&shost_class);
	ida_destroy(&host_index_ida);
}

int scsi_is_host_device(const struct device *dev)
{
	return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);

/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost:	Pointer to Scsi_Host.
 * @work:	Work to queue for execution.
 *
 * Return value:
 * 	1 - work queued for execution
 *	0 - work is already queued
 *	-EINVAL - work queue doesn't exist
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
	if (unlikely(!shost->work_q)) {
		shost_printk(KERN_ERR, shost,
			"ERROR: Scsi host '%s' attempted to queue scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();

		return -EINVAL;
	}

	return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);

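/*
 * Example (illustrative sketch): transport classes that set
 * ->create_work_queue use this to defer event handling out of interrupt
 * context with a caller-owned work item:
 *
 *	INIT_WORK(&ev->work, my_event_fn);
 *	scsi_queue_work(shost, &ev->work);
 *
 * ev and my_event_fn() are hypothetical caller-side names.
 */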
/**
 * scsi_flush_work - Flush a Scsi_Host's workqueue.
 * @shost:	Pointer to Scsi_Host.
 **/
void scsi_flush_work(struct Scsi_Host *shost)
{
	if (!shost->work_q) {
		shost_printk(KERN_ERR, shost,
			"ERROR: Scsi host '%s' attempted to flush scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();
		return;
	}

	flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);

static bool complete_all_cmds_iter(struct request *rq, void *data)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	enum scsi_host_status status = *(enum scsi_host_status *)data;

	scsi_dma_unmap(scmd);
	scmd->result = 0;
	set_host_byte(scmd, status);
	scsi_done(scmd);
	return true;
}

/**
 * scsi_host_complete_all_commands - Terminate all running commands
 * @shost:	Scsi Host on which commands should be terminated
 * @status:	Status to be set for the terminated commands
 *
 * There is no protection against modification of the number
 * of outstanding commands. It is the responsibility of the
 * caller to ensure that concurrent I/O submission and/or
 * completion is stopped when calling this function.
 */
void scsi_host_complete_all_commands(struct Scsi_Host *shost,
				     enum scsi_host_status status)
{
	blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
				&status);
}
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);

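/*
 * Example (illustrative sketch): a driver that has just reset its
 * hardware, with submission and completion quiesced, can fail all
 * outstanding commands back to the midlayer for retry:
 *
 *	scsi_host_complete_all_commands(shost, DID_RESET);
 */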
struct scsi_host_busy_iter_data {
	bool (*fn)(struct scsi_cmnd *, void *);
	void *priv;
};

static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
{
	struct scsi_host_busy_iter_data *iter_data = priv;
	struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);

	return iter_data->fn(sc, iter_data->priv);
}

/**
 * scsi_host_busy_iter - Iterate over all busy commands
 * @shost:	Pointer to Scsi_Host.
 * @fn:		Function to call on each busy command
 * @priv:	Data pointer passed to @fn
 *
 * If locking against concurrent command completions is required, it has
 * to be provided by the caller.
 **/
void scsi_host_busy_iter(struct Scsi_Host *shost,
			 bool (*fn)(struct scsi_cmnd *, void *),
			 void *priv)
{
	struct scsi_host_busy_iter_data iter_data = {
		.fn = fn,
		.priv = priv,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,
				&iter_data);
}
EXPORT_SYMBOL_GPL(scsi_host_busy_iter);

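/*
 * Example (illustrative sketch): counting busy commands for one device
 * with a hypothetical callback; returning true keeps the iteration
 * going:
 *
 *	static bool my_count_for_sdev(struct scsi_cmnd *cmd, void *priv)
 *	{
 *		struct my_iter_ctx *ctx = priv;
 *
 *		if (cmd->device == ctx->sdev)
 *			ctx->count++;
 *		return true;
 *	}
 *
 *	scsi_host_busy_iter(shost, my_count_for_sdev, &ctx);
 *
 * struct my_iter_ctx is hypothetical.
 */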