1 /*
2 * scsi_sysfs.c
3 *
4 * SCSI sysfs interface routines.
5 *
6 * Created to pull SCSI mid layer sysfs routines into one file.
7 */
8
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/init.h>
12 #include <linux/blkdev.h>
13 #include <linux/device.h>
14 #include <linux/pm_runtime.h>
15
16 #include <scsi/scsi.h>
17 #include <scsi/scsi_device.h>
18 #include <scsi/scsi_host.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/scsi_transport.h>
21 #include <scsi/scsi_driver.h>
22
23 #include "scsi_priv.h"
24 #include "scsi_logging.h"
25
26 static struct device_type scsi_dev_type;
27
28 static const struct {
29 enum scsi_device_state value;
30 char *name;
31 } sdev_states[] = {
32 { SDEV_CREATED, "created" },
33 { SDEV_RUNNING, "running" },
34 { SDEV_CANCEL, "cancel" },
35 { SDEV_DEL, "deleted" },
36 { SDEV_QUIESCE, "quiesce" },
37 { SDEV_OFFLINE, "offline" },
38 { SDEV_BLOCK, "blocked" },
39 { SDEV_CREATED_BLOCK, "created-blocked" },
40 };
41
42 const char *scsi_device_state_name(enum scsi_device_state state)
43 {
44 int i;
45 char *name = NULL;
46
47 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
48 if (sdev_states[i].value == state) {
49 name = sdev_states[i].name;
50 break;
51 }
52 }
53 return name;
54 }
55
56 static const struct {
57 enum scsi_host_state value;
58 char *name;
59 } shost_states[] = {
60 { SHOST_CREATED, "created" },
61 { SHOST_RUNNING, "running" },
62 { SHOST_CANCEL, "cancel" },
63 { SHOST_DEL, "deleted" },
64 { SHOST_RECOVERY, "recovery" },
65 { SHOST_CANCEL_RECOVERY, "cancel/recovery" },
66 { SHOST_DEL_RECOVERY, "deleted/recovery", },
67 };
68 const char *scsi_host_state_name(enum scsi_host_state state)
69 {
70 int i;
71 char *name = NULL;
72
73 for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
74 if (shost_states[i].value == state) {
75 name = shost_states[i].name;
76 break;
77 }
78 }
79 return name;
80 }
81
82 static int check_set(unsigned int *val, char *src)
83 {
84 char *last;
85
86 if (strncmp(src, "-", 20) == 0) {
87 *val = SCAN_WILD_CARD;
88 } else {
89 /*
90 * Doesn't check for int overflow
91 */
92 *val = simple_strtoul(src, &last, 0);
93 if (*last != '\0')
94 return 1;
95 }
96 return 0;
97 }
98
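/*
 * For reference: the string parsed here is what userspace writes to the
 * host's "scan" attribute (typically /sys/class/scsi_host/hostN/scan).
 * It takes three tokens - channel, id and lun - where "-" selects
 * SCAN_WILD_CARD for that field, e.g. (illustrative shell usage):
 *
 *   echo "0 0 0" > /sys/class/scsi_host/host0/scan   # scan channel 0, id 0, lun 0
 *   echo "- - -" > /sys/class/scsi_host/host0/scan   # scan everything on host0
 */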
99 static int scsi_scan(struct Scsi_Host *shost, const char *str)
100 {
101 char s1[15], s2[15], s3[15], junk;
102 unsigned int channel, id, lun;
103 int res;
104
105 res = sscanf(str, "%10s %10s %10s %c", s1, s2, s3, &junk);
106 if (res != 3)
107 return -EINVAL;
108 if (check_set(&channel, s1))
109 return -EINVAL;
110 if (check_set(&id, s2))
111 return -EINVAL;
112 if (check_set(&lun, s3))
113 return -EINVAL;
114 if (shost->transportt->user_scan)
115 res = shost->transportt->user_scan(shost, channel, id, lun);
116 else
117 res = scsi_scan_host_selected(shost, channel, id, lun, 1);
118 return res;
119 }
120
121 /*
122 * shost_show_function: macro to create an attr function that can be used to
123 * show a non-bit field.
124 */
125 #define shost_show_function(name, field, format_string) \
126 static ssize_t \
127 show_##name (struct device *dev, struct device_attribute *attr, \
128 char *buf) \
129 { \
130 struct Scsi_Host *shost = class_to_shost(dev); \
131 return snprintf (buf, 20, format_string, shost->field); \
132 }
133
134 /*
135 * shost_rd_attr: macro to create a function and attribute variable for a
136 * read only field.
137 */
138 #define shost_rd_attr2(name, field, format_string) \
139 shost_show_function(name, field, format_string) \
140 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
141
142 #define shost_rd_attr(field, format_string) \
143 shost_rd_attr2(field, field, format_string)
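/*
 * As a rough illustration, shost_rd_attr(unique_id, "%u\n") expands to
 * something like:
 *
 *   static ssize_t show_unique_id(struct device *dev,
 *                                 struct device_attribute *attr, char *buf)
 *   {
 *           struct Scsi_Host *shost = class_to_shost(dev);
 *           return snprintf(buf, 20, "%u\n", shost->unique_id);
 *   }
 *   static DEVICE_ATTR(unique_id, S_IRUGO, show_unique_id, NULL);
 */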
144
145 /*
146 * Create the actual show/store functions and data structures.
147 */
148
149 static ssize_t
150 store_scan(struct device *dev, struct device_attribute *attr,
151 const char *buf, size_t count)
152 {
153 struct Scsi_Host *shost = class_to_shost(dev);
154 int res;
155
156 res = scsi_scan(shost, buf);
157 if (res == 0)
158 res = count;
159 return res;
160 };
161 static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
162
163 static ssize_t
164 store_shost_state(struct device *dev, struct device_attribute *attr,
165 const char *buf, size_t count)
166 {
167 int i;
168 struct Scsi_Host *shost = class_to_shost(dev);
169 enum scsi_host_state state = 0;
170
171 for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
172 const int len = strlen(shost_states[i].name);
173 if (strncmp(shost_states[i].name, buf, len) == 0 &&
174 buf[len] == '\n') {
175 state = shost_states[i].value;
176 break;
177 }
178 }
179 if (!state)
180 return -EINVAL;
181
182 if (scsi_host_set_state(shost, state))
183 return -EINVAL;
184 return count;
185 }
186
187 static ssize_t
188 show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
189 {
190 struct Scsi_Host *shost = class_to_shost(dev);
191 const char *name = scsi_host_state_name(shost->shost_state);
192
193 if (!name)
194 return -EINVAL;
195
196 return snprintf(buf, 20, "%s\n", name);
197 }
198
199 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
200 struct device_attribute dev_attr_hstate =
201 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
202
203 static ssize_t
204 show_shost_mode(unsigned int mode, char *buf)
205 {
206 ssize_t len = 0;
207
208 if (mode & MODE_INITIATOR)
209 len = sprintf(buf, "%s", "Initiator");
210
211 if (mode & MODE_TARGET)
212 len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");
213
214 len += sprintf(buf + len, "\n");
215
216 return len;
217 }
218
219 static ssize_t
220 show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
221 char *buf)
222 {
223 struct Scsi_Host *shost = class_to_shost(dev);
224 unsigned int supported_mode = shost->hostt->supported_mode;
225
226 if (supported_mode == MODE_UNKNOWN)
227 /* by default this should be initiator */
228 supported_mode = MODE_INITIATOR;
229
230 return show_shost_mode(supported_mode, buf);
231 }
232
233 static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
234
235 static ssize_t
236 show_shost_active_mode(struct device *dev,
237 struct device_attribute *attr, char *buf)
238 {
239 struct Scsi_Host *shost = class_to_shost(dev);
240
241 if (shost->active_mode == MODE_UNKNOWN)
242 return snprintf(buf, 20, "unknown\n");
243 else
244 return show_shost_mode(shost->active_mode, buf);
245 }
246
247 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
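/* Note: both mode attributes above pass a NULL store method, so the
 * S_IWUSR bit in their modes has no effect; the files are effectively
 * read-only. */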
248
249 shost_rd_attr(unique_id, "%u\n");
250 shost_rd_attr(host_busy, "%hu\n");
251 shost_rd_attr(cmd_per_lun, "%hd\n");
252 shost_rd_attr(can_queue, "%hd\n");
253 shost_rd_attr(sg_tablesize, "%hu\n");
254 shost_rd_attr(sg_prot_tablesize, "%hu\n");
255 shost_rd_attr(unchecked_isa_dma, "%d\n");
256 shost_rd_attr(prot_capabilities, "%u\n");
257 shost_rd_attr(prot_guard_type, "%hd\n");
258 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
259
260 static struct attribute *scsi_sysfs_shost_attrs[] = {
261 &dev_attr_unique_id.attr,
262 &dev_attr_host_busy.attr,
263 &dev_attr_cmd_per_lun.attr,
264 &dev_attr_can_queue.attr,
265 &dev_attr_sg_tablesize.attr,
266 &dev_attr_sg_prot_tablesize.attr,
267 &dev_attr_unchecked_isa_dma.attr,
268 &dev_attr_proc_name.attr,
269 &dev_attr_scan.attr,
270 &dev_attr_hstate.attr,
271 &dev_attr_supported_mode.attr,
272 &dev_attr_active_mode.attr,
273 &dev_attr_prot_capabilities.attr,
274 &dev_attr_prot_guard_type.attr,
275 NULL
276 };
277
278 struct attribute_group scsi_shost_attr_group = {
279 .attrs = scsi_sysfs_shost_attrs,
280 };
281
282 const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
283 &scsi_shost_attr_group,
284 NULL
285 };
286
287 static void scsi_device_cls_release(struct device *class_dev)
288 {
289 struct scsi_device *sdev;
290
291 sdev = class_to_sdev(class_dev);
292 put_device(&sdev->sdev_gendev);
293 }
294
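/*
 * The final put_device() on sdev_gendev may be dropped from a context that
 * cannot sleep, so the actual teardown below (which may sleep, e.g. in
 * cancel_work_sync() and scsi_target_reap()) is deferred to process context
 * via execute_in_process_context() in scsi_device_dev_release().
 */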
295 static void scsi_device_dev_release_usercontext(struct work_struct *work)
296 {
297 struct scsi_device *sdev;
298 struct device *parent;
299 struct scsi_target *starget;
300 struct list_head *this, *tmp;
301 unsigned long flags;
302
303 sdev = container_of(work, struct scsi_device, ew.work);
304
305 parent = sdev->sdev_gendev.parent;
306 starget = to_scsi_target(parent);
307
308 spin_lock_irqsave(sdev->host->host_lock, flags);
309 starget->reap_ref++;
310 list_del(&sdev->siblings);
311 list_del(&sdev->same_target_siblings);
312 list_del(&sdev->starved_entry);
313 spin_unlock_irqrestore(sdev->host->host_lock, flags);
314
315 cancel_work_sync(&sdev->event_work);
316
317 list_for_each_safe(this, tmp, &sdev->event_list) {
318 struct scsi_event *evt;
319
320 evt = list_entry(this, struct scsi_event, node);
321 list_del(&evt->node);
322 kfree(evt);
323 }
324
325 /* NULL queue means the device can't be used */
326 sdev->request_queue = NULL;
327
328 scsi_target_reap(scsi_target(sdev));
329
330 kfree(sdev->inquiry);
331 kfree(sdev);
332
333 if (parent)
334 put_device(parent);
335 }
336
337 static void scsi_device_dev_release(struct device *dev)
338 {
339 struct scsi_device *sdp = to_scsi_device(dev);
340 execute_in_process_context(scsi_device_dev_release_usercontext,
341 &sdp->ew);
342 }
343
344 static struct class sdev_class = {
345 .name = "scsi_device",
346 .dev_release = scsi_device_cls_release,
347 };
348
349 /* all probing is done in the individual ->probe routines */
350 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
351 {
352 struct scsi_device *sdp;
353
354 if (dev->type != &scsi_dev_type)
355 return 0;
356
357 sdp = to_scsi_device(dev);
358 if (sdp->no_uld_attach)
359 return 0;
360 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
361 }
362
363 static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
364 {
365 struct scsi_device *sdev;
366
367 if (dev->type != &scsi_dev_type)
368 return 0;
369
370 sdev = to_scsi_device(dev);
371
372 add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
373 return 0;
374 }
375
376 struct bus_type scsi_bus_type = {
377 .name = "scsi",
378 .match = scsi_bus_match,
379 .uevent = scsi_bus_uevent,
380 #ifdef CONFIG_PM
381 .pm = &scsi_bus_pm_ops,
382 #endif
383 };
384 EXPORT_SYMBOL_GPL(scsi_bus_type);
385
386 int scsi_sysfs_register(void)
387 {
388 int error;
389
390 error = bus_register(&scsi_bus_type);
391 if (!error) {
392 error = class_register(&sdev_class);
393 if (error)
394 bus_unregister(&scsi_bus_type);
395 }
396
397 return error;
398 }
399
400 void scsi_sysfs_unregister(void)
401 {
402 class_unregister(&sdev_class);
403 bus_unregister(&scsi_bus_type);
404 }
405
406 /*
407 * sdev_show_function: macro to create an attr function that can be used to
408 * show a non-bit field.
409 */
410 #define sdev_show_function(field, format_string) \
411 static ssize_t \
412 sdev_show_##field (struct device *dev, struct device_attribute *attr, \
413 char *buf) \
414 { \
415 struct scsi_device *sdev; \
416 sdev = to_scsi_device(dev); \
417 return snprintf (buf, 20, format_string, sdev->field); \
418 } \
419
420 /*
421 * sdev_rd_attr: macro to create a function and attribute variable for a
422 * read only field.
423 */
424 #define sdev_rd_attr(field, format_string) \
425 sdev_show_function(field, format_string) \
426 static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
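/*
 * The generated attributes appear under the device's sysfs directory, e.g.
 * (paths as typically laid out) /sys/bus/scsi/devices/H:C:I:L/type for the
 * sdev_rd_attr(type, "%d\n") instance below.
 */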
427
428
429 /*
430 * sdev_rw_attr: create a function and attribute variable for a
431 * read/write field.
432 */
433 #define sdev_rw_attr(field, format_string) \
434 sdev_show_function(field, format_string) \
435 \
436 static ssize_t \
437 sdev_store_##field (struct device *dev, struct device_attribute *attr, \
438 const char *buf, size_t count) \
439 { \
440 struct scsi_device *sdev; \
441 sdev = to_scsi_device(dev); \
442 sscanf (buf, format_string, &sdev->field); \
443 return count; \
444 } \
445 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
446
447 /* Currently we don't export bit fields, but we might in future,
448 * so leave this code in */
449 #if 0
450 /*
451 * sdev_rw_attr_bit: create a function and attribute variable for a
452 * read/write bit field.
453 */
454 #define sdev_rw_attr_bit(field) \
455 sdev_show_function(field, "%d\n") \
456 \
457 static ssize_t \
458 sdev_store_##field (struct device *dev, struct device_attribute *attr, \
459 const char *buf, size_t count) \
460 { \
461 int ret; \
462 struct scsi_device *sdev; \
463 ret = scsi_sdev_check_buf_bit(buf); \
464 if (ret >= 0) { \
465 sdev = to_scsi_device(dev); \
466 sdev->field = ret; \
467 ret = count; \
468 } \
469 return ret; \
470 } \
471 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
472
473 /*
474 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
475 * else return -EINVAL.
476 */
477 static int scsi_sdev_check_buf_bit(const char *buf)
478 {
479 if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
480 if (buf[0] == '1')
481 return 1;
482 else if (buf[0] == '0')
483 return 0;
484 else
485 return -EINVAL;
486 } else
487 return -EINVAL;
488 }
489 #endif
490 /*
491 * Create the actual show/store functions and data structures.
492 */
493 sdev_rd_attr (device_blocked, "%d\n");
494 sdev_rd_attr (queue_depth, "%d\n");
495 sdev_rd_attr (type, "%d\n");
496 sdev_rd_attr (scsi_level, "%d\n");
497 sdev_rd_attr (vendor, "%.8s\n");
498 sdev_rd_attr (model, "%.16s\n");
499 sdev_rd_attr (rev, "%.4s\n");
500
501 /*
502 * TODO: can we make these symlinks to the block layer ones?
503 */
504 static ssize_t
505 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
506 {
507 struct scsi_device *sdev;
508 sdev = to_scsi_device(dev);
509 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
510 }
511
512 static ssize_t
513 sdev_store_timeout (struct device *dev, struct device_attribute *attr,
514 const char *buf, size_t count)
515 {
516 struct scsi_device *sdev;
517 int timeout;
518 sdev = to_scsi_device(dev);
519 sscanf (buf, "%d\n", &timeout);
520 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
521 return count;
522 }
523 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
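/* Illustrative usage: "echo 30 > timeout" sets the request timeout for this
 * device to 30 seconds; internally the value is converted to jiffies
 * (seconds * HZ). */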
524
525 static ssize_t
526 store_rescan_field (struct device *dev, struct device_attribute *attr,
527 const char *buf, size_t count)
528 {
529 scsi_rescan_device(dev);
530 return count;
531 }
532 static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
533
534 static void sdev_store_delete_callback(struct device *dev)
535 {
536 scsi_remove_device(to_scsi_device(dev));
537 }
538
539 static ssize_t
540 sdev_store_delete(struct device *dev, struct device_attribute *attr,
541 const char *buf, size_t count)
542 {
543 int rc;
544
545 /* An attribute cannot be unregistered by one of its own methods,
546 * so we have to use this roundabout approach.
547 */
548 rc = device_schedule_callback(dev, sdev_store_delete_callback);
549 if (rc)
550 count = rc;
551 return count;
552 };
553 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
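/* Illustrative usage: writing anything (e.g. "echo 1 > delete") removes the
 * device; the buffer contents are ignored, only the write itself matters. */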
554
555 static ssize_t
556 store_state_field(struct device *dev, struct device_attribute *attr,
557 const char *buf, size_t count)
558 {
559 int i;
560 struct scsi_device *sdev = to_scsi_device(dev);
561 enum scsi_device_state state = 0;
562
563 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
564 const int len = strlen(sdev_states[i].name);
565 if (strncmp(sdev_states[i].name, buf, len) == 0 &&
566 buf[len] == '\n') {
567 state = sdev_states[i].value;
568 break;
569 }
570 }
571 if (!state)
572 return -EINVAL;
573
574 if (scsi_device_set_state(sdev, state))
575 return -EINVAL;
576 return count;
577 }
578
579 static ssize_t
580 show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
581 {
582 struct scsi_device *sdev = to_scsi_device(dev);
583 const char *name = scsi_device_state_name(sdev->sdev_state);
584
585 if (!name)
586 return -EINVAL;
587
588 return snprintf(buf, 20, "%s\n", name);
589 }
590
591 static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);
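/*
 * Illustrative usage: "echo offline > state" or "echo running > state".
 * The comparison in store_state_field() requires the state name to be
 * followed by a newline, which echo supplies by default.
 */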
592
593 static ssize_t
594 show_queue_type_field(struct device *dev, struct device_attribute *attr,
595 char *buf)
596 {
597 struct scsi_device *sdev = to_scsi_device(dev);
598 const char *name = "none";
599
600 if (sdev->ordered_tags)
601 name = "ordered";
602 else if (sdev->simple_tags)
603 name = "simple";
604
605 return snprintf(buf, 20, "%s\n", name);
606 }
607
608 static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);
609
610 static ssize_t
611 show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf)
612 {
613 return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
614 }
615
616 static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);
617
618 #define show_sdev_iostat(field) \
619 static ssize_t \
620 show_iostat_##field(struct device *dev, struct device_attribute *attr, \
621 char *buf) \
622 { \
623 struct scsi_device *sdev = to_scsi_device(dev); \
624 unsigned long long count = atomic_read(&sdev->field); \
625 return snprintf(buf, 20, "0x%llx\n", count); \
626 } \
627 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
628
629 show_sdev_iostat(iorequest_cnt);
630 show_sdev_iostat(iodone_cnt);
631 show_sdev_iostat(ioerr_cnt);
632
633 static ssize_t
634 sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
635 {
636 struct scsi_device *sdev;
637 sdev = to_scsi_device(dev);
638 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
639 }
640 static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
641
642 #define DECLARE_EVT_SHOW(name, Cap_name) \
643 static ssize_t \
644 sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \
645 char *buf) \
646 { \
647 struct scsi_device *sdev = to_scsi_device(dev); \
648 int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
649 return snprintf(buf, 20, "%d\n", val); \
650 }
651
652 #define DECLARE_EVT_STORE(name, Cap_name) \
653 static ssize_t \
654 sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
655 const char *buf, size_t count) \
656 { \
657 struct scsi_device *sdev = to_scsi_device(dev); \
658 int val = simple_strtoul(buf, NULL, 0); \
659 if (val == 0) \
660 clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
661 else if (val == 1) \
662 set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
663 else \
664 return -EINVAL; \
665 return count; \
666 }
667
668 #define DECLARE_EVT(name, Cap_name) \
669 DECLARE_EVT_SHOW(name, Cap_name) \
670 DECLARE_EVT_STORE(name, Cap_name) \
671 static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \
672 sdev_store_evt_##name);
673 #define REF_EVT(name) &dev_attr_evt_##name.attr
674
675 DECLARE_EVT(media_change, MEDIA_CHANGE)
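/*
 * The line above generates an "evt_media_change" attribute whose show/store
 * methods read and toggle the SDEV_EVT_MEDIA_CHANGE bit in
 * sdev->supported_events: writing 1 enables reporting of that event,
 * writing 0 disables it.
 */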
676
677 /* Default template for device attributes. May NOT be modified */
678 static struct attribute *scsi_sdev_attrs[] = {
679 &dev_attr_device_blocked.attr,
680 &dev_attr_type.attr,
681 &dev_attr_scsi_level.attr,
682 &dev_attr_vendor.attr,
683 &dev_attr_model.attr,
684 &dev_attr_rev.attr,
685 &dev_attr_rescan.attr,
686 &dev_attr_delete.attr,
687 &dev_attr_state.attr,
688 &dev_attr_timeout.attr,
689 &dev_attr_iocounterbits.attr,
690 &dev_attr_iorequest_cnt.attr,
691 &dev_attr_iodone_cnt.attr,
692 &dev_attr_ioerr_cnt.attr,
693 &dev_attr_modalias.attr,
694 REF_EVT(media_change),
695 NULL
696 };
697
698 static struct attribute_group scsi_sdev_attr_group = {
699 .attrs = scsi_sdev_attrs,
700 };
701
702 static const struct attribute_group *scsi_sdev_attr_groups[] = {
703 &scsi_sdev_attr_group,
704 NULL
705 };
706
707 static ssize_t
708 sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
709 const char *buf, size_t count)
710 {
711 int depth, retval;
712 struct scsi_device *sdev = to_scsi_device(dev);
713 struct scsi_host_template *sht = sdev->host->hostt;
714
715 if (!sht->change_queue_depth)
716 return -EINVAL;
717
718 depth = simple_strtoul(buf, NULL, 0);
719
720 if (depth < 1)
721 return -EINVAL;
722
723 retval = sht->change_queue_depth(sdev, depth,
724 SCSI_QDEPTH_DEFAULT);
725 if (retval < 0)
726 return retval;
727
728 sdev->max_queue_depth = sdev->queue_depth;
729
730 return count;
731 }
732
733 static struct device_attribute sdev_attr_queue_depth_rw =
734 __ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
735 sdev_store_queue_depth_rw);
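/*
 * This writable queue_depth variant is registered instead of the read-only
 * dev_attr_queue_depth when the host template provides a
 * change_queue_depth() method; see scsi_sysfs_add_sdev() below.
 */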
736
737 static ssize_t
738 sdev_show_queue_ramp_up_period(struct device *dev,
739 struct device_attribute *attr,
740 char *buf)
741 {
742 struct scsi_device *sdev;
743 sdev = to_scsi_device(dev);
744 return snprintf(buf, 20, "%u\n",
745 jiffies_to_msecs(sdev->queue_ramp_up_period));
746 }
747
748 static ssize_t
749 sdev_store_queue_ramp_up_period(struct device *dev,
750 struct device_attribute *attr,
751 const char *buf, size_t count)
752 {
753 struct scsi_device *sdev = to_scsi_device(dev);
754 unsigned long period;
755
756 if (strict_strtoul(buf, 10, &period))
757 return -EINVAL;
758
759 sdev->queue_ramp_up_period = msecs_to_jiffies(period);
760 return count;
761 }
762
763 static struct device_attribute sdev_attr_queue_ramp_up_period =
764 __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
765 sdev_show_queue_ramp_up_period,
766 sdev_store_queue_ramp_up_period);
767
768 static ssize_t
769 sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
770 const char *buf, size_t count)
771 {
772 struct scsi_device *sdev = to_scsi_device(dev);
773 struct scsi_host_template *sht = sdev->host->hostt;
774 int tag_type = 0, retval;
775 int prev_tag_type = scsi_get_tag_type(sdev);
776
777 if (!sdev->tagged_supported || !sht->change_queue_type)
778 return -EINVAL;
779
780 if (strncmp(buf, "ordered", 7) == 0)
781 tag_type = MSG_ORDERED_TAG;
782 else if (strncmp(buf, "simple", 6) == 0)
783 tag_type = MSG_SIMPLE_TAG;
784 else if (strncmp(buf, "none", 4) != 0)
785 return -EINVAL;
786
787 if (tag_type == prev_tag_type)
788 return count;
789
790 retval = sht->change_queue_type(sdev, tag_type);
791 if (retval < 0)
792 return retval;
793
794 return count;
795 }
796
797 static int scsi_target_add(struct scsi_target *starget)
798 {
799 int error;
800
801 if (starget->state != STARGET_CREATED)
802 return 0;
803
804 error = device_add(&starget->dev);
805 if (error) {
806 dev_err(&starget->dev, "target device_add failed, error %d\n", error);
807 return error;
808 }
809 transport_add_device(&starget->dev);
810 starget->state = STARGET_RUNNING;
811
812 pm_runtime_set_active(&starget->dev);
813 pm_runtime_enable(&starget->dev);
814 device_enable_async_suspend(&starget->dev);
815
816 return 0;
817 }
818
819 static struct device_attribute sdev_attr_queue_type_rw =
820 __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
821 sdev_store_queue_type_rw);
822
823 /**
824 * scsi_sysfs_add_sdev - add scsi device to sysfs
825 * @sdev: scsi_device to add
826 *
827 * Return value:
828 * 0 on Success / non-zero on Failure
829 **/
830 int scsi_sysfs_add_sdev(struct scsi_device *sdev)
831 {
832 int error, i;
833 struct request_queue *rq = sdev->request_queue;
834 struct scsi_target *starget = sdev->sdev_target;
835
836 error = scsi_device_set_state(sdev, SDEV_RUNNING);
837 if (error)
838 return error;
839
840 error = scsi_target_add(starget);
841 if (error)
842 return error;
843
844 transport_configure_device(&starget->dev);
845
846 device_enable_async_suspend(&sdev->sdev_gendev);
847 scsi_autopm_get_target(starget);
848 pm_runtime_set_active(&sdev->sdev_gendev);
849 pm_runtime_forbid(&sdev->sdev_gendev);
850 pm_runtime_enable(&sdev->sdev_gendev);
851 scsi_autopm_put_target(starget);
852
853 /* The following call will keep sdev active indefinitely, until
854 * its driver does a corresponding scsi_autopm_put_device(). Only
855 * drivers supporting autosuspend will do this.
856 */
857 scsi_autopm_get_device(sdev);
858
859 error = device_add(&sdev->sdev_gendev);
860 if (error) {
861 sdev_printk(KERN_INFO, sdev,
862 "failed to add device: %d\n", error);
863 return error;
864 }
865 device_enable_async_suspend(&sdev->sdev_dev);
866 error = device_add(&sdev->sdev_dev);
867 if (error) {
868 sdev_printk(KERN_INFO, sdev,
869 "failed to add class device: %d\n", error);
870 device_del(&sdev->sdev_gendev);
871 return error;
872 }
873 transport_add_device(&sdev->sdev_gendev);
874 sdev->is_visible = 1;
875
876 /* create queue files, which may be writable, depending on the host */
877 if (sdev->host->hostt->change_queue_depth) {
878 error = device_create_file(&sdev->sdev_gendev,
879 &sdev_attr_queue_depth_rw);
880 error = device_create_file(&sdev->sdev_gendev,
881 &sdev_attr_queue_ramp_up_period);
882 }
883 else
884 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
885 if (error)
886 return error;
887
888 if (sdev->host->hostt->change_queue_type)
889 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
890 else
891 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
892 if (error)
893 return error;
894
895 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
896
897 if (error)
898 /* we're treating error on bsg register as non-fatal,
899 * so pretend nothing went wrong */
900 sdev_printk(KERN_INFO, sdev,
901 "Failed to register bsg queue, errno=%d\n", error);
902
903 /* add additional host specific attributes */
904 if (sdev->host->hostt->sdev_attrs) {
905 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
906 error = device_create_file(&sdev->sdev_gendev,
907 sdev->host->hostt->sdev_attrs[i]);
908 if (error)
909 return error;
910 }
911 }
912
913 return error;
914 }
915
916 void __scsi_remove_device(struct scsi_device *sdev)
917 {
918 struct device *dev = &sdev->sdev_gendev;
919
920 if (sdev->is_visible) {
921 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
922 return;
923
924 bsg_unregister_queue(sdev->request_queue);
925 device_unregister(&sdev->sdev_dev);
926 transport_remove_device(dev);
927 device_del(dev);
928 } else
929 put_device(&sdev->sdev_dev);
930 scsi_device_set_state(sdev, SDEV_DEL);
931 if (sdev->host->hostt->slave_destroy)
932 sdev->host->hostt->slave_destroy(sdev);
933 transport_destroy_device(dev);
934
935 /* cause the request function to reject all I/O requests */
936 sdev->request_queue->queuedata = NULL;
937
938 /* Freeing the queue signals to block that we're done */
939 scsi_free_queue(sdev->request_queue);
940 put_device(dev);
941 }
942
943 /**
944 * scsi_remove_device - unregister a device from the scsi bus
945 * @sdev: scsi_device to unregister
946 **/
947 void scsi_remove_device(struct scsi_device *sdev)
948 {
949 struct Scsi_Host *shost = sdev->host;
950
951 mutex_lock(&shost->scan_mutex);
952 __scsi_remove_device(sdev);
953 mutex_unlock(&shost->scan_mutex);
954 }
955 EXPORT_SYMBOL(scsi_remove_device);
956
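/*
 * Removing a target: the extra reap_ref taken on entry keeps the target
 * from being reaped while its devices are torn down. host_lock must be
 * dropped around scsi_remove_device() (which can sleep), so the device
 * list walk restarts from the head after every removal.
 */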
957 static void __scsi_remove_target(struct scsi_target *starget)
958 {
959 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
960 unsigned long flags;
961 struct scsi_device *sdev;
962
963 spin_lock_irqsave(shost->host_lock, flags);
964 starget->reap_ref++;
965 restart:
966 list_for_each_entry(sdev, &shost->__devices, siblings) {
967 if (sdev->channel != starget->channel ||
968 sdev->id != starget->id ||
969 scsi_device_get(sdev))
970 continue;
971 spin_unlock_irqrestore(shost->host_lock, flags);
972 scsi_remove_device(sdev);
973 scsi_device_put(sdev);
974 spin_lock_irqsave(shost->host_lock, flags);
975 goto restart;
976 }
977 spin_unlock_irqrestore(shost->host_lock, flags);
978 scsi_target_reap(starget);
979 }
980
981 static int __remove_child (struct device * dev, void * data)
982 {
983 if (scsi_is_target_device(dev))
984 __scsi_remove_target(to_scsi_target(dev));
985 return 0;
986 }
987
988 /**
989 * scsi_remove_target - try to remove a target and all its devices
990 * @dev: generic starget or parent of generic stargets to be removed
991 *
992 * Note: This is slightly racy. It is possible that if the user
993 * requests the addition of another device then the target won't be
994 * removed.
995 */
996 void scsi_remove_target(struct device *dev)
997 {
998 if (scsi_is_target_device(dev)) {
999 __scsi_remove_target(to_scsi_target(dev));
1000 return;
1001 }
1002
1003 get_device(dev);
1004 device_for_each_child(dev, NULL, __remove_child);
1005 put_device(dev);
1006 }
1007 EXPORT_SYMBOL(scsi_remove_target);
1008
1009 int scsi_register_driver(struct device_driver *drv)
1010 {
1011 drv->bus = &scsi_bus_type;
1012
1013 return driver_register(drv);
1014 }
1015 EXPORT_SYMBOL(scsi_register_driver);
1016
1017 int scsi_register_interface(struct class_interface *intf)
1018 {
1019 intf->class = &sdev_class;
1020
1021 return class_interface_register(intf);
1022 }
1023 EXPORT_SYMBOL(scsi_register_interface);
1024
1025 /**
1026 * scsi_sysfs_add_host - add scsi host to subsystem
1027 * @shost: scsi host struct to add to subsystem
1028 **/
1029 int scsi_sysfs_add_host(struct Scsi_Host *shost)
1030 {
1031 int error, i;
1032
1033 /* add host specific attributes */
1034 if (shost->hostt->shost_attrs) {
1035 for (i = 0; shost->hostt->shost_attrs[i]; i++) {
1036 error = device_create_file(&shost->shost_dev,
1037 shost->hostt->shost_attrs[i]);
1038 if (error)
1039 return error;
1040 }
1041 }
1042
1043 transport_register_device(&shost->shost_gendev);
1044 transport_configure_device(&shost->shost_gendev);
1045 return 0;
1046 }
1047
1048 static struct device_type scsi_dev_type = {
1049 .name = "scsi_device",
1050 .release = scsi_device_dev_release,
1051 .groups = scsi_sdev_attr_groups,
1052 };
1053
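/*
 * Each scsi_device gets two struct device instances set up below:
 * sdev_gendev on scsi_bus_type (typically /sys/bus/scsi/devices/) and
 * sdev_dev in sdev_class (typically /sys/class/scsi_device/), both named
 * "host:channel:id:lun".
 */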
1054 void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1055 {
1056 unsigned long flags;
1057 struct Scsi_Host *shost = sdev->host;
1058 struct scsi_target *starget = sdev->sdev_target;
1059
1060 device_initialize(&sdev->sdev_gendev);
1061 sdev->sdev_gendev.bus = &scsi_bus_type;
1062 sdev->sdev_gendev.type = &scsi_dev_type;
1063 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
1064 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1065
1066 device_initialize(&sdev->sdev_dev);
1067 sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
1068 sdev->sdev_dev.class = &sdev_class;
1069 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
1070 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1071 sdev->scsi_level = starget->scsi_level;
1072 transport_setup_device(&sdev->sdev_gendev);
1073 spin_lock_irqsave(shost->host_lock, flags);
1074 list_add_tail(&sdev->same_target_siblings, &starget->devices);
1075 list_add_tail(&sdev->siblings, &shost->__devices);
1076 spin_unlock_irqrestore(shost->host_lock, flags);
1077 }
1078
1079 int scsi_is_sdev_device(const struct device *dev)
1080 {
1081 return dev->type == &scsi_dev_type;
1082 }
1083 EXPORT_SYMBOL(scsi_is_sdev_device);
1084
1085 /* A blank transport template that is used in drivers that don't
1086 * yet implement Transport Attributes */
1087 struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
1088