1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * gendisk handling
4 *
5 * Portions Copyright (C) 2020 Christoph Hellwig
6 */
7
8 #include <linux/module.h>
9 #include <linux/ctype.h>
10 #include <linux/fs.h>
11 #include <linux/kdev_t.h>
12 #include <linux/kernel.h>
13 #include <linux/blkdev.h>
14 #include <linux/backing-dev.h>
15 #include <linux/init.h>
16 #include <linux/spinlock.h>
17 #include <linux/proc_fs.h>
18 #include <linux/seq_file.h>
19 #include <linux/slab.h>
20 #include <linux/kmod.h>
21 #include <linux/major.h>
22 #include <linux/mutex.h>
23 #include <linux/idr.h>
24 #include <linux/log2.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/badblocks.h>
27 #include <linux/part_stat.h>
28 #include <linux/blktrace_api.h>
29
30 #include "blk-throttle.h"
31 #include "blk.h"
32 #include "blk-mq-sched.h"
33 #include "blk-rq-qos.h"
34 #include "blk-cgroup.h"
35
36 static struct kobject *block_depr;
37
38 /*
39 * Unique, monotonically increasing sequential number associated with block
40 * device instances (i.e. incremented each time a device is attached).
41 * Associating uevents with block devices in userspace is difficult and racy:
42 * the uevent netlink socket is lossy, and on slow and overloaded systems has
43 * a very high latency.
44 * Block devices do not have exclusive owners in userspace; any process can set
45 * one up (e.g. loop devices). Moreover, device names can be reused (e.g. loop0
46 * can be reused again and again).
47 * A userspace process setting up a block device and watching for its events
48 * cannot thus reliably tell whether an event relates to the device it just set
49 * up or another earlier instance with the same name.
50 * This sequential number allows userspace processes to solve this problem, and
51 * uniquely associate an uevent with the lifetime of a device.
52 */
53 static atomic64_t diskseq;
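
/*
 * Userspace-side sketch (illustrative only, not part of this file) of how
 * the sequence number can be consumed, assuming the DISKSEQ uevent
 * property and the BLKGETDISKSEQ ioctl exposed via <linux/fs.h>:
 *
 *	__u64 seq;
 *	int fd = open("/dev/loop0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, BLKGETDISKSEQ, &seq) == 0) {
 *		// Compare "seq" with the DISKSEQ= value carried in the
 *		// uevent: a match means the event refers to this instance,
 *		// not to an earlier device that reused the same name.
 *	}
 */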
54
55 /* for extended dynamic devt allocation, currently only one major is used */
56 #define NR_EXT_DEVT (1 << MINORBITS)
57 static DEFINE_IDA(ext_devt_ida);
58
59 void set_capacity(struct gendisk *disk, sector_t sectors)
60 {
61 bdev_set_nr_sectors(disk->part0, sectors);
62 }
63 EXPORT_SYMBOL(set_capacity);
64
65 /*
66 * Set disk capacity and notify if the size is not currently zero and will not
67 * be set to zero. Returns true if a uevent was sent, otherwise false.
68 */
69 bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
70 {
71 sector_t capacity = get_capacity(disk);
72 char *envp[] = { "RESIZE=1", NULL };
73
74 set_capacity(disk, size);
75
76 /*
77 * Only print a message and send a uevent if the gendisk is user visible
78 * and alive. This avoids spamming the log and udev when setting the
79 * initial capacity during probing.
80 */
81 if (size == capacity ||
82 !disk_live(disk) ||
83 (disk->flags & GENHD_FL_HIDDEN))
84 return false;
85
86 pr_info("%s: detected capacity change from %lld to %lld\n",
87 disk->disk_name, capacity, size);
88
89 /*
90 * Historically we did not send a uevent for changes to/from an empty
91 * device.
92 */
93 if (!capacity || !size)
94 return false;
95 kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
96 return true;
97 }
98 EXPORT_SYMBOL_GPL(set_capacity_and_notify);
99
100 static void part_stat_read_all(struct block_device *part,
101 struct disk_stats *stat)
102 {
103 int cpu;
104
105 memset(stat, 0, sizeof(struct disk_stats));
106 for_each_possible_cpu(cpu) {
107 struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
108 int group;
109
110 for (group = 0; group < NR_STAT_GROUPS; group++) {
111 stat->nsecs[group] += ptr->nsecs[group];
112 stat->sectors[group] += ptr->sectors[group];
113 stat->ios[group] += ptr->ios[group];
114 stat->merges[group] += ptr->merges[group];
115 }
116
117 stat->io_ticks += ptr->io_ticks;
118 }
119 }
120
121 static unsigned int part_in_flight(struct block_device *part)
122 {
123 unsigned int inflight = 0;
124 int cpu;
125
126 for_each_possible_cpu(cpu) {
127 inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
128 part_stat_local_read_cpu(part, in_flight[1], cpu);
129 }
130 if ((int)inflight < 0)
131 inflight = 0;
132
133 return inflight;
134 }
135
136 static void part_in_flight_rw(struct block_device *part,
137 unsigned int inflight[2])
138 {
139 int cpu;
140
141 inflight[0] = 0;
142 inflight[1] = 0;
143 for_each_possible_cpu(cpu) {
144 inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
145 inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
146 }
147 if ((int)inflight[0] < 0)
148 inflight[0] = 0;
149 if ((int)inflight[1] < 0)
150 inflight[1] = 0;
151 }
152
153 /*
154 * Can be deleted altogether. Later.
155 *
156 */
157 #define BLKDEV_MAJOR_HASH_SIZE 255
158 static struct blk_major_name {
159 struct blk_major_name *next;
160 int major;
161 char name[16];
162 #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
163 void (*probe)(dev_t devt);
164 #endif
165 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
166 static DEFINE_MUTEX(major_names_lock);
167 static DEFINE_SPINLOCK(major_names_spinlock);
168
169 /* index in the above - for now: assume no multimajor ranges */
170 static inline int major_to_index(unsigned major)
171 {
172 return major % BLKDEV_MAJOR_HASH_SIZE;
173 }
174
175 #ifdef CONFIG_PROC_FS
176 void blkdev_show(struct seq_file *seqf, off_t offset)
177 {
178 struct blk_major_name *dp;
179
180 spin_lock(&major_names_spinlock);
181 for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
182 if (dp->major == offset)
183 seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
184 spin_unlock(&major_names_spinlock);
185 }
186 #endif /* CONFIG_PROC_FS */
187
188 /**
189 * __register_blkdev - register a new block device
190 *
191 * @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
192 * @major = 0, try to allocate any unused major number.
193 * @name: the name of the new block device as a zero terminated string
194 * @probe: pre-devtmpfs / pre-udev callback used to create disks when their
195 * pre-created device node is accessed. When a probe call uses
196 * add_disk() and it fails, the driver must clean up resources. This
197 * interface may soon be removed.
198 *
199 * The @name must be unique within the system.
200 *
201 * The return value depends on the @major input parameter:
202 *
203 * - if a major device number was requested in range [1..BLKDEV_MAJOR_MAX-1]
204 * then the function returns zero on success, or a negative error code
205 * - if any unused major number was requested with @major = 0 parameter
206 * then the return value is the allocated major number in range
207 * [1..BLKDEV_MAJOR_MAX-1] or a negative error code otherwise
208 *
209 * See Documentation/admin-guide/devices.txt for the list of allocated
210 * major numbers.
211 *
212 * Use register_blkdev instead for any new code.
213 */
214 int __register_blkdev(unsigned int major, const char *name,
215 void (*probe)(dev_t devt))
216 {
217 struct blk_major_name **n, *p;
218 int index, ret = 0;
219
220 mutex_lock(&major_names_lock);
221
222 /* temporary */
223 if (major == 0) {
224 for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
225 if (major_names[index] == NULL)
226 break;
227 }
228
229 if (index == 0) {
230 printk("%s: failed to get major for %s\n",
231 __func__, name);
232 ret = -EBUSY;
233 goto out;
234 }
235 major = index;
236 ret = major;
237 }
238
239 if (major >= BLKDEV_MAJOR_MAX) {
240 pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
241 __func__, major, BLKDEV_MAJOR_MAX-1, name);
242
243 ret = -EINVAL;
244 goto out;
245 }
246
247 p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
248 if (p == NULL) {
249 ret = -ENOMEM;
250 goto out;
251 }
252
253 p->major = major;
254 #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
255 p->probe = probe;
256 #endif
257 strscpy(p->name, name, sizeof(p->name));
258 p->next = NULL;
259 index = major_to_index(major);
260
261 spin_lock(&major_names_spinlock);
262 for (n = &major_names[index]; *n; n = &(*n)->next) {
263 if ((*n)->major == major)
264 break;
265 }
266 if (!*n)
267 *n = p;
268 else
269 ret = -EBUSY;
270 spin_unlock(&major_names_spinlock);
271
272 if (ret < 0) {
273 printk("register_blkdev: cannot get major %u for %s\n",
274 major, name);
275 kfree(p);
276 }
277 out:
278 mutex_unlock(&major_names_lock);
279 return ret;
280 }
281 EXPORT_SYMBOL(__register_blkdev);
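
/*
 * Illustrative driver-side sketch (not part of this file): dynamic major
 * allocation through the register_blkdev() wrapper documented above. The
 * "foo" name and module hooks are hypothetical.
 *
 *	static int foo_major;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_major = register_blkdev(0, "foo");
 *		return foo_major < 0 ? foo_major : 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_blkdev(foo_major, "foo");
 *	}
 */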
282
283 void unregister_blkdev(unsigned int major, const char *name)
284 {
285 struct blk_major_name **n;
286 struct blk_major_name *p = NULL;
287 int index = major_to_index(major);
288
289 mutex_lock(&major_names_lock);
290 spin_lock(&major_names_spinlock);
291 for (n = &major_names[index]; *n; n = &(*n)->next)
292 if ((*n)->major == major)
293 break;
294 if (!*n || strcmp((*n)->name, name)) {
295 WARN_ON(1);
296 } else {
297 p = *n;
298 *n = p->next;
299 }
300 spin_unlock(&major_names_spinlock);
301 mutex_unlock(&major_names_lock);
302 kfree(p);
303 }
304
305 EXPORT_SYMBOL(unregister_blkdev);
306
307 int blk_alloc_ext_minor(void)
308 {
309 int idx;
310
311 idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
312 if (idx == -ENOSPC)
313 return -EBUSY;
314 return idx;
315 }
316
317 void blk_free_ext_minor(unsigned int minor)
318 {
319 ida_free(&ext_devt_ida, minor);
320 }
321
322 void disk_uevent(struct gendisk *disk, enum kobject_action action)
323 {
324 struct block_device *part;
325 unsigned long idx;
326
327 rcu_read_lock();
328 xa_for_each(&disk->part_tbl, idx, part) {
329 if (bdev_is_partition(part) && !bdev_nr_sectors(part))
330 continue;
331 if (!kobject_get_unless_zero(&part->bd_device.kobj))
332 continue;
333
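		/*
		 * kobject_uevent() can sleep, so drop the RCU read lock
		 * while holding a reference on the bdev's device, and
		 * re-acquire it before resuming the xarray walk.
		 */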
334 rcu_read_unlock();
335 kobject_uevent(bdev_kobj(part), action);
336 put_device(&part->bd_device);
337 rcu_read_lock();
338 }
339 rcu_read_unlock();
340 }
341 EXPORT_SYMBOL_GPL(disk_uevent);
342
343 int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
344 {
345 struct block_device *bdev;
346 int ret = 0;
347
348 if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
349 return -EINVAL;
350 if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
351 return -EINVAL;
352 if (disk->open_partitions)
353 return -EBUSY;
354
355 /*
356 * If the device is opened exclusively by current thread already, it's
357 * safe to scan partitions; otherwise, use bd_prepare_to_claim() to
358 * synchronize with other exclusive openers and other partition
359 * scanners.
360 */
361 if (!(mode & BLK_OPEN_EXCL)) {
362 ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions,
363 NULL);
364 if (ret)
365 return ret;
366 }
367
368 set_bit(GD_NEED_PART_SCAN, &disk->state);
369 bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL, NULL,
370 NULL);
371 if (IS_ERR(bdev))
372 ret = PTR_ERR(bdev);
373 else
374 blkdev_put(bdev, NULL);
375
376 /*
377 * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN would still be
378 * set. Clear it here so that a later open (e.g. when re-assembling a
379 * partitioned raid device) does not create partitions for the underlying disk.
380 */
381 clear_bit(GD_NEED_PART_SCAN, &disk->state);
382 if (!(mode & BLK_OPEN_EXCL))
383 bd_abort_claiming(disk->part0, disk_scan_partitions);
384 return ret;
385 }
386
387 /**
388 * device_add_disk - add disk information to kernel list
389 * @parent: parent device for the disk
390 * @disk: per-device partitioning information
391 * @groups: Additional per-device sysfs groups
392 *
393 * This function registers the partitioning information in @disk
394 * with the kernel.
395 */
396 int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
397 const struct attribute_group **groups)
398
399 {
400 struct device *ddev = disk_to_dev(disk);
401 int ret;
402
403 /* Only bio-based drivers may set ->poll_bio */
404 if (queue_is_mq(disk->queue) && disk->fops->poll_bio)
405 return -EINVAL;
406
407 /*
408 * The disk queue should now be all set with enough information about
409 * the device for the elevator code to pick an adequate default
410 * elevator if one is needed, that is, for devices requesting queue
411 * registration.
412 */
413 elevator_init_mq(disk->queue);
414
415 /* Mark bdev as having a submit_bio, if needed */
416 disk->part0->bd_has_submit_bio = disk->fops->submit_bio != NULL;
417
418 /*
419 * If the driver provides an explicit major number it must also provide
420 * the number of minor numbers supported, and those will be used to
421 * set up the gendisk.
422 * Otherwise just allocate the device numbers for both the whole device
423 * and all partitions from the extended dev_t space.
424 */
425 ret = -EINVAL;
426 if (disk->major) {
427 if (WARN_ON(!disk->minors))
428 goto out_exit_elevator;
429
430 if (disk->minors > DISK_MAX_PARTS) {
431 pr_err("block: can't allocate more than %d partitions\n",
432 DISK_MAX_PARTS);
433 disk->minors = DISK_MAX_PARTS;
434 }
435 if (disk->first_minor > MINORMASK ||
436 disk->minors > MINORMASK + 1 ||
437 disk->first_minor + disk->minors > MINORMASK + 1)
438 goto out_exit_elevator;
439 } else {
440 if (WARN_ON(disk->minors))
441 goto out_exit_elevator;
442
443 ret = blk_alloc_ext_minor();
444 if (ret < 0)
445 goto out_exit_elevator;
446 disk->major = BLOCK_EXT_MAJOR;
447 disk->first_minor = ret;
448 }
449
450 /* delay uevents until we have scanned the partition table */
451 dev_set_uevent_suppress(ddev, 1);
452
453 ddev->parent = parent;
454 ddev->groups = groups;
455 dev_set_name(ddev, "%s", disk->disk_name);
456 if (!(disk->flags & GENHD_FL_HIDDEN))
457 ddev->devt = MKDEV(disk->major, disk->first_minor);
458 ret = device_add(ddev);
459 if (ret)
460 goto out_free_ext_minor;
461
462 ret = disk_alloc_events(disk);
463 if (ret)
464 goto out_device_del;
465
466 ret = sysfs_create_link(block_depr, &ddev->kobj,
467 kobject_name(&ddev->kobj));
468 if (ret)
469 goto out_device_del;
470
471 /*
472 * avoid a probable deadlock caused by allocating memory with
473 * GFP_KERNEL in the runtime_resume callback of any of the disk's
474 * ancestor devices
475 */
476 pm_runtime_set_memalloc_noio(ddev, true);
477
478 disk->part0->bd_holder_dir =
479 kobject_create_and_add("holders", &ddev->kobj);
480 if (!disk->part0->bd_holder_dir) {
481 ret = -ENOMEM;
482 goto out_del_block_link;
483 }
484 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
485 if (!disk->slave_dir) {
486 ret = -ENOMEM;
487 goto out_put_holder_dir;
488 }
489
490 ret = blk_register_queue(disk);
491 if (ret)
492 goto out_put_slave_dir;
493
494 if (!(disk->flags & GENHD_FL_HIDDEN)) {
495 ret = bdi_register(disk->bdi, "%u:%u",
496 disk->major, disk->first_minor);
497 if (ret)
498 goto out_unregister_queue;
499 bdi_set_owner(disk->bdi, ddev);
500 ret = sysfs_create_link(&ddev->kobj,
501 &disk->bdi->dev->kobj, "bdi");
502 if (ret)
503 goto out_unregister_bdi;
504
505 /* Make sure the first partition scan proceeds */
506 if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
507 !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
508 set_bit(GD_NEED_PART_SCAN, &disk->state);
509
510 bdev_add(disk->part0, ddev->devt);
511 if (get_capacity(disk))
512 disk_scan_partitions(disk, BLK_OPEN_READ);
513
514 /*
515 * Announce the disk and partitions after all partitions are
516 * created. (for hidden disks uevents remain suppressed forever)
517 */
518 dev_set_uevent_suppress(ddev, 0);
519 disk_uevent(disk, KOBJ_ADD);
520 } else {
521 /*
522 * Even if the block_device for a hidden gendisk is not
523 * registered, it needs to have a valid bd_dev so that the
524 * freeing of the dynamic major works.
525 */
526 disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor);
527 }
528
529 disk_update_readahead(disk);
530 disk_add_events(disk);
531 set_bit(GD_ADDED, &disk->state);
532 return 0;
533
534 out_unregister_bdi:
535 if (!(disk->flags & GENHD_FL_HIDDEN))
536 bdi_unregister(disk->bdi);
537 out_unregister_queue:
538 blk_unregister_queue(disk);
539 rq_qos_exit(disk->queue);
540 out_put_slave_dir:
541 kobject_put(disk->slave_dir);
542 disk->slave_dir = NULL;
543 out_put_holder_dir:
544 kobject_put(disk->part0->bd_holder_dir);
545 out_del_block_link:
546 sysfs_remove_link(block_depr, dev_name(ddev));
547 pm_runtime_set_memalloc_noio(ddev, false);
548 out_device_del:
549 device_del(ddev);
550 out_free_ext_minor:
551 if (disk->major == BLOCK_EXT_MAJOR)
552 blk_free_ext_minor(disk->first_minor);
553 out_exit_elevator:
554 if (disk->queue->elevator)
555 elevator_exit(disk->queue);
556 return ret;
557 }
558 EXPORT_SYMBOL(device_add_disk);
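
/*
 * Illustrative driver-side sketch (not part of this file) of the usual
 * lifecycle around device_add_disk() for a bio-based driver; the "foo"
 * names and error handling are hypothetical and heavily abbreviated:
 *
 *	struct gendisk *disk;
 *	int err;
 *
 *	disk = blk_alloc_disk(NUMA_NO_NODE);
 *	if (!disk)
 *		return -ENOMEM;
 *	disk->fops = &foo_fops;
 *	// leaving disk->major at 0 allocates from the extended dev_t space
 *	snprintf(disk->disk_name, sizeof(disk->disk_name), "foo0");
 *	set_capacity(disk, nr_sectors);
 *	err = device_add_disk(parent, disk, NULL);
 *	if (err) {
 *		put_disk(disk);
 *		return err;
 *	}
 *	...
 *	// teardown: del_gendisk() first, then drop the final reference
 *	del_gendisk(disk);
 *	put_disk(disk);
 */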
559
560 static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
561 {
562 struct block_device *bdev;
563 unsigned long idx;
564
565 rcu_read_lock();
566 xa_for_each(&disk->part_tbl, idx, bdev) {
567 if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
568 continue;
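		/*
		 * bdev_mark_dead() may sleep, so drop the RCU read lock
		 * while holding a reference on the bdev's device.
		 */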
569 rcu_read_unlock();
570
571 bdev_mark_dead(bdev, surprise);
572
573 put_device(&bdev->bd_device);
574 rcu_read_lock();
575 }
576 rcu_read_unlock();
577 }
578
579 static void __blk_mark_disk_dead(struct gendisk *disk)
580 {
581 /*
582 * Fail any new I/O.
583 */
584 if (test_and_set_bit(GD_DEAD, &disk->state))
585 return;
586
587 if (test_bit(GD_OWNS_QUEUE, &disk->state))
588 blk_queue_flag_set(QUEUE_FLAG_DYING, disk->queue);
589
590 /*
591 * Stop buffered writers from dirtying pages that can't be written out.
592 */
593 set_capacity(disk, 0);
594
595 /*
596 * Prevent new I/O from crossing bio_queue_enter().
597 */
598 blk_queue_start_drain(disk->queue);
599 }
600
601 /**
602 * blk_mark_disk_dead - mark a disk as dead
603 * @disk: disk to mark as dead
604 *
605 * Mark the disk as dead (e.g. surprise removed) and don't accept any new I/O
606 * to this disk.
607 */
608 void blk_mark_disk_dead(struct gendisk *disk)
609 {
610 __blk_mark_disk_dead(disk);
611 blk_report_disk_dead(disk, true);
612 }
613 EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
614
615 /**
616 * del_gendisk - remove the gendisk
617 * @disk: the struct gendisk to remove
618 *
619 * Removes the gendisk and all its associated resources. This deletes the
620 * partitions associated with the gendisk, and unregisters the associated
621 * request_queue.
622 *
623 * This is the counterpart to the respective __device_add_disk() call.
624 *
625 * The final removal of the struct gendisk happens when its refcount reaches 0
626 * with put_disk(), which should be called after del_gendisk(), if
627 * __device_add_disk() was used.
628 *
629 * Drivers exist which depend on the release of the gendisk being
630 * synchronous; it should not be deferred.
631 *
632 * Context: can sleep
633 */
634 void del_gendisk(struct gendisk *disk)
635 {
636 struct request_queue *q = disk->queue;
637 struct block_device *part;
638 unsigned long idx;
639
640 might_sleep();
641
642 if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
643 return;
644
645 disk_del_events(disk);
646
647 /*
648 * Prevent new openers by unlinking the bdev inode.
649 */
650 mutex_lock(&disk->open_mutex);
651 xa_for_each(&disk->part_tbl, idx, part)
652 remove_inode_hash(part->bd_inode);
653 mutex_unlock(&disk->open_mutex);
654
655 /*
656 * Tell the file system to write back all dirty data and shut down if
657 * it hasn't been notified earlier.
658 */
659 if (!test_bit(GD_DEAD, &disk->state))
660 blk_report_disk_dead(disk, false);
661 __blk_mark_disk_dead(disk);
662
663 /*
664 * Drop all partitions now that the disk is marked dead.
665 */
666 mutex_lock(&disk->open_mutex);
667 xa_for_each_start(&disk->part_tbl, idx, part, 1)
668 drop_partition(part);
669 mutex_unlock(&disk->open_mutex);
670
671 if (!(disk->flags & GENHD_FL_HIDDEN)) {
672 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
673
674 /*
675 * Unregister bdi before releasing device numbers (as they can
676 * get reused and we'd get clashes in sysfs).
677 */
678 bdi_unregister(disk->bdi);
679 }
680
681 blk_unregister_queue(disk);
682
683 kobject_put(disk->part0->bd_holder_dir);
684 kobject_put(disk->slave_dir);
685 disk->slave_dir = NULL;
686
687 part_stat_set_all(disk->part0, 0);
688 disk->part0->bd_stamp = 0;
689 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
690 pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
691 device_del(disk_to_dev(disk));
692
693 blk_mq_freeze_queue_wait(q);
694
695 blk_throtl_cancel_bios(disk);
696
697 blk_sync_queue(q);
698 blk_flush_integrity();
699
700 if (queue_is_mq(q))
701 blk_mq_cancel_work_sync(q);
702
703 blk_mq_quiesce_queue(q);
704 if (q->elevator) {
705 mutex_lock(&q->sysfs_lock);
706 elevator_exit(q);
707 mutex_unlock(&q->sysfs_lock);
708 }
709 rq_qos_exit(q);
710 blk_mq_unquiesce_queue(q);
711
712 /*
713 * If the disk does not own the queue, allow using passthrough requests
714 * again. Else leave the queue frozen to fail all I/O.
715 */
716 if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
717 blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
718 __blk_mq_unfreeze_queue(q, true);
719 } else {
720 if (queue_is_mq(q))
721 blk_mq_exit_queue(q);
722 }
723 }
724 EXPORT_SYMBOL(del_gendisk);
725
726 /**
727 * invalidate_disk - invalidate the disk
728 * @disk: the struct gendisk to invalidate
729 *
730 * A helper to invalidate the disk. It cleans the disk's associated
731 * buffer/page caches and resets its internal state so that the disk
732 * can be reused by drivers.
733 *
734 * Context: can sleep
735 */
736 void invalidate_disk(struct gendisk *disk)
737 {
738 struct block_device *bdev = disk->part0;
739
740 invalidate_bdev(bdev);
741 bdev->bd_inode->i_mapping->wb_err = 0;
742 set_capacity(disk, 0);
743 }
744 EXPORT_SYMBOL(invalidate_disk);
745
746 /* sysfs access to bad-blocks list. */
747 static ssize_t disk_badblocks_show(struct device *dev,
748 struct device_attribute *attr,
749 char *page)
750 {
751 struct gendisk *disk = dev_to_disk(dev);
752
753 if (!disk->bb)
754 return sprintf(page, "\n");
755
756 return badblocks_show(disk->bb, page, 0);
757 }
758
759 static ssize_t disk_badblocks_store(struct device *dev,
760 struct device_attribute *attr,
761 const char *page, size_t len)
762 {
763 struct gendisk *disk = dev_to_disk(dev);
764
765 if (!disk->bb)
766 return -ENXIO;
767
768 return badblocks_store(disk->bb, page, len, 0);
769 }
770
771 #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
772 void blk_request_module(dev_t devt)
773 {
774 unsigned int major = MAJOR(devt);
775 struct blk_major_name **n;
776
777 mutex_lock(&major_names_lock);
778 for (n = &major_names[major_to_index(major)]; *n; n = &(*n)->next) {
779 if ((*n)->major == major && (*n)->probe) {
780 (*n)->probe(devt);
781 mutex_unlock(&major_names_lock);
782 return;
783 }
784 }
785 mutex_unlock(&major_names_lock);
786
787 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
788 /* Make old-style 2.4 aliases work */
789 request_module("block-major-%d", MAJOR(devt));
790 }
791 #endif /* CONFIG_BLOCK_LEGACY_AUTOLOAD */
792
793 #ifdef CONFIG_PROC_FS
794 /* iterator */
795 static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
796 {
797 loff_t skip = *pos;
798 struct class_dev_iter *iter;
799 struct device *dev;
800
801 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
802 if (!iter)
803 return ERR_PTR(-ENOMEM);
804
805 seqf->private = iter;
806 class_dev_iter_init(iter, &block_class, NULL, &disk_type);
807 do {
808 dev = class_dev_iter_next(iter);
809 if (!dev)
810 return NULL;
811 } while (skip--);
812
813 return dev_to_disk(dev);
814 }
815
816 static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
817 {
818 struct device *dev;
819
820 (*pos)++;
821 dev = class_dev_iter_next(seqf->private);
822 if (dev)
823 return dev_to_disk(dev);
824
825 return NULL;
826 }
827
828 static void disk_seqf_stop(struct seq_file *seqf, void *v)
829 {
830 struct class_dev_iter *iter = seqf->private;
831
832 /* stop is called even after start failed :-( */
833 if (iter) {
834 class_dev_iter_exit(iter);
835 kfree(iter);
836 seqf->private = NULL;
837 }
838 }
839
840 static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
841 {
842 void *p;
843
844 p = disk_seqf_start(seqf, pos);
845 if (!IS_ERR_OR_NULL(p) && !*pos)
846 seq_puts(seqf, "major minor #blocks name\n\n");
847 return p;
848 }
849
850 static int show_partition(struct seq_file *seqf, void *v)
851 {
852 struct gendisk *sgp = v;
853 struct block_device *part;
854 unsigned long idx;
855
856 if (!get_capacity(sgp) || (sgp->flags & GENHD_FL_HIDDEN))
857 return 0;
858
859 rcu_read_lock();
860 xa_for_each(&sgp->part_tbl, idx, part) {
861 if (!bdev_nr_sectors(part))
862 continue;
863 seq_printf(seqf, "%4d %7d %10llu %pg\n",
864 MAJOR(part->bd_dev), MINOR(part->bd_dev),
865 bdev_nr_sectors(part) >> 1, part);
866 }
867 rcu_read_unlock();
868 return 0;
869 }
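
/*
 * The resulting /proc/partitions layout looks like this (values are
 * illustrative):
 *
 *	major minor  #blocks  name
 *
 *	   8       0   62914560 sda
 *	   8       1   62913536 sda1
 */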
870
871 static const struct seq_operations partitions_op = {
872 .start = show_partition_start,
873 .next = disk_seqf_next,
874 .stop = disk_seqf_stop,
875 .show = show_partition
876 };
877 #endif
878
879 static int __init genhd_device_init(void)
880 {
881 int error;
882
883 error = class_register(&block_class);
884 if (unlikely(error))
885 return error;
886 blk_dev_init();
887
888 register_blkdev(BLOCK_EXT_MAJOR, "blkext");
889
890 /* create top-level block dir */
891 block_depr = kobject_create_and_add("block", NULL);
892 return 0;
893 }
894
895 subsys_initcall(genhd_device_init);
896
897 static ssize_t disk_range_show(struct device *dev,
898 struct device_attribute *attr, char *buf)
899 {
900 struct gendisk *disk = dev_to_disk(dev);
901
902 return sprintf(buf, "%d\n", disk->minors);
903 }
904
905 static ssize_t disk_ext_range_show(struct device *dev,
906 struct device_attribute *attr, char *buf)
907 {
908 struct gendisk *disk = dev_to_disk(dev);
909
910 return sprintf(buf, "%d\n",
911 (disk->flags & GENHD_FL_NO_PART) ? 1 : DISK_MAX_PARTS);
912 }
913
914 static ssize_t disk_removable_show(struct device *dev,
915 struct device_attribute *attr, char *buf)
916 {
917 struct gendisk *disk = dev_to_disk(dev);
918
919 return sprintf(buf, "%d\n",
920 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
921 }
922
923 static ssize_t disk_hidden_show(struct device *dev,
924 struct device_attribute *attr, char *buf)
925 {
926 struct gendisk *disk = dev_to_disk(dev);
927
928 return sprintf(buf, "%d\n",
929 (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
930 }
931
932 static ssize_t disk_ro_show(struct device *dev,
933 struct device_attribute *attr, char *buf)
934 {
935 struct gendisk *disk = dev_to_disk(dev);
936
937 return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
938 }
939
940 ssize_t part_size_show(struct device *dev,
941 struct device_attribute *attr, char *buf)
942 {
943 return sprintf(buf, "%llu\n", bdev_nr_sectors(dev_to_bdev(dev)));
944 }
945
946 ssize_t part_stat_show(struct device *dev,
947 struct device_attribute *attr, char *buf)
948 {
949 struct block_device *bdev = dev_to_bdev(dev);
950 struct request_queue *q = bdev_get_queue(bdev);
951 struct disk_stats stat;
952 unsigned int inflight;
953
954 if (queue_is_mq(q))
955 inflight = blk_mq_in_flight(q, bdev);
956 else
957 inflight = part_in_flight(bdev);
958
959 if (inflight) {
960 part_stat_lock();
961 update_io_ticks(bdev, jiffies, true);
962 part_stat_unlock();
963 }
964 part_stat_read_all(bdev, &stat);
965 return sprintf(buf,
966 "%8lu %8lu %8llu %8u "
967 "%8lu %8lu %8llu %8u "
968 "%8u %8u %8u "
969 "%8lu %8lu %8llu %8u "
970 "%8lu %8u"
971 "\n",
972 stat.ios[STAT_READ],
973 stat.merges[STAT_READ],
974 (unsigned long long)stat.sectors[STAT_READ],
975 (unsigned int)div_u64(stat.nsecs[STAT_READ], NSEC_PER_MSEC),
976 stat.ios[STAT_WRITE],
977 stat.merges[STAT_WRITE],
978 (unsigned long long)stat.sectors[STAT_WRITE],
979 (unsigned int)div_u64(stat.nsecs[STAT_WRITE], NSEC_PER_MSEC),
980 inflight,
981 jiffies_to_msecs(stat.io_ticks),
982 (unsigned int)div_u64(stat.nsecs[STAT_READ] +
983 stat.nsecs[STAT_WRITE] +
984 stat.nsecs[STAT_DISCARD] +
985 stat.nsecs[STAT_FLUSH],
986 NSEC_PER_MSEC),
987 stat.ios[STAT_DISCARD],
988 stat.merges[STAT_DISCARD],
989 (unsigned long long)stat.sectors[STAT_DISCARD],
990 (unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
991 stat.ios[STAT_FLUSH],
992 (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
993 }
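
/*
 * The columns above form the per-device "stat" sysfs attribute: read
 * I/Os, merges, sectors and ticks; the same four fields for writes;
 * in-flight, io_ticks and time-in-queue; the discard quadruple; and
 * finally flush I/Os and flush ticks (all times in milliseconds).
 */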
994
995 ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
996 char *buf)
997 {
998 struct block_device *bdev = dev_to_bdev(dev);
999 struct request_queue *q = bdev_get_queue(bdev);
1000 unsigned int inflight[2];
1001
1002 if (queue_is_mq(q))
1003 blk_mq_in_flight_rw(q, bdev, inflight);
1004 else
1005 part_in_flight_rw(bdev, inflight);
1006
1007 return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
1008 }
1009
1010 static ssize_t disk_capability_show(struct device *dev,
1011 struct device_attribute *attr, char *buf)
1012 {
1013 dev_warn_once(dev, "the capability attribute has been deprecated.\n");
1014 return sprintf(buf, "0\n");
1015 }
1016
1017 static ssize_t disk_alignment_offset_show(struct device *dev,
1018 struct device_attribute *attr,
1019 char *buf)
1020 {
1021 struct gendisk *disk = dev_to_disk(dev);
1022
1023 return sprintf(buf, "%d\n", bdev_alignment_offset(disk->part0));
1024 }
1025
1026 static ssize_t disk_discard_alignment_show(struct device *dev,
1027 struct device_attribute *attr,
1028 char *buf)
1029 {
1030 struct gendisk *disk = dev_to_disk(dev);
1031
1032 return sprintf(buf, "%d\n", bdev_discard_alignment(disk->part0));
1033 }
1034
1035 static ssize_t diskseq_show(struct device *dev,
1036 struct device_attribute *attr, char *buf)
1037 {
1038 struct gendisk *disk = dev_to_disk(dev);
1039
1040 return sprintf(buf, "%llu\n", disk->diskseq);
1041 }
1042
1043 static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
1044 static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
1045 static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
1046 static DEVICE_ATTR(hidden, 0444, disk_hidden_show, NULL);
1047 static DEVICE_ATTR(ro, 0444, disk_ro_show, NULL);
1048 static DEVICE_ATTR(size, 0444, part_size_show, NULL);
1049 static DEVICE_ATTR(alignment_offset, 0444, disk_alignment_offset_show, NULL);
1050 static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL);
1051 static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
1052 static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
1053 static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
1054 static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
1055 static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
1056
1057 #ifdef CONFIG_FAIL_MAKE_REQUEST
1058 ssize_t part_fail_show(struct device *dev,
1059 struct device_attribute *attr, char *buf)
1060 {
1061 return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
1062 }
1063
1064 ssize_t part_fail_store(struct device *dev,
1065 struct device_attribute *attr,
1066 const char *buf, size_t count)
1067 {
1068 int i;
1069
1070 if (count > 0 && sscanf(buf, "%d", &i) > 0)
1071 dev_to_bdev(dev)->bd_make_it_fail = i;
1072
1073 return count;
1074 }
1075
1076 static struct device_attribute dev_attr_fail =
1077 __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
1078 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1079
1080 #ifdef CONFIG_FAIL_IO_TIMEOUT
1081 static struct device_attribute dev_attr_fail_timeout =
1082 __ATTR(io-timeout-fail, 0644, part_timeout_show, part_timeout_store);
1083 #endif
1084
1085 static struct attribute *disk_attrs[] = {
1086 &dev_attr_range.attr,
1087 &dev_attr_ext_range.attr,
1088 &dev_attr_removable.attr,
1089 &dev_attr_hidden.attr,
1090 &dev_attr_ro.attr,
1091 &dev_attr_size.attr,
1092 &dev_attr_alignment_offset.attr,
1093 &dev_attr_discard_alignment.attr,
1094 &dev_attr_capability.attr,
1095 &dev_attr_stat.attr,
1096 &dev_attr_inflight.attr,
1097 &dev_attr_badblocks.attr,
1098 &dev_attr_events.attr,
1099 &dev_attr_events_async.attr,
1100 &dev_attr_events_poll_msecs.attr,
1101 &dev_attr_diskseq.attr,
1102 #ifdef CONFIG_FAIL_MAKE_REQUEST
1103 &dev_attr_fail.attr,
1104 #endif
1105 #ifdef CONFIG_FAIL_IO_TIMEOUT
1106 &dev_attr_fail_timeout.attr,
1107 #endif
1108 NULL
1109 };
1110
1111 static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
1112 {
1113 struct device *dev = container_of(kobj, typeof(*dev), kobj);
1114 struct gendisk *disk = dev_to_disk(dev);
1115
1116 if (a == &dev_attr_badblocks.attr && !disk->bb)
1117 return 0;
1118 return a->mode;
1119 }
1120
1121 static struct attribute_group disk_attr_group = {
1122 .attrs = disk_attrs,
1123 .is_visible = disk_visible,
1124 };
1125
1126 static const struct attribute_group *disk_attr_groups[] = {
1127 &disk_attr_group,
1128 #ifdef CONFIG_BLK_DEV_IO_TRACE
1129 &blk_trace_attr_group,
1130 #endif
1131 #ifdef CONFIG_BLK_DEV_INTEGRITY
1132 &blk_integrity_attr_group,
1133 #endif
1134 NULL
1135 };
1136
1137 /**
1138 * disk_release - releases all allocated resources of the gendisk
1139 * @dev: the device representing this disk
1140 *
1141 * This function releases all allocated resources of the gendisk.
1142 *
1143 * Drivers which used __device_add_disk() have a gendisk with a request_queue
1144 * assigned. Since the request_queue sits on top of the gendisk for these
1145 * drivers we also call blk_put_queue() for them, and we expect the
1146 * request_queue refcount to reach 0 at this point, and so the request_queue
1147 * will also be freed prior to the disk.
1148 *
1149 * Context: can sleep
1150 */
1151 static void disk_release(struct device *dev)
1152 {
1153 struct gendisk *disk = dev_to_disk(dev);
1154
1155 might_sleep();
1156 WARN_ON_ONCE(disk_live(disk));
1157
1158 blk_trace_remove(disk->queue);
1159
1160 /*
1161 * To undo all of the initialization from blk_mq_init_allocated_queue in
1162 * case of a probe failure where add_disk is never called, we have to
1163 * call blk_mq_exit_queue here. We can't do this for the more common
1164 * teardown case (yet) as the tagset can be gone by the time the disk
1165 * is released once it was added.
1166 */
1167 if (queue_is_mq(disk->queue) &&
1168 test_bit(GD_OWNS_QUEUE, &disk->state) &&
1169 !test_bit(GD_ADDED, &disk->state))
1170 blk_mq_exit_queue(disk->queue);
1171
1172 blkcg_exit_disk(disk);
1173
1174 bioset_exit(&disk->bio_split);
1175
1176 disk_release_events(disk);
1177 kfree(disk->random);
1178 disk_free_zone_bitmaps(disk);
1179 xa_destroy(&disk->part_tbl);
1180
1181 disk->queue->disk = NULL;
1182 blk_put_queue(disk->queue);
1183
1184 if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk)
1185 disk->fops->free_disk(disk);
1186
1187 iput(disk->part0->bd_inode); /* frees the disk */
1188 }
1189
1190 static int block_uevent(const struct device *dev, struct kobj_uevent_env *env)
1191 {
1192 const struct gendisk *disk = dev_to_disk(dev);
1193
1194 return add_uevent_var(env, "DISKSEQ=%llu", disk->diskseq);
1195 }
1196
1197 struct class block_class = {
1198 .name = "block",
1199 .dev_uevent = block_uevent,
1200 };
1201
1202 static char *block_devnode(const struct device *dev, umode_t *mode,
1203 kuid_t *uid, kgid_t *gid)
1204 {
1205 struct gendisk *disk = dev_to_disk(dev);
1206
1207 if (disk->fops->devnode)
1208 return disk->fops->devnode(disk, mode);
1209 return NULL;
1210 }
1211
1212 const struct device_type disk_type = {
1213 .name = "disk",
1214 .groups = disk_attr_groups,
1215 .release = disk_release,
1216 .devnode = block_devnode,
1217 };
1218
1219 #ifdef CONFIG_PROC_FS
1220 /*
1221 * aggregate disk stat collector. Uses the same stats that the sysfs
1222 * entries do, above, but makes them available through one seq_file.
1223 *
1224 * The output looks suspiciously like /proc/partitions with a bunch of
1225 * extra fields.
1226 */
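
/*
 * A resulting /proc/diskstats line, with illustrative values, looks like:
 *
 *	   8       0 sda 98348 1024 5430720 42000 21478 5120 1835008 31000
 *	   0 58000 74000 0 0 0 0 120 1000
 */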
1227 static int diskstats_show(struct seq_file *seqf, void *v)
1228 {
1229 struct gendisk *gp = v;
1230 struct block_device *hd;
1231 unsigned int inflight;
1232 struct disk_stats stat;
1233 unsigned long idx;
1234
1235 /*
1236 if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
1237 seq_puts(seqf, "major minor name"
1238 " rio rmerge rsect ruse wio wmerge "
1239 "wsect wuse running use aveq"
1240 "\n\n");
1241 */
1242
1243 rcu_read_lock();
1244 xa_for_each(&gp->part_tbl, idx, hd) {
1245 if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
1246 continue;
1247 if (queue_is_mq(gp->queue))
1248 inflight = blk_mq_in_flight(gp->queue, hd);
1249 else
1250 inflight = part_in_flight(hd);
1251
1252 if (inflight) {
1253 part_stat_lock();
1254 update_io_ticks(hd, jiffies, true);
1255 part_stat_unlock();
1256 }
1257 part_stat_read_all(hd, &stat);
1258 seq_printf(seqf, "%4d %7d %pg "
1259 "%lu %lu %lu %u "
1260 "%lu %lu %lu %u "
1261 "%u %u %u "
1262 "%lu %lu %lu %u "
1263 "%lu %u"
1264 "\n",
1265 MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd,
1266 stat.ios[STAT_READ],
1267 stat.merges[STAT_READ],
1268 stat.sectors[STAT_READ],
1269 (unsigned int)div_u64(stat.nsecs[STAT_READ],
1270 NSEC_PER_MSEC),
1271 stat.ios[STAT_WRITE],
1272 stat.merges[STAT_WRITE],
1273 stat.sectors[STAT_WRITE],
1274 (unsigned int)div_u64(stat.nsecs[STAT_WRITE],
1275 NSEC_PER_MSEC),
1276 inflight,
1277 jiffies_to_msecs(stat.io_ticks),
1278 (unsigned int)div_u64(stat.nsecs[STAT_READ] +
1279 stat.nsecs[STAT_WRITE] +
1280 stat.nsecs[STAT_DISCARD] +
1281 stat.nsecs[STAT_FLUSH],
1282 NSEC_PER_MSEC),
1283 stat.ios[STAT_DISCARD],
1284 stat.merges[STAT_DISCARD],
1285 stat.sectors[STAT_DISCARD],
1286 (unsigned int)div_u64(stat.nsecs[STAT_DISCARD],
1287 NSEC_PER_MSEC),
1288 stat.ios[STAT_FLUSH],
1289 (unsigned int)div_u64(stat.nsecs[STAT_FLUSH],
1290 NSEC_PER_MSEC)
1291 );
1292 }
1293 rcu_read_unlock();
1294
1295 return 0;
1296 }
1297
1298 static const struct seq_operations diskstats_op = {
1299 .start = disk_seqf_start,
1300 .next = disk_seqf_next,
1301 .stop = disk_seqf_stop,
1302 .show = diskstats_show
1303 };
1304
1305 static int __init proc_genhd_init(void)
1306 {
1307 proc_create_seq("diskstats", 0, NULL, &diskstats_op);
1308 proc_create_seq("partitions", 0, NULL, &partitions_op);
1309 return 0;
1310 }
1311 module_init(proc_genhd_init);
1312 #endif /* CONFIG_PROC_FS */
1313
1314 dev_t part_devt(struct gendisk *disk, u8 partno)
1315 {
1316 struct block_device *part;
1317 dev_t devt = 0;
1318
1319 rcu_read_lock();
1320 part = xa_load(&disk->part_tbl, partno);
1321 if (part)
1322 devt = part->bd_dev;
1323 rcu_read_unlock();
1324
1325 return devt;
1326 }
1327
1328 struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
1329 struct lock_class_key *lkclass)
1330 {
1331 struct gendisk *disk;
1332
1333 disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
1334 if (!disk)
1335 return NULL;
1336
1337 if (bioset_init(&disk->bio_split, BIO_POOL_SIZE, 0, 0))
1338 goto out_free_disk;
1339
1340 disk->bdi = bdi_alloc(node_id);
1341 if (!disk->bdi)
1342 goto out_free_bioset;
1343
1344 /* bdev_alloc() might need the queue, set before the first call */
1345 disk->queue = q;
1346
1347 disk->part0 = bdev_alloc(disk, 0);
1348 if (!disk->part0)
1349 goto out_free_bdi;
1350
1351 disk->node_id = node_id;
1352 mutex_init(&disk->open_mutex);
1353 xa_init(&disk->part_tbl);
1354 if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
1355 goto out_destroy_part_tbl;
1356
1357 if (blkcg_init_disk(disk))
1358 goto out_erase_part0;
1359
1360 rand_initialize_disk(disk);
1361 disk_to_dev(disk)->class = &block_class;
1362 disk_to_dev(disk)->type = &disk_type;
1363 device_initialize(disk_to_dev(disk));
1364 inc_diskseq(disk);
1365 q->disk = disk;
1366 lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0);
1367 #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
1368 INIT_LIST_HEAD(&disk->slave_bdevs);
1369 #endif
1370 return disk;
1371
1372 out_erase_part0:
1373 xa_erase(&disk->part_tbl, 0);
1374 out_destroy_part_tbl:
1375 xa_destroy(&disk->part_tbl);
1376 disk->part0->bd_disk = NULL;
1377 iput(disk->part0->bd_inode);
1378 out_free_bdi:
1379 bdi_put(disk->bdi);
1380 out_free_bioset:
1381 bioset_exit(&disk->bio_split);
1382 out_free_disk:
1383 kfree(disk);
1384 return NULL;
1385 }
1386
1387 struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
1388 {
1389 struct request_queue *q;
1390 struct gendisk *disk;
1391
1392 q = blk_alloc_queue(node);
1393 if (!q)
1394 return NULL;
1395
1396 disk = __alloc_disk_node(q, node, lkclass);
1397 if (!disk) {
1398 blk_put_queue(q);
1399 return NULL;
1400 }
1401 set_bit(GD_OWNS_QUEUE, &disk->state);
1402 return disk;
1403 }
1404 EXPORT_SYMBOL(__blk_alloc_disk);
1405
1406 /**
1407 * put_disk - decrements the gendisk refcount
1408 * @disk: the struct gendisk to decrement the refcount for
1409 *
1410 * This decrements the refcount for the struct gendisk. When this reaches 0
1411 * we'll have disk_release() called.
1412 *
1413 * Note: for blk-mq disks, put_disk() must be called before freeing the tag_set
1414 * when handling probe errors (that is before add_disk() is called).
1415 *
1416 * Context: Any context, but the last reference must not be dropped from
1417 * atomic context.
1418 */
1419 void put_disk(struct gendisk *disk)
1420 {
1421 if (disk)
1422 put_device(disk_to_dev(disk));
1423 }
1424 EXPORT_SYMBOL(put_disk);
1425
1426 static void set_disk_ro_uevent(struct gendisk *gd, int ro)
1427 {
1428 char event[] = "DISK_RO=1";
1429 char *envp[] = { event, NULL };
1430
1431 if (!ro)
1432 event[8] = '0';
1433 kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
1434 }
1435
1436 /**
1437 * set_disk_ro - set a gendisk read-only
1438 * @disk: gendisk to operate on
1439 * @read_only: %true to set the disk read-only, %false set the disk read/write
1440 *
1441 * This function is used to indicate whether a given disk device should have its
1442 * read-only flag set. set_disk_ro() is typically used by device drivers to
1443 * indicate whether the underlying physical device is write-protected.
1444 */
1445 void set_disk_ro(struct gendisk *disk, bool read_only)
1446 {
1447 if (read_only) {
1448 if (test_and_set_bit(GD_READ_ONLY, &disk->state))
1449 return;
1450 } else {
1451 if (!test_and_clear_bit(GD_READ_ONLY, &disk->state))
1452 return;
1453 }
1454 set_disk_ro_uevent(disk, read_only);
1455 }
1456 EXPORT_SYMBOL(set_disk_ro);
1457
1458 void inc_diskseq(struct gendisk *disk)
1459 {
1460 disk->diskseq = atomic64_inc_return(&diskseq);
1461 }
1462