Lines matching refs:bdev (block/bdev.c, Linux kernel)

34 	struct block_device bdev;  member
45 return &BDEV_I(inode)->bdev; in I_BDEV()
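The pair of hits above reflects the container pattern that ties each block_device to a backing inode. A minimal sketch of that pattern, assuming the usual bdev_inode layout; only the embedded bdev member is confirmed by the hits, the vfs_inode field name is an assumption:

#include <linux/blk_types.h>	/* struct block_device */
#include <linux/container_of.h>
#include <linux/fs.h>		/* struct inode */

struct bdev_inode {
	struct block_device bdev;	/* the member matched above */
	struct inode vfs_inode;		/* assumed field name */
};

static struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;	/* the hit above */
}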
49 static void bdev_write_inode(struct block_device *bdev) in bdev_write_inode() argument
51 struct inode *inode = bdev->bd_inode; in bdev_write_inode()
61 bdev, ret); in bdev_write_inode()
68 static void kill_bdev(struct block_device *bdev) in kill_bdev() argument
70 struct address_space *mapping = bdev->bd_inode->i_mapping; in kill_bdev()
80 void invalidate_bdev(struct block_device *bdev) in invalidate_bdev() argument
82 struct address_space *mapping = bdev->bd_inode->i_mapping; in invalidate_bdev()
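kill_bdev() truncates every cached page of the device, discarding dirty data, while invalidate_bdev() only drops clean, unmapped pages. A hedged usage sketch of the exported helper, e.g. for a removable-media driver reacting to a media change (my_handle_media_change() is hypothetical):

#include <linux/blkdev.h>

static void my_handle_media_change(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* write back whatever still can be */
	invalidate_bdev(bdev);	/* then drop the now-stale clean pages */
}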
96 int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, in truncate_bdev_range() argument
105 int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); in truncate_bdev_range()
110 truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); in truncate_bdev_range()
112 bd_abort_claiming(bdev, truncate_bdev_range); in truncate_bdev_range()
120 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, in truncate_bdev_range()
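The matched fragments assemble into a claim-guarded truncate: when the caller does not already hold the device exclusively, it is claimed for the duration of the page-cache truncation so no concurrent opener sees it half done, and if someone else holds the exclusive claim the function falls back to invalidating the range instead. A reconstruction of the full flow, with the unmatched lines filled in as assumptions:

int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend)
{
	/* Upgrade to an exclusive claim while discarding the cache. */
	if (!(mode & BLK_OPEN_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);

		if (err)
			goto invalidate;	/* assumed fallback target */
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/* Someone holds the device exclusively: invalidate instead. */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}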
125 static void set_init_blocksize(struct block_device *bdev) in set_init_blocksize() argument
127 unsigned int bsize = bdev_logical_block_size(bdev); in set_init_blocksize()
128 loff_t size = i_size_read(bdev->bd_inode); in set_init_blocksize()
135 bdev->bd_inode->i_blkbits = blksize_bits(bsize); in set_init_blocksize()
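Between those hits the function grows the block size upward from the device's logical block size for as long as the device size stays aligned, capping at PAGE_SIZE. The loop below fills that gap as an assumption:

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	/* Assumed body: double bsize while size remains bsize-aligned. */
	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}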
138 int set_blocksize(struct block_device *bdev, int size) in set_blocksize() argument
145 if (size < bdev_logical_block_size(bdev)) in set_blocksize()
149 if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { in set_blocksize()
150 sync_blockdev(bdev); in set_blocksize()
151 bdev->bd_inode->i_blkbits = blksize_bits(size); in set_blocksize()
152 kill_bdev(bdev); in set_blocksize()
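The hits show the logical-block-size floor and the sync/switch/shootdown sequence; the first check (unmatched here) is presumably the classic power-of-two, 512..PAGE_SIZE validation. A reconstruction under that assumption; filesystems usually reach this through sb_set_blocksize() rather than calling it directly:

#include <linux/log2.h>		/* is_power_of_2() */

int set_blocksize(struct block_device *bdev, int size)
{
	/* Assumed check: power of two, between 512 and PAGE_SIZE. */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Only flush and switch when the size actually changes. */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}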
182 int sync_blockdev_nowait(struct block_device *bdev) in sync_blockdev_nowait() argument
184 if (!bdev) in sync_blockdev_nowait()
186 return filemap_flush(bdev->bd_inode->i_mapping); in sync_blockdev_nowait()
194 int sync_blockdev(struct block_device *bdev) in sync_blockdev() argument
196 if (!bdev) in sync_blockdev()
198 return filemap_write_and_wait(bdev->bd_inode->i_mapping); in sync_blockdev()
202 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) in sync_blockdev_range() argument
204 return filemap_write_and_wait_range(bdev->bd_inode->i_mapping, in sync_blockdev_range()
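The three variants map to three writeback strengths: filemap_flush() starts writeback without waiting, filemap_write_and_wait() starts it and waits for completion, and the _range form bounds the wait to a byte window. A small usage sketch (the 1 MiB split is hypothetical):

#include <linux/blkdev.h>
#include <linux/sizes.h>

static int my_sync_header_then_kick(struct block_device *bdev)
{
	/* Wait for the first MiB, then start a non-blocking flush
	 * of the whole device. */
	int err = sync_blockdev_range(bdev, 0, SZ_1M - 1);

	if (err)
		return err;
	return sync_blockdev_nowait(bdev);
}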
221 int freeze_bdev(struct block_device *bdev) in freeze_bdev() argument
226 mutex_lock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
227 if (++bdev->bd_fsfreeze_count > 1) in freeze_bdev()
230 sb = get_active_super(bdev); in freeze_bdev()
240 bdev->bd_fsfreeze_count--; in freeze_bdev()
243 bdev->bd_fsfreeze_sb = sb; in freeze_bdev()
246 sync_blockdev(bdev); in freeze_bdev()
248 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
259 int thaw_bdev(struct block_device *bdev) in thaw_bdev() argument
264 mutex_lock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
265 if (!bdev->bd_fsfreeze_count) in thaw_bdev()
269 if (--bdev->bd_fsfreeze_count > 0) in thaw_bdev()
272 sb = bdev->bd_fsfreeze_sb; in thaw_bdev()
281 bdev->bd_fsfreeze_count++; in thaw_bdev()
283 bdev->bd_fsfreeze_sb = NULL; in thaw_bdev()
285 mutex_unlock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
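freeze_bdev() and thaw_bdev() nest through bd_fsfreeze_count: only the first freeze syncs and freezes the superblock, and only the matching final thaw releases it, so callers must pair the two exactly. A sketch of the usual quiesce-around-snapshot pairing (take_snapshot() is hypothetical):

#include <linux/blkdev.h>

static int my_snapshot(struct block_device *bdev)
{
	int err = freeze_bdev(bdev);

	if (err)
		return err;
	err = take_snapshot(bdev);	/* hypothetical snapshot step */
	thaw_bdev(bdev);		/* must pair with the freeze above */
	return err;
}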
303 memset(&ei->bdev, 0, sizeof(ei->bdev)); in bdev_alloc_inode()
309 struct block_device *bdev = I_BDEV(inode); in bdev_free_inode() local
311 free_percpu(bdev->bd_stats); in bdev_free_inode()
312 kfree(bdev->bd_meta_info); in bdev_free_inode()
314 if (!bdev_is_partition(bdev)) { in bdev_free_inode()
315 if (bdev->bd_disk && bdev->bd_disk->bdi) in bdev_free_inode()
316 bdi_put(bdev->bd_disk->bdi); in bdev_free_inode()
317 kfree(bdev->bd_disk); in bdev_free_inode()
320 if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) in bdev_free_inode()
321 blk_free_ext_minor(MINOR(bdev->bd_dev)); in bdev_free_inode()
387 struct block_device *bdev; in bdev_alloc() local
398 bdev = I_BDEV(inode); in bdev_alloc()
399 mutex_init(&bdev->bd_fsfreeze_mutex); in bdev_alloc()
400 spin_lock_init(&bdev->bd_size_lock); in bdev_alloc()
401 mutex_init(&bdev->bd_holder_lock); in bdev_alloc()
402 bdev->bd_partno = partno; in bdev_alloc()
403 bdev->bd_inode = inode; in bdev_alloc()
404 bdev->bd_queue = disk->queue; in bdev_alloc()
406 bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio; in bdev_alloc()
408 bdev->bd_has_submit_bio = false; in bdev_alloc()
409 bdev->bd_stats = alloc_percpu(struct disk_stats); in bdev_alloc()
410 if (!bdev->bd_stats) { in bdev_alloc()
414 bdev->bd_disk = disk; in bdev_alloc()
415 return bdev; in bdev_alloc()
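The error branch on the per-CPU stats allocation is cut short by the matcher; presumably it drops the freshly allocated inode and returns NULL, along these lines (the cleanup is an assumption):

	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);	/* assumed: release the new inode */
		return NULL;
	}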
418 void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) in bdev_set_nr_sectors() argument
420 spin_lock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
421 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT); in bdev_set_nr_sectors()
422 bdev->bd_nr_sectors = sectors; in bdev_set_nr_sectors()
423 spin_unlock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
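The spinlock keeps i_size and bd_nr_sectors coherent, so readers may use either view of the capacity. For example (helper name hypothetical):

#include <linux/blkdev.h>

static loff_t my_bdev_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}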
426 void bdev_add(struct block_device *bdev, dev_t dev) in bdev_add() argument
428 if (bdev_stable_writes(bdev)) in bdev_add()
429 mapping_set_stable_writes(bdev->bd_inode->i_mapping); in bdev_add()
430 bdev->bd_dev = dev; in bdev_add()
431 bdev->bd_inode->i_rdev = dev; in bdev_add()
432 bdev->bd_inode->i_ino = dev; in bdev_add()
433 insert_inode_hash(bdev->bd_inode); in bdev_add()
460 static bool bd_may_claim(struct block_device *bdev, void *holder, in bd_may_claim() argument
463 struct block_device *whole = bdev_whole(bdev); in bd_may_claim()
467 if (bdev->bd_holder) { in bd_may_claim()
471 if (bdev->bd_holder == holder) { in bd_may_claim()
472 if (WARN_ON_ONCE(bdev->bd_holder_ops != hops)) in bd_may_claim()
483 if (whole != bdev && in bd_may_claim()
502 int bd_prepare_to_claim(struct block_device *bdev, void *holder, in bd_prepare_to_claim() argument
505 struct block_device *whole = bdev_whole(bdev); in bd_prepare_to_claim()
512 if (!bd_may_claim(bdev, holder, hops)) { in bd_prepare_to_claim()
554 static void bd_finish_claiming(struct block_device *bdev, void *holder, in bd_finish_claiming() argument
557 struct block_device *whole = bdev_whole(bdev); in bd_finish_claiming()
560 BUG_ON(!bd_may_claim(bdev, holder, hops)); in bd_finish_claiming()
567 bdev->bd_holders++; in bd_finish_claiming()
568 mutex_lock(&bdev->bd_holder_lock); in bd_finish_claiming()
569 bdev->bd_holder = holder; in bd_finish_claiming()
570 bdev->bd_holder_ops = hops; in bd_finish_claiming()
571 mutex_unlock(&bdev->bd_holder_lock); in bd_finish_claiming()
585 void bd_abort_claiming(struct block_device *bdev, void *holder) in bd_abort_claiming() argument
588 bd_clear_claiming(bdev_whole(bdev), holder); in bd_abort_claiming()
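Taken together, bd_prepare_to_claim(), bd_finish_claiming() and bd_abort_claiming() form a two-phase handshake on the whole device: prepare marks the claim pending, then the open either completes it or backs out. A sketch of the sequence as the open path drives it (my_open_hw() is hypothetical, and these helpers' visibility outside this file is an assumption):

#include <linux/blkdev.h>

static int my_claimed_open(struct block_device *bdev, void *holder,
			   const struct blk_holder_ops *hops)
{
	int ret = bd_prepare_to_claim(bdev, holder, hops);

	if (ret)
		return ret;		/* conflicting claim already held */
	ret = my_open_hw(bdev);		/* hypothetical open work */
	if (ret) {
		bd_abort_claiming(bdev, holder);
		return ret;
	}
	bd_finish_claiming(bdev, holder, hops);
	return 0;
}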
593 static void bd_end_claim(struct block_device *bdev, void *holder) in bd_end_claim() argument
595 struct block_device *whole = bdev_whole(bdev); in bd_end_claim()
603 WARN_ON_ONCE(bdev->bd_holder != holder); in bd_end_claim()
604 WARN_ON_ONCE(--bdev->bd_holders < 0); in bd_end_claim()
606 if (!bdev->bd_holders) { in bd_end_claim()
607 mutex_lock(&bdev->bd_holder_lock); in bd_end_claim()
608 bdev->bd_holder = NULL; in bd_end_claim()
609 bdev->bd_holder_ops = NULL; in bd_end_claim()
610 mutex_unlock(&bdev->bd_holder_lock); in bd_end_claim()
611 if (bdev->bd_write_holder) in bd_end_claim()
623 disk_unblock_events(bdev->bd_disk); in bd_end_claim()
624 bdev->bd_write_holder = false; in bd_end_claim()
628 static void blkdev_flush_mapping(struct block_device *bdev) in blkdev_flush_mapping() argument
630 WARN_ON_ONCE(bdev->bd_holders); in blkdev_flush_mapping()
631 sync_blockdev(bdev); in blkdev_flush_mapping()
632 kill_bdev(bdev); in blkdev_flush_mapping()
633 bdev_write_inode(bdev); in blkdev_flush_mapping()
636 static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) in blkdev_get_whole() argument
638 struct gendisk *disk = bdev->bd_disk; in blkdev_get_whole()
652 if (!atomic_read(&bdev->bd_openers)) in blkdev_get_whole()
653 set_init_blocksize(bdev); in blkdev_get_whole()
656 atomic_inc(&bdev->bd_openers); in blkdev_get_whole()
660 static void blkdev_put_whole(struct block_device *bdev) in blkdev_put_whole() argument
662 if (atomic_dec_and_test(&bdev->bd_openers)) in blkdev_put_whole()
663 blkdev_flush_mapping(bdev); in blkdev_put_whole()
664 if (bdev->bd_disk->fops->release) in blkdev_put_whole()
665 bdev->bd_disk->fops->release(bdev->bd_disk); in blkdev_put_whole()
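blkdev_get_whole() calls the driver's open method (elided by the matcher), sets the initial block size only for the first opener, and counts openers in bd_openers; blkdev_put_whole() mirrors it, flushing the mapping only when the last opener leaves but calling the driver's release on every put. Note both hooks take the gendisk, not the bdev. A sketch of the driver-facing half (my_open()/my_release() are hypothetical):

#include <linux/blkdev.h>
#include <linux/module.h>

static int my_open(struct gendisk *disk, blk_mode_t mode)
{
	return 0;	/* per-open driver setup would go here */
}

static void my_release(struct gendisk *disk)
{
	/* runs on every put of the whole device, per the hits above */
}

static const struct block_device_operations my_fops = {
	.owner   = THIS_MODULE,
	.open    = my_open,
	.release = my_release,
};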
706 struct block_device *bdev; in blkdev_get_no_open() local
721 bdev = &BDEV_I(inode)->bdev; in blkdev_get_no_open()
722 if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) in blkdev_get_no_open()
723 bdev = NULL; in blkdev_get_no_open()
725 return bdev; in blkdev_get_no_open()
728 void blkdev_put_no_open(struct block_device *bdev) in blkdev_put_no_open() argument
730 put_device(&bdev->bd_device); in blkdev_put_no_open()
758 struct block_device *bdev; in blkdev_get_by_dev() local
769 bdev = blkdev_get_no_open(dev); in blkdev_get_by_dev()
770 if (!bdev) in blkdev_get_by_dev()
772 disk = bdev->bd_disk; in blkdev_get_by_dev()
776 ret = bd_prepare_to_claim(bdev, holder, hops); in blkdev_get_by_dev()
794 if (bdev_is_partition(bdev)) in blkdev_get_by_dev()
795 ret = blkdev_get_part(bdev, mode); in blkdev_get_by_dev()
797 ret = blkdev_get_whole(bdev, mode); in blkdev_get_by_dev()
801 bd_finish_claiming(bdev, holder, hops); in blkdev_get_by_dev()
810 if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder && in blkdev_get_by_dev()
812 bdev->bd_write_holder = true; in blkdev_get_by_dev()
820 return bdev; in blkdev_get_by_dev()
825 bd_abort_claiming(bdev, holder); in blkdev_get_by_dev()
829 blkdev_put_no_open(bdev); in blkdev_get_by_dev()
854 struct block_device *bdev; in blkdev_get_by_path() local
862 bdev = blkdev_get_by_dev(dev, mode, holder, hops); in blkdev_get_by_path()
863 if (!IS_ERR(bdev) && (mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) { in blkdev_get_by_path()
864 blkdev_put(bdev, holder); in blkdev_get_by_path()
868 return bdev; in blkdev_get_by_path()
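blkdev_get_by_path() resolves the path to a dev_t, defers to blkdev_get_by_dev(), and then refuses write opens of read-only devices, as the hits show. A usage sketch; a non-NULL holder makes the open exclusive, and both the device path and the use of `sb` as the holder cookie are hypothetical:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>

static int my_mount_dev(struct super_block *sb)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path("/dev/vda1",
				  BLK_OPEN_READ | BLK_OPEN_WRITE,
				  sb, NULL /* no holder ops */);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... issue I/O against the device ... */
	blkdev_put(bdev, sb);	/* holder must match the one at open */
	return 0;
}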
872 void blkdev_put(struct block_device *bdev, void *holder) in blkdev_put() argument
874 struct gendisk *disk = bdev->bd_disk; in blkdev_put()
883 if (atomic_read(&bdev->bd_openers) == 1) in blkdev_put()
884 sync_blockdev(bdev); in blkdev_put()
888 bd_end_claim(bdev, holder); in blkdev_put()
897 if (bdev_is_partition(bdev)) in blkdev_put()
898 blkdev_put_part(bdev); in blkdev_put()
900 blkdev_put_whole(bdev); in blkdev_put()
904 blkdev_put_no_open(bdev); in blkdev_put()
961 void bdev_mark_dead(struct block_device *bdev, bool surprise) in bdev_mark_dead() argument
963 mutex_lock(&bdev->bd_holder_lock); in bdev_mark_dead()
964 if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead) in bdev_mark_dead()
965 bdev->bd_holder_ops->mark_dead(bdev, surprise); in bdev_mark_dead()
967 sync_blockdev(bdev); in bdev_mark_dead()
968 mutex_unlock(&bdev->bd_holder_lock); in bdev_mark_dead()
970 invalidate_bdev(bdev); in bdev_mark_dead()
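bdev_mark_dead() fans device removal out to the current holder through blk_holder_ops under bd_holder_lock, then syncs and invalidates the device. A holder opts in by passing ops at open time (the callback body is hypothetical):

#include <linux/blkdev.h>

static void my_mark_dead(struct block_device *bdev, bool surprise)
{
	/* the device is gone; begin shutting down whatever sits on it */
}

static const struct blk_holder_ops my_holder_ops = {
	.mark_dead = my_mark_dead,
};

/* passed as the hops argument of blkdev_get_by_dev()/_by_path() */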
988 struct block_device *bdev; in sync_bdevs() local
1009 bdev = I_BDEV(inode); in sync_bdevs()
1011 mutex_lock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1012 if (!atomic_read(&bdev->bd_openers)) { in sync_bdevs()
1025 mutex_unlock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1042 struct block_device *bdev; in bdev_statx_dioalign() local
1044 bdev = blkdev_get_no_open(inode->i_rdev); in bdev_statx_dioalign()
1045 if (!bdev) in bdev_statx_dioalign()
1048 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; in bdev_statx_dioalign()
1049 stat->dio_offset_align = bdev_logical_block_size(bdev); in bdev_statx_dioalign()
1052 blkdev_put_no_open(bdev); in bdev_statx_dioalign()
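Userspace observes these two fields through statx(2) with the STATX_DIOALIGN mask, which is what ultimately reaches bdev_statx_dioalign() for block device nodes. A small probe (device path hypothetical):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/dev/vda", 0, STATX_DIOALIGN, &stx))
		return 1;
	printf("dio_mem_align=%u dio_offset_align=%u\n",
	       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	return 0;
}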