/linux-6.6.21/include/linux/
D | blkdev.h |
     858  static inline struct request_queue *bdev_get_queue(struct block_device *bdev)  in bdev_get_queue() function
    1136  return queue_max_zone_append_sectors(bdev_get_queue(bdev));  in bdev_max_zone_append_sectors()
    1141  return queue_max_segments(bdev_get_queue(bdev));  in bdev_max_segments()
    1156  return queue_logical_block_size(bdev_get_queue(bdev));  in bdev_logical_block_size()
    1166  return queue_physical_block_size(bdev_get_queue(bdev));  in bdev_physical_block_size()
    1176  return queue_io_min(bdev_get_queue(bdev));  in bdev_io_min()
    1186  return queue_io_opt(bdev_get_queue(bdev));  in bdev_io_opt()
    1198  return queue_zone_write_granularity(bdev_get_queue(bdev));  in bdev_zone_write_granularity()
    1206  return bdev_get_queue(bdev)->limits.max_discard_sectors;  in bdev_max_discard_sectors()
    1211  return bdev_get_queue(bdev)->limits.discard_granularity;  in bdev_discard_granularity()
    [all …]
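For reference, the accessor these hits resolve to is a one-liner, and every bdev_*() wrapper above follows the same shape: resolve the queue, then forward to a queue_*() helper or read ->limits directly. A minimal sketch, assuming the v6.6 layout where struct block_device caches its queue pointer in ->bd_queue:

        static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
        {
                return bdev->bd_queue;  /* assumed ->bd_queue field; set for every live bdev */
        }

        /* Same pattern as the wrappers at lines 1136-1211 of blkdev.h. */
        static inline int bdev_io_min(struct block_device *bdev)
        {
                return queue_io_min(bdev_get_queue(bdev));
        }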
/linux-6.6.21/block/
D | blk-crypto.c |
     363  return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,  in blk_crypto_config_supported_natively()
     420  struct request_queue *q = bdev_get_queue(bdev);  in blk_crypto_evict_key()
D | blk-core.c |
     645  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in __submit_bio_noacct()
     663  if (q == bdev_get_queue(bio->bi_bdev))  in __submit_bio_noacct()
     732  struct request_queue *q = bdev_get_queue(bdev);  in submit_bio_noacct()
     869  q = bdev_get_queue(bdev);  in bio_poll()
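The blk-core.c hits illustrate why the submit path re-reads the queue from bio->bi_bdev instead of caching it across a driver call: a stacked driver's ->submit_bio() may remap the bio to a different device, hence the comparison at line 663. A hedged sketch with a hypothetical helper name:

        #include <linux/blkdev.h>

        /* True if ->submit_bio() left the bio on the queue it was submitted
         * to, i.e. the driver did not remap bio->bi_bdev to another device.
         */
        static bool bio_stayed_on_queue(struct bio *bio, struct request_queue *q)
        {
                return q == bdev_get_queue(bio->bi_bdev);
        }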
D | blk-settings.c |
     708  if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,  in disk_stack_limits()
     967  struct request_queue *q = bdev_get_queue(bdev);  in bdev_alignment_offset()
     980  struct request_queue *q = bdev_get_queue(bdev);  in bdev_discard_alignment()
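disk_stack_limits() (line 708) shows the stacking pattern: pull the lower device's queue_limits out of its queue and merge them into the upper device's. A sketch under the assumption that blk_stack_limits(top, bottom, start) keeps its long-standing signature and returns non-zero on an alignment inconsistency; the helper name is invented here:

        #include <linux/blkdev.h>

        static void my_stack_bdev_limits(struct queue_limits *top,
                                         struct block_device *bdev,
                                         sector_t start)
        {
                struct queue_limits *bottom = &bdev_get_queue(bdev)->limits;

                /* Non-zero return signals a misaligned combination; callers
                 * typically just warn and keep the merged limits.
                 */
                if (blk_stack_limits(top, bottom, start))
                        pr_notice("%pg: misaligned limits when stacking\n", bdev);
        }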
D | blk-rq-qos.h |
     141  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in rq_qos_done_bio()
D | bio.c |
    1045  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_add_zone_append_page()
    1175  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_iov_bvec_set()
    1211  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_iov_add_zone_append_page()
    1584  trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);  in bio_endio()
D | bio-integrity.c |
     126  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_integrity_add_page()
D | blk-cgroup.c |
     939  spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);  in blkg_conf_exit()
    2048  blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);  in bio_associate_blkg_from_css()
    2049  bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;  in bio_associate_blkg_from_css()
D | blk-zoned.c |
     253  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_zone_mgmt()
D | blk.h |
      66  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_queue_enter()
D | genhd.c |
     950  struct request_queue *q = bdev_get_queue(bdev);  in part_stat_show()
     999  struct request_queue *q = bdev_get_queue(bdev);  in part_inflight_show()
D | ioctl.c |
     541  queue_max_sectors(bdev_get_queue(bdev)));  in blkdev_common_ioctl()
D | blk-merge.c |
     403  const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;  in bio_split_to_limits()
/linux-6.6.21/drivers/md/
D | dm-table.c |
     410  struct request_queue *q = bdev_get_queue(bdev);  in dm_set_device_limits()
     864  struct request_queue *q = bdev_get_queue(bdev);  in device_is_rq_stackable()
    1265  bdev_get_queue(dev->bdev)->crypto_profile;  in device_intersect_crypto_capabilities()
    1498  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_poll_capable()
    1589  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_zoned_model()
    1782  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_flush_capable()
    1835  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_not_random()
    1843  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_write_zeroes_capable()
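Most of the dm-table.c hits share one shape: an iterate_devices callout that resolves each underlying device's queue and tests a single capability, so the table reports a feature only if every member device supports it. A hedged sketch modeled on device_not_poll_capable(); the function name here is hypothetical:

        #include <linux/device-mapper.h>
        #include <linux/blkdev.h>

        /* Returns non-zero if this underlying device cannot poll; dm
         * combines the results across all devices in the table.
         */
        static int my_device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
                                              sector_t start, sector_t len, void *data)
        {
                struct request_queue *q = bdev_get_queue(dev->bdev);

                return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
        }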
D | dm-mpath.c |
     539  q = bdev_get_queue(bdev);  in multipath_clone_and_map()
     885  struct request_queue *q = bdev_get_queue(bdev);  in setup_scsi_dh()
     961  q = bdev_get_queue(p->path.dev->bdev);  in parse_path()
    1626  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in activate_or_offline_path()
    2100  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in pgpath_busy()
D | dm-io.c |
     316  struct request_queue *q = bdev_get_queue(where->bdev);  in do_region()
D | dm-clone-target.c |
    2027  struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;  in disable_passdown_if_not_supported()
    2048  struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;  in set_discard_limits()
D | dm-zoned-target.c |
     589  if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {  in dmz_bdev_is_dying()
D | dm-cache-target.c |
    3371  struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;  in disable_passdown_if_not_supported()
    3393  struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;  in set_discard_limits()
/linux-6.6.21/kernel/trace/
D | blktrace.c |
     732  struct request_queue *q = bdev_get_queue(bdev);  in blk_trace_ioctl()
    1768  struct request_queue *q = bdev_get_queue(bdev);  in sysfs_blk_trace_attr_show()
    1802  struct request_queue *q = bdev_get_queue(bdev);  in sysfs_blk_trace_attr_store()
/linux-6.6.21/drivers/block/rnbd/
D | rnbd-srv.c |
     546  cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev)));  in rnbd_srv_fill_msg_open_rsp()
/linux-6.6.21/drivers/target/
D | target_core_iblock.c |
     126  q = bdev_get_queue(bd);  in iblock_configure_device()
/linux-6.6.21/drivers/md/bcache/
D | super.c |
    1018  q = bdev_get_queue(dc->bdev);  in cached_dev_status_update()
    1400  struct request_queue *q = bdev_get_queue(dc->bdev);  in cached_dev_init()
/linux-6.6.21/drivers/block/
D | pktcdvd.c |
     717  struct request_queue *q = bdev_get_queue(pd->bdev);  in pkt_generic_packet()
    2185  q = bdev_get_queue(pd->bdev);  in pkt_open_dev()
D | loop.c |
     770  struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));  in loop_config_discard()
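loop.c (line 770) is the one caller here that reaches the queue through an inode: when the backing file is itself a block device, I_BDEV() recovers the struct block_device so its discard limits can be mirrored. A simplified, hypothetical helper built from that pattern and the bdev_max_discard_sectors() wrapper listed in blkdev.h above:

        #include <linux/blkdev.h>
        #include <linux/fs.h>

        static unsigned int my_backing_discard_sectors(struct inode *inode)
        {
                if (!S_ISBLK(inode->i_mode))
                        return 0;       /* regular-file backing: nothing to mirror */

                return bdev_max_discard_sectors(I_BDEV(inode));
        }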