Lines matching refs:request_queue (struct request_queue setters and helpers in block/blk-settings.c), grouped by purpose:

Request timeout, bounce buffering, and per-command size limits:

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors)
void blk_queue_max_secure_erase_sectors(struct request_queue *q, unsigned int max_sectors)
void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_zeroes_sectors)
void blk_queue_max_zone_append_sectors(struct request_queue *q, unsigned int max_zone_append_sectors)

Scatter/gather segment limits:

void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
void blk_queue_max_discard_segments(struct request_queue *q, unsigned short max_segments)
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
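
The sector and segment setters above are typically called once from a driver's probe path. A minimal sketch follows, assuming a hypothetical mydrv controller whose DMA engine takes at most 1 MiB per request in 128 segments of 64 KiB; all names and numbers are illustrative, not from any real driver:

#include <linux/blkdev.h>

/* Hypothetical sketch: cap requests to what the controller can do. */
static void mydrv_apply_hw_limits(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 2048);      /* 2048 * 512 B = 1 MiB per request */
        blk_queue_max_segments(q, 128);         /* scatter/gather table entries */
        blk_queue_max_segment_size(q, 65536);   /* 64 KiB per DMA segment */
}

blk_queue_max_hw_sectors() also clamps the soft max_sectors limit, so user-visible I/O never exceeds what the hardware advertises.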

I/O topology and limit stacking:

void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
void blk_queue_zone_write_granularity(struct request_queue *q, unsigned int size)
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
struct request_queue *q = disk->queue;             (match in disk_update_readahead())
void blk_queue_io_min(struct request_queue *q, unsigned int min)
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
struct request_queue *t = disk->queue;             (match in disk_stack_limits())
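
As a worked example of the topology setters, a hedged sketch for a 512e drive (4 KiB physical sectors exposed through 512 B logical blocks); mydrv_set_topology is an invented name. A stacking driver (dm/md style) would not set these directly but pull them up from each component device via disk_stack_limits():

#include <linux/blkdev.h>

/* Hypothetical sketch: export a 512e drive's I/O topology so that
 * partitioning tools and the page cache can align I/O sensibly. */
static void mydrv_set_topology(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);   /* smallest addressable unit */
        blk_queue_physical_block_size(q, 4096); /* internal sector size */
        blk_queue_io_min(q, 4096);              /* smaller writes force read-modify-write */
        blk_queue_io_opt(q, 128 * 1024);        /* preferred I/O granularity */
}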

DMA padding, boundary, and alignment masks:

void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
void blk_queue_dma_alignment(struct request_queue *q, int mask)
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
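
These setters all take masks of the form (boundary - 1). A minimal sketch, assuming an NVMe-like controller whose scatter-list entries may not cross a 4 KiB boundary and whose buffers must be 4-byte aligned (controller parameters invented for illustration):

#include <linux/blkdev.h>

/* Hypothetical sketch: the block layer splits segments at these
 * boundaries and bounce-buffers user memory that violates the
 * alignment mask. */
static void mydrv_set_dma_constraints(struct request_queue *q)
{
        blk_queue_virt_boundary(q, 4096 - 1);     /* no segment spans a 4 KiB boundary */
        blk_queue_segment_boundary(q, 0x3fffff);  /* 4 MiB DMA engine boundary */
        blk_queue_dma_alignment(q, 4 - 1);        /* buffers 4-byte aligned */
}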

Queue depth, write cache, and feature flags:

void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features)
bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev)
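
For the cache and depth setters, a hedged sketch (names and depth invented): advertising a volatile write cache with FUA support makes the block layer issue flush/FUA requests instead of assuming writes are durable:

#include <linux/blkdev.h>

/* Hypothetical sketch: volatile cache with working FUA, depth 64. */
static void mydrv_set_cache_and_depth(struct request_queue *q)
{
        blk_queue_write_cache(q, true, true);   /* wc = cache present, fua = FUA honoured */
        blk_set_queue_depth(q, 64);             /* consumed by wbt and I/O schedulers */
}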

Zoned-model setup and block_device helpers:

struct request_queue *q = disk->queue;             (match in disk_set_zoned())
struct request_queue *q = bdev_get_queue(bdev);    (match in bdev_alignment_offset())
struct request_queue *q = bdev_get_queue(bdev);    (match in bdev_discard_alignment())
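
Finally, a sketch of the zoned-device path for kernels of the vintage this listing reflects (the disk_set_zoned() signature changed in later releases); every name and number is illustrative:

#include <linux/blkdev.h>

/* Hypothetical sketch: a host-managed zoned disk with 32 MiB zones.
 * disk_set_zoned() must run first; the zone-append and granularity
 * setters warn if the queue is not yet marked zoned. */
static void mydrv_setup_zoned(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        disk_set_zoned(disk, BLK_ZONED_HM);             /* host-managed model */
        blk_queue_chunk_sectors(q, 65536);              /* zone size: 64 Ki sectors = 32 MiB */
        blk_queue_max_zone_append_sectors(q, 1024);     /* 512 KiB zone appends */
        blk_queue_zone_write_granularity(q, 4096);      /* sequential-write unit, bytes */
}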