/linux-6.6.21/include/linux/
  blk_types.h
    256: typedef __u32 __bitwise blk_opf_t;
    268: blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
    355: #define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
    373: REQ_OP_READ = (__force blk_opf_t)0,
    375: REQ_OP_WRITE = (__force blk_opf_t)1,
    377: REQ_OP_FLUSH = (__force blk_opf_t)2,
    379: REQ_OP_DISCARD = (__force blk_opf_t)3,
    381: REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
    383: REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
    385: REQ_OP_ZONE_OPEN = (__force blk_opf_t)10,
    [all …]
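blk_opf_t packs the request opcode and its modifier flags into a single __bitwise word: the low REQ_OP_BITS hold a REQ_OP_* opcode and the upper bits carry REQ_* flags such as REQ_SYNC, REQ_META or REQ_FUA. A minimal illustrative sketch of composing and decoding such a value (not code from the tree):

    #include <linux/blk_types.h>

    /* Illustrative helper: true when the opcode bits decode to REQ_OP_WRITE
     * and the REQ_SYNC modifier flag is also set. */
    static bool opf_is_sync_write(blk_opf_t opf)
    {
            return (opf & REQ_OP_MASK) == REQ_OP_WRITE && (opf & REQ_SYNC);
    }

    /* Example composition: a write preceded by a cache flush, with FUA. */
    static const blk_opf_t example_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;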
  buffer_head.h
    242: int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
    243: void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
    244: void submit_bh(blk_opf_t, struct buffer_head *);
    248: int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
    250: blk_opf_t op_flags, bool force_lock);
    402: static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)    in bh_readahead()
    412: static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)    in bh_read_nowait()
    419: static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)    in bh_read()
    432: blk_opf_t op_flags)    in bh_readahead_batch()
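Most of these buffer-head helpers supply the opcode themselves and take only extra flags in op_flags (bh_read() issues REQ_OP_READ), while submit_bh() takes the full opf including the opcode. A hedged usage sketch, assuming the caller already holds a valid, mapped buffer_head:

    #include <linux/buffer_head.h>
    #include <linux/errno.h>

    /* Illustrative only: synchronously read one buffer, tagging the I/O as
     * high-priority metadata via the op_flags argument. */
    static int read_meta_buffer(struct buffer_head *bh)
    {
            if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
                    return -EIO;
            return 0;
    }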
  bio.h
    420: blk_opf_t opf, gfp_t gfp_mask,
    433: unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)    in bio_alloc()
    471: unsigned short max_vecs, blk_opf_t opf);
    473: void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
    817: unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
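bio_alloc() and bio_reset() take the complete opf up front, so the opcode is fixed for the life of the bio. A self-contained sketch of a synchronous one-page write, assuming a valid block_device and page (illustrative only):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Illustrative only: write one page at the given sector and wait for it. */
    static int write_page_sync(struct block_device *bdev, struct page *page,
                               sector_t sector)
    {
            struct bio *bio;
            int ret;

            /* With __GFP_DIRECT_RECLAIM (part of GFP_NOIO) bio_alloc() is
             * backed by a mempool and does not return NULL. */
            bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOIO);
            bio->bi_iter.bi_sector = sector;
            __bio_add_page(bio, page, PAGE_SIZE, 0);

            ret = submit_bio_wait(bio);   /* 0 on success, negative errno on error */
            bio_put(bio);
            return ret;
    }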
  writeback.h
    94: static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)    in wbc_to_write_flags()
    96: blk_opf_t flags = 0;    in wbc_to_write_flags()
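wbc_to_write_flags() turns writeback_control state into REQ_* bits (for example WB_SYNC_ALL writeback yields REQ_SYNC) that a filesystem ORs into its write opf. A hedged sketch of the usual call pattern:

    #include <linux/writeback.h>
    #include <linux/bio.h>

    /* Illustrative only: allocate a writeback bio whose flags reflect the wbc. */
    static struct bio *alloc_wb_bio(struct block_device *bdev,
                                    struct writeback_control *wbc,
                                    unsigned short nr_vecs)
    {
            blk_opf_t opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

            return bio_alloc(bdev, nr_vecs, opf, GFP_NOFS);
    }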
  dm-io.h
    62: blk_opf_t bi_opf; /* Request type and flags */
  blktrace_api.h
    113: void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
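blk_fill_rwbs() renders an opf as the short rwbs string used by blktrace and the block tracepoints; a synchronous write, for instance, becomes "WS". A small sketch, with the buffer size an assumption matching the tracing code's RWBS_LEN of 8:

    #include <linux/blktrace_api.h>
    #include <linux/printk.h>

    /* Illustrative only: log an opf in the familiar blktrace notation. */
    static void log_opf(blk_opf_t opf)
    {
            char rwbs[8];

            blk_fill_rwbs(rwbs, opf);
            pr_debug("opf=%#x rwbs=%s\n", (__force unsigned int)opf, rwbs);
    }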
  blk-mq.h
    85: blk_opf_t cmd_flags; /* op and common flags */
    726: struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
    729: blk_opf_t opf, blk_mq_req_flags_t flags,
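At the request layer the same word ends up in rq->cmd_flags, and blk_mq_alloc_request() takes it at allocation time. A hedged sketch of allocating a driver-private passthrough request:

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    /* Illustrative only: allocate a request for a driver-internal command
     * that transfers data from the device (REQ_OP_DRV_IN). May sleep. */
    static struct request *alloc_drv_in_request(struct request_queue *q)
    {
            struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);

            return IS_ERR(rq) ? NULL : rq;
    }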
/linux-6.6.21/drivers/block/rnbd/
  rnbd-proto.h
    219: static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)    in rnbd_to_bio_flags()
    221: blk_opf_t bio_opf;    in rnbd_to_bio_flags()
/linux-6.6.21/drivers/scsi/device_handler/
  scsi_dh_hp_sw.c
    86: blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |    in hp_sw_tur()
    127: blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |    in hp_sw_start_stop()
/linux-6.6.21/block/
  blk-flush.c
    95: struct blk_flush_queue *fq, blk_opf_t flags);
    169: blk_opf_t cmd_flags;    in blk_flush_complete_seq()
    290: blk_opf_t flags)    in blk_kick_flush()
  bfq-iosched.h
    1071: void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
    1072: void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
    1074: u64 io_start_time_ns, blk_opf_t opf);
    1082: blk_opf_t opf);
  blk-mq.h
    85: static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)    in blk_mq_get_hctx_type()
    106: blk_opf_t opf,    in blk_mq_map_queue()
    151: blk_opf_t cmd_flags;
  bfq-cgroup.c
    223: blk_opf_t opf)    in bfqg_stats_update_io_add()
    231: void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)    in bfqg_stats_update_io_remove()
    236: void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)    in bfqg_stats_update_io_merged()
    242: u64 io_start_time_ns, blk_opf_t opf)    in bfqg_stats_update_completion()
    257: void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }    in bfqg_stats_update_io_remove()
    258: void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }    in bfqg_stats_update_io_merged()
    260: u64 io_start_time_ns, blk_opf_t opf) { }    in bfqg_stats_update_completion()
  blk-cgroup-rwstat.h
    62: blk_opf_t opf, uint64_t val)    in blkg_rwstat_add()
  fops.c
    27: static blk_opf_t dio_bio_write_op(struct kiocb *iocb)    in dio_bio_write_op()
    29: blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;    in dio_bio_write_op()
    170: blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);    in __blkdev_direct_IO()
    307: blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);    in __blkdev_direct_IO_async()
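block/fops.c builds the direct-I/O write opf from a fixed REQ_OP_WRITE | REQ_SYNC | REQ_IDLE base and then adds per-iocb flags. A rough sketch of that shape; using REQ_FUA for data-sync writes is an assumption here, not a quote of the file:

    #include <linux/fs.h>
    #include <linux/blk_types.h>

    /* Illustrative sketch: derive a direct-I/O write opf from the iocb. */
    static blk_opf_t example_dio_write_opf(struct kiocb *iocb)
    {
            blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

            /* A data-sync write can use FUA so no separate flush is needed. */
            if (iocb_is_dsync(iocb))
                    opf |= REQ_FUA;
            return opf;
    }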
  blk-merge.c
    735: blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;    in blk_rq_set_mixed_merge()
    754: static inline blk_opf_t bio_failfast(const struct bio *bio)    in bio_failfast()
    980: const blk_opf_t ff = bio_failfast(bio);    in bio_attempt_back_merge()
    1006: const blk_opf_t ff = bio_failfast(bio);    in bio_attempt_front_merge()
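The merge path extracts the REQ_FAILFAST_* bits so a request assembled from several bios keeps consistent retry semantics. A tiny illustrative check in the same spirit (not the tree's own logic):

    #include <linux/bio.h>

    /* Illustrative only: two bios agree on retry policy when their failfast
     * bits (DEV, TRANSPORT, DRIVER) are identical. */
    static bool same_failfast_policy(const struct bio *a, const struct bio *b)
    {
            return (a->bi_opf & REQ_FAILFAST_MASK) ==
                   (b->bi_opf & REQ_FAILFAST_MASK);
    }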
  blk-wbt.c
    534: static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)    in get_limit()
    566: blk_opf_t opf;
    586: blk_opf_t opf)    in __wbt_wait()
  elevator.h
    38: void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *);
/linux-6.6.21/drivers/md/
  dm-io.c
    306: static void do_region(const blk_opf_t opf, unsigned int region,    in do_region()
    384: static void dispatch_io(blk_opf_t opf, unsigned int num_regions,    in dispatch_io()
    427: struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,    in sync_io()
    461: struct dm_io_region *where, blk_opf_t opf,    in async_io()
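dm-io carries the caller's opf unchanged through dispatch_io()/do_region() into the bios it builds, so one dm_io() call describes both the opcode and the flags for every region. A hedged sketch of a synchronous read into kernel memory, assuming a dm_io_client created earlier with dm_io_client_create() and the 6.6-era four-argument dm_io():

    #include <linux/dm-io.h>
    #include <linux/blk_types.h>

    /* Illustrative only: read 'count' sectors from bdev into a kernel buffer,
     * waiting for completion (a NULL notify.fn makes dm_io() synchronous). */
    static int read_sectors_sync(struct dm_io_client *client,
                                 struct block_device *bdev, sector_t start,
                                 sector_t count, void *buf)
    {
            struct dm_io_region where = {
                    .bdev   = bdev,
                    .sector = start,
                    .count  = count,
            };
            struct dm_io_request req = {
                    .bi_opf       = REQ_OP_READ,
                    .mem.type     = DM_IO_KMEM,
                    .mem.ptr.addr = buf,
                    .notify.fn    = NULL,
                    .client       = client,
            };

            return dm_io(&req, 1, &where, NULL);
    }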
/linux-6.6.21/fs/nilfs2/
  btnode.h
    38: blk_opf_t, struct buffer_head **, sector_t *);
/linux-6.6.21/fs/iomap/
  direct-io.c
    56: struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)    in iomap_dio_alloc_bio()
    258: static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,    in iomap_dio_bio_opflags()
    261: blk_opf_t opflags = REQ_SYNC | REQ_IDLE;    in iomap_dio_bio_opflags()
    283: blk_opf_t bio_opf;    in iomap_dio_bio_iter()
/linux-6.6.21/fs/btrfs/
  bio.h
    97: struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
  compression.h
    93: blk_opf_t write_flags,
/linux-6.6.21/fs/gfs2/
  log.h
    85: blk_opf_t op_flags);
  lops.h
    19: extern void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);