/linux-6.1.9/drivers/md/
D | dm-ebs-target.c |
    195  if (bio_op(bio) == REQ_OP_READ)  in __ebs_process_bios()
    197  else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {  in __ebs_process_bios()
    208  if (bio_op(bio) == REQ_OP_READ)  in __ebs_process_bios()
    210  else if (bio_op(bio) == REQ_OP_WRITE) {  in __ebs_process_bios()
    213  } else if (bio_op(bio) == REQ_OP_DISCARD) {  in __ebs_process_bios()
    230  if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))  in __ebs_process_bios()
    367  if (unlikely(bio_op(bio) == REQ_OP_FLUSH))  in ebs_map()
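The dm-ebs hits are all one idiom: branch on bio_op() to pick per-operation handling, with REQ_PREFLUSH tested separately because it is a flag in bi_opf, not an op. A minimal sketch of that idiom, assuming hypothetical handle_read()/handle_write()/handle_discard() stubs in place of the target's real emulation code:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    /* Placeholder per-op handlers, for illustration only. */
    static int handle_read(struct bio *bio)    { return 0; }
    static int handle_write(struct bio *bio)   { return 0; }
    static int handle_discard(struct bio *bio) { return 0; }

    static void process_bio(struct bio *bio)
    {
            int r = 0;

            if (bio_op(bio) == REQ_OP_READ)
                    r = handle_read(bio);
            else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH))
                    r = handle_write(bio);
            else if (bio_op(bio) == REQ_OP_DISCARD)
                    r = handle_discard(bio);

            /* Mirror the line-230 shape: only writes report an error here. */
            if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
                    bio->bi_status = BLK_STS_IOERR;
    }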
D | dm-zone.c |
    131  switch (bio_op(bio)) {  in dm_is_zone_write()
    386  switch (bio_op(clone)) {  in dm_zone_map_bio_begin()
    498  switch (bio_op(bio)) {  in dm_need_zone_wp_tracking()
    536  orig_bio_details.op = bio_op(clone);  in dm_zone_map_bio()
    604  bio_op(clone) == REQ_OP_ZONE_APPEND) {  in dm_zone_endio()
    633  } else if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {  in dm_zone_endio()
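The dm-zone.c hits classify operations that touch a zone's write pointer. A hedged sketch of that kind of classifier; the exact op sets accepted by dm_is_zone_write() and dm_need_zone_wp_tracking() are not reproduced here, only the shape of the check:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    /* Does this bio advance a zone write pointer?  Op set is illustrative. */
    static bool bio_advances_zone_wp(struct bio *bio)
    {
            switch (bio_op(bio)) {
            case REQ_OP_WRITE:
            case REQ_OP_WRITE_ZEROES:
            case REQ_OP_ZONE_APPEND:
                    return true;
            default:
                    return false;
            }
    }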
D | dm-zoned-target.c |
    90   bio_op(bio) == REQ_OP_WRITE &&  in dmz_bio_endio()
    144  if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))  in dmz_submit_bio()
    412  bio_op(bio));  in dmz_handle_bio()
    425  switch (bio_op(bio)) {  in dmz_handle_bio()
    438  dmz_metadata_label(dmz->metadata), bio_op(bio));  in dmz_handle_bio()
    640  bio_op(bio), (unsigned long long)sector, nr_sectors,  in dmz_map()
    645  if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)  in dmz_map()
    659  if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {  in dmz_map()
    677  bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),  in dmz_map()
D | dm-zero.c |
    38   switch (bio_op(bio)) {  in zero_map()
D | dm-stripe.c |
    284  if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||  in stripe_map()
    285  unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||  in stripe_map()
    286  unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) {  in stripe_map()
D | dm-flakey.c |
    327  if (op_is_zone_mgmt(bio_op(bio)))  in flakey_map()
    388  if (op_is_zone_mgmt(bio_op(bio)))  in flakey_end_io()
D | dm.c |
    514   bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),  in dm_io_acct()
    517   bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);  in dm_io_acct()
    1117  if (bio_op(bio) == REQ_OP_DISCARD &&  in clone_endio()
    1120  else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&  in clone_endio()
    1337  BUG_ON(op_is_zone_mgmt(bio_op(bio)));  in dm_accept_partial_bio()
    1338  BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);  in dm_accept_partial_bio()
    1596  enum req_op op = bio_op(bio);  in is_abnormal_io()
    1617  switch (bio_op(ci->bio)) {  in __process_abnormal_io()
    1736  WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))  in init_clone_info()
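The dm.c core reads bio_op() once into a local enum req_op (line 1596) and then routes a handful of ops through an "abnormal I/O" path. A sketch of that classification, assuming, without checking the tree, that the abnormal set is discard, secure erase and write-zeroes:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    /* Sketch only: ops that a dm-like core would split and clone specially. */
    static bool bio_is_abnormal(struct bio *bio)
    {
            enum req_op op = bio_op(bio);   /* evaluate once, as dm.c:1596 does */

            switch (op) {
            case REQ_OP_DISCARD:
            case REQ_OP_SECURE_ERASE:
            case REQ_OP_WRITE_ZEROES:
                    return true;
            default:
                    return false;
            }
    }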
D | dm-raid1.c |
    620   if (bio_op(bio) == REQ_OP_DISCARD) {  in write_callback()
    659   if (bio_op(bio) == REQ_OP_DISCARD) {  in do_write()
    699   (bio_op(bio) == REQ_OP_DISCARD)) {  in do_writes()
    1246  bio_op(bio) != REQ_OP_DISCARD)  in mirror_end_io()
D | dm-region-hash.c |
    408  if (bio_op(bio) == REQ_OP_DISCARD)  in dm_rh_mark_nosync()
    531  if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)  in dm_rh_inc_pending()
D | md-linear.c |
    244  if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&  in linear_make_request()
/linux-6.1.9/include/linux/
D | bio.h |
    47   (op_is_write(bio_op(bio)) ? WRITE : READ)
    56   bio_op(bio) != REQ_OP_DISCARD &&  in bio_has_data()
    57   bio_op(bio) != REQ_OP_SECURE_ERASE &&  in bio_has_data()
    58   bio_op(bio) != REQ_OP_WRITE_ZEROES)  in bio_has_data()
    66   return bio_op(bio) == REQ_OP_DISCARD ||  in bio_no_advance_iter()
    67   bio_op(bio) == REQ_OP_SECURE_ERASE ||  in bio_no_advance_iter()
    68   bio_op(bio) == REQ_OP_WRITE_ZEROES;  in bio_no_advance_iter()
    183  switch (bio_op(bio)) {  in bio_segments()
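The bio.h hits sit inside small helpers that derive properties from the op: line 47 is the body of the bio_data_dir() macro, and lines 56-58 are the op checks inside bio_has_data(). A sketch of both under my_ names so they do not read as the verbatim header text; the non-op parts of the bio_has_data() condition are filled in from memory:

    #include <linux/kernel.h>
    #include <linux/blk_types.h>

    #define my_bio_data_dir(bio) \
            (op_is_write(bio_op(bio)) ? WRITE : READ)

    /* Discard-style ops carry no payload pages, so they have no "data". */
    static inline bool my_bio_has_data(struct bio *bio)
    {
            return bio && bio->bi_iter.bi_size &&
                   bio_op(bio) != REQ_OP_DISCARD &&
                   bio_op(bio) != REQ_OP_SECURE_ERASE &&
                   bio_op(bio) != REQ_OP_WRITE_ZEROES;
    }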
D | blk_types.h |
    470  static inline enum req_op bio_op(const struct bio *bio)  in bio_op() function
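Line 470 is the definition that every other hit in this list resolves to. From memory (worth checking against the tree), the body is just a mask of the op bits out of bi_opf:

    static inline enum req_op bio_op(const struct bio *bio)
    {
            return bio->bi_opf & REQ_OP_MASK;
    }

Because it reduces to a single mask, callers can afford to invoke bio_op() repeatedly, though several sites above still cache the result in a local enum req_op.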
/linux-6.1.9/block/
D | blk-core.c |
    490  if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {  in bio_check_ro()
    744  switch (bio_op(bio)) {  in submit_bio_noacct()
    819  if (bio_op(bio) == REQ_OP_READ) {  in submit_bio()
    822  } else if (bio_op(bio) == REQ_OP_WRITE) {  in submit_bio()
    955  bio_op(bio), start_time);  in bio_start_io_acct_time()
    968  bio_op(bio), jiffies);  in bio_start_io_acct()
    990  bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);  in bio_end_io_acct_remapped()
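The bio_check_ro() hit shows the usual pairing of op_is_write() with bdev_read_only(). A minimal sketch of that guard; what a caller should do when it trips is left out here:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* True if this bio would modify a block device that is read-only. */
    static bool bio_writes_ro_bdev(struct bio *bio)
    {
            return op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev);
    }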
D | bio-integrity.c |
    213  if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)  in bio_integrity_prep()
    351  if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&  in __bio_integrity_endio()
D | blk-wbt.c |
    527  switch (bio_op(bio)) {  in wbt_should_throttle()
    550  if (bio_op(bio) == REQ_OP_READ) {  in bio_to_wbt_flags()
    555  if (bio_op(bio) == REQ_OP_DISCARD)  in bio_to_wbt_flags()
D | bio.c |
    615   if (bio_op(bio) != REQ_OP_READ)  in bio_truncate()
    1033  if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))  in bio_add_zone_append_page()
    1136  if (bio_op(bio) == REQ_OP_ZONE_APPEND) {  in bio_iov_bvec_set()
    1238  if (bio_op(bio) == REQ_OP_ZONE_APPEND) {  in __bio_iov_iter_get_pages()
    1583  if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))  in bio_split()
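The bio.c hits are guards around REQ_OP_ZONE_APPEND, which has its own page-adding helper and must never be split (lines 1033 and 1583). A sketch of the split-side guard only:

    #include <linux/bio.h>
    #include <linux/bug.h>

    /* A zone-append bio learns its final sector from the device, so splitting
     * one would leave the tail with no meaningful position. */
    static bool bio_may_split(struct bio *bio)
    {
            return !WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND);
    }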
D | bounce.c |
    175  switch (bio_op(bio)) {  in bounce_clone_bio()
D | blk-merge.c |
    350  switch (bio_op(bio)) {  in __bio_split_to_limits()
    411  switch (bio_op(rq->bio)) {  in blk_recalc_rq_segments()
    892  if (req_op(rq) != bio_op(bio))  in blk_rq_merge_ok()
D | blk-mq.h |
    316  bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))  in blk_mq_plug()
/linux-6.1.9/drivers/block/drbd/
D | drbd_req.c |
    34    | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)  in drbd_req_new()
    35    | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);  in drbd_req_new()
    242   if (op_is_write(bio_op(req->master_bio)) &&  in drbd_req_complete()
    264   bio_op(req->master_bio) == REQ_OP_READ &&  in drbd_req_complete()
    1152  if (bio_op(bio) != REQ_OP_READ)  in drbd_submit_req_private_bio()
    1167  else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)  in drbd_submit_req_private_bio()
    1170  else if (bio_op(bio) == REQ_OP_DISCARD)  in drbd_submit_req_private_bio()
    1226  if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||  in drbd_request_prepare()
    1227  bio_op(bio) == REQ_OP_DISCARD)  in drbd_request_prepare()
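Lines 34-35 derive driver-private request flags from the op with chained ternaries. The same pattern in isolation, with made-up flag names so it does not read as drbd's real RQ_* bits:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    #define EXAMPLE_RQ_ZEROES  (1U << 0)   /* hypothetical, not drbd's RQ_ZEROES */
    #define EXAMPLE_RQ_UNMAP   (1U << 1)   /* hypothetical, not drbd's RQ_UNMAP  */

    static unsigned int rq_flags_from_bio(const struct bio *bio_src)
    {
            return (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? EXAMPLE_RQ_ZEROES : 0) |
                   (bio_op(bio_src) == REQ_OP_DISCARD ? EXAMPLE_RQ_UNMAP : 0);
    }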
/linux-6.1.9/drivers/md/bcache/
D | request.c |
    373   (bio_op(bio) == REQ_OP_DISCARD))  in check_should_bypass()
    378   op_is_write(bio_op(bio))))  in check_should_bypass()
    732   s->write = op_is_write(bio_op(bio));  in search_alloc()
    993   if (bio_op(bio) == REQ_OP_DISCARD)  in cached_dev_write()
    1007  if (bio_op(bio) == REQ_OP_DISCARD &&  in cached_dev_write()
    1123  if ((bio_op(bio) == REQ_OP_DISCARD) &&  in detached_dev_do_request()
    1309  s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;  in flash_dev_submit_bio()
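The bcache hits mostly record two facts derived from the op: whether the request is a write (line 732) and whether it should bypass the cache, which for the flash-only volume path at line 1309 is simply "is it a discard". A sketch with a hypothetical context struct, not bcache's struct search:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    struct io_classify {            /* hypothetical */
            bool write;
            bool bypass;
    };

    static void classify_bio(struct io_classify *c, struct bio *bio)
    {
            c->write  = op_is_write(bio_op(bio));
            c->bypass = bio_op(bio) == REQ_OP_DISCARD;
    }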
D | writeback.h |
    112  if (bio_op(bio) == REQ_OP_DISCARD)  in should_writeback()
D | io.c |
    144  unsigned int threshold = op_is_write(bio_op(bio))  in bch_bbio_count_io_errors()
/linux-6.1.9/fs/btrfs/
D | compression.c |
    353  if (bio_op(bio) == REQ_OP_ZONE_APPEND)  in alloc_compressed_bio()
    393  const enum req_op bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;  in btrfs_submit_compressed_write()
    425  bio_op | write_flags, end_compressed_bio_write,  in btrfs_submit_compressed_write()
    789  if (bio_op(comp_bio) == REQ_OP_READ)  in btrfs_submit_compressed_read()
/linux-6.1.9/drivers/block/
D | brd.c |
    301  bio_op(bio), sector);  in brd_submit_bio()