/linux-5.19.10/drivers/md/
dm-flakey.c
    308  (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,   in corrupt_bio_data()
    338  if (bio_data_dir(bio) == READ) {   in flakey_map()
    387  if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {   in flakey_end_io()

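Most of the device-mapper hits in this directory share one shape: the target's .map (and sometimes .end_io) hook branches on bio_data_dir(bio) before remapping the bio to the underlying device. The sketch below shows that shape only; my_target, my_map and the empty branches are illustrative placeholders, not code taken from dm-flakey.

    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    /* Hypothetical per-target context; real targets keep richer state. */
    struct my_target {
            struct dm_dev *dev;
    };

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
            struct my_target *mt = ti->private;

            bio_set_dev(bio, mt->dev->bdev);        /* remap to the backing device */

            if (bio_data_dir(bio) == WRITE) {
                    /* write-specific handling (e.g. corrupt, delay, log the write) */
            } else {
                    /* read-specific handling (e.g. mark the bio for end_io checking) */
            }

            return DM_MAPIO_REMAPPED;
    }

As the listed call sites suggest, dm-flakey, dm-delay, dm-dust and dm-snap each specialize the two branches differently (corrupting writes, delaying them, failing reads, or triggering copy-on-write), but the direction test itself is the same.
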
dm-crypt.c
    532  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_lmk_gen()
    550  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)   in crypt_iv_lmk_post()
    682  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {   in crypt_iv_tcw_gen()
    705  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)   in crypt_iv_tcw_post()
    981  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_elephant()
    988  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {   in crypt_iv_elephant()
    998  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_elephant()
   1018  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_elephant_gen()
   1030  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)   in crypt_iv_elephant_post()
   1338  if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {   in crypt_convert_block_aead()
    [all …]

dm-cache-target.c
    522  return bio_data_dir(bio) == WRITE ?   in lock_level()
    753  if (bio_data_dir(bio) == WRITE)   in remap_to_origin_clear_discard()
    762  if (bio_data_dir(bio) == WRITE) {   in remap_to_cache_dirty()
    829  if (bio_data_dir(origin_bio) == WRITE)   in remap_to_origin_and_cache()
   1049  return (bio_data_dir(bio) == WRITE) &&   in bio_writes_complete_block()
   1579  atomic_inc(bio_data_dir(bio) == READ ?   in inc_hit_counter()
   1585  atomic_inc(bio_data_dir(bio) == READ ?   in inc_miss_counter()
   1612  data_dir = bio_data_dir(bio);   in map_bio()
   1673  if (bio_data_dir(bio) == WRITE) {   in map_bio()
   1680  if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&   in map_bio()

dm-snap.c
   1972  if (bio_data_dir(bio) == WRITE) {   in snapshot_map()
   1981  bio_data_dir(bio) == WRITE)) {   in snapshot_map()
   2031  if (bio_data_dir(bio) == WRITE) {   in snapshot_map()
   2151  if (bio_data_dir(bio) == WRITE &&   in snapshot_merge_map()
   2163  if (bio_data_dir(bio) == WRITE)   in snapshot_merge_map()
   2171  if (bio_data_dir(bio) == WRITE) {   in snapshot_merge_map()
   2678  if (bio_data_dir(bio) != WRITE)   in origin_map()

md-faulty.c
    169  if (bio_data_dir(bio) == WRITE) {   in faulty_make_request()

dm-raid1.c
    524  queue_bio(m->ms, bio, bio_data_dir(bio));   in read_callback()
   1186  int r, rw = bio_data_dir(bio);   in mirror_map()
   1236  int rw = bio_data_dir(bio);   in mirror_end_io()

dm-delay.c
    289  if (bio_data_dir(bio) == WRITE) {   in delay_map()

dm-log-writes.c
    665  if (bio_data_dir(bio) == READ)   in log_writes_map()
    771  if (bio_data_dir(bio) == WRITE && pb->block) {   in normal_end_io()

dm-io.c
    143  if (bio->bi_status && bio_data_dir(bio) == READ)   in endio()

dm-dust.c
    232  if (bio_data_dir(bio) == READ)   in dust_map()

dm-thin.c
   1242  return (bio_data_dir(bio) == WRITE) &&   in io_overwrites_block()
   1811  if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||   in __remap_and_issue_shared_cell()
   1864  if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {   in process_shared_bio()
   1900  if (bio_data_dir(bio) == READ) {   in provision_block()
   1954  if (bio_data_dir(bio) == READ && tc->origin_dev) {   in process_cell()
   2005  int rw = bio_data_dir(bio);   in __process_bio_read_only()

dm-writecache.c
   1246  int rw = bio_data_dir(bio);   in bio_copy_block()
   1561  if (bio_data_dir(bio) == READ)   in writecache_map()
   1581  atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);   in writecache_map()
   1610  int dir = bio_data_dir(bio);   in writecache_end_io()

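Two of the patterns in this directory lean on the fact that READ is 0 and WRITE is 1: dm-cache's hit/miss counters and dm-writecache's bio_in_progress[bio_data_dir(bio)] both use the direction directly as an array index. A minimal sketch of that idiom follows; the my_stats struct and helper names are invented for illustration, not taken from either target.

    #include <linux/atomic.h>
    #include <linux/bio.h>

    /*
     * Illustrative per-device accounting. Because READ == 0 and WRITE == 1,
     * bio_data_dir() can index a two-element array, as dm-writecache does
     * with wc->bio_in_progress[] at the call site listed above.
     */
    struct my_stats {
            atomic_t in_flight[2];          /* [READ] and [WRITE] */
    };

    static void my_start_io(struct my_stats *s, struct bio *bio)
    {
            atomic_inc(&s->in_flight[bio_data_dir(bio)]);
    }

    static void my_end_io(struct my_stats *s, struct bio *bio)
    {
            atomic_dec(&s->in_flight[bio_data_dir(bio)]);
    }
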
/linux-5.19.10/block/
blk-throttle.h
    182  if (!tg->has_rules[bio_data_dir(bio)])   in blk_throtl_bio()

bio-integrity.c
    223  if (bio_data_dir(bio) == READ) {   in bio_integrity_prep()
    293  if (bio_data_dir(bio) == WRITE) {   in bio_integrity_prep()

blk-throttle.c
    760  bool rw = bio_data_dir(bio);   in tg_with_in_iops_limit()
    808  bool rw = bio_data_dir(bio);   in tg_with_in_bps_limit()
    862  bool rw = bio_data_dir(bio);   in tg_may_dispatch()
    920  bool rw = bio_data_dir(bio);   in throtl_charge_bio()
    955  bool rw = bio_data_dir(bio);   in throtl_add_bio_tg()
   1066  tg_dispatch_one_bio(tg, bio_data_dir(bio));   in throtl_dispatch_tg()
   1076  tg_dispatch_one_bio(tg, bio_data_dir(bio));   in throtl_dispatch_tg()
   2098  bool rw = bio_data_dir(bio);   in __blk_throtl_bio()
   2241  int rw = bio_data_dir(bio);   in blk_throtl_bio_endio()

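blk-throttle stores the direction once, as bool rw = bio_data_dir(bio);, and then keys every per-direction limit and statistic off that value. The sketch below shows only the shape of such a check; my_tg, bps and bytes_disp are illustrative stand-ins, and the comparison is a simplification of the real time-slice accounting done in tg_with_in_bps_limit().

    #include <linux/bio.h>

    struct my_tg {
            u64 bps[2];             /* bytes-per-second limit, per direction */
            u64 bytes_disp[2];      /* bytes dispatched so far, per direction */
    };

    /* Simplified per-direction budget check (not the real algorithm). */
    static bool my_within_bps_limit(struct my_tg *tg, struct bio *bio)
    {
            bool rw = bio_data_dir(bio);

            return tg->bytes_disp[rw] + bio->bi_iter.bi_size <= tg->bps[rw];
    }
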
bounce.c
    205  int rw = bio_data_dir(*bio_orig);   in __blk_queue_bounce()

blk-map.c
    120  else if (bio_data_dir(bio) == READ)   in bio_uncopy_user()
    608  bio_release_pages(bio, bio_data_dir(bio) == READ);   in blk_rq_unmap_user()

blk-crypto-fallback.c
    508  if (bio_data_dir(bio) == WRITE)   in blk_crypto_fallback_bio_prep()

/linux-5.19.10/arch/m68k/emu/
nfblock.c
     68  dir = bio_data_dir(bio);   in nfhd_submit_bio()

/linux-5.19.10/drivers/md/bcache/
io.c
    142  int is_read = (bio_data_dir(bio) == READ ? 1 : 0);   in bch_bbio_count_io_errors()

request.c
   1177  int rw = bio_data_dir(bio);   in cached_dev_submit_bio()
   1304  } else if (bio_data_dir(bio)) {   in flash_dev_submit_bio()

/linux-5.19.10/drivers/block/drbd/
drbd_req.c
     38  req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)   in drbd_req_new()
    822  if (bio_data_dir(req->master_bio) == WRITE)   in __req_mod()
   1206  const int rw = bio_data_dir(bio);   in drbd_request_prepare()
   1319  const int rw = bio_data_dir(req->master_bio);   in drbd_send_and_submit()
   1447  const int rw = bio_data_dir(req->master_bio);   in submit_fast_path()

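drbd folds the direction into its request state bits at allocation time (rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0) above), so later code can test a flag instead of re-deriving the direction from the bio. A tiny sketch of that encoding, with MY_RQ_WRITE standing in for drbd's real RQ_WRITE bit:

    #include <linux/bio.h>

    #define MY_RQ_WRITE     (1UL << 0)      /* illustrative stand-in for drbd's RQ_WRITE */

    static unsigned long my_initial_req_state(struct bio *bio_src)
    {
            /* Record the direction once; callers then test the flag. */
            return bio_data_dir(bio_src) == WRITE ? MY_RQ_WRITE : 0;
    }
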
/linux-5.19.10/arch/xtensa/platforms/iss/
simdisk.c
    115  bio_data_dir(bio) == WRITE);   in simdisk_submit_bio()

/linux-5.19.10/drivers/s390/block/
dcssblk.c
    883  if (bio_data_dir(bio) == WRITE) {   in dcssblk_submit_bio()
    898  if (bio_data_dir(bio) == READ) {   in dcssblk_submit_bio()

/linux-5.19.10/include/linux/
bio.h
     46  #define bio_data_dir(bio) \   (macro definition)

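The definition itself is cut off at the line continuation in the search output above. For reference, in v5.19 the macro in include/linux/bio.h resolves the request op to one of the two legacy direction constants (reconstructed from that tree; worth re-checking against the source):

    /* include/linux/bio.h, v5.19 (reconstruction; verify against the tree) */
    #define bio_data_dir(bio) \
            (op_is_write(bio_op(bio)) ? WRITE : READ)

Because READ is 0 and WRITE is 1, call sites such as bcache's "} else if (bio_data_dir(bio))" can treat the result as a plain truth value, and others can use it directly as an array index, as seen in dm-writecache and blk-throttle above.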