/linux-6.6.21/include/linux/

  bio.h
      22  #define bio_prio(bio)                  (bio)->bi_ioprio
      23  #define bio_set_prio(bio, prio)        ((bio)->bi_ioprio = prio)
      25  #define bio_iter_iovec(bio, iter)      \
      26          bvec_iter_bvec((bio)->bi_io_vec, (iter))
      28  #define bio_iter_page(bio, iter)       \
      29          bvec_iter_page((bio)->bi_io_vec, (iter))
      30  #define bio_iter_len(bio, iter)        \
      31          bvec_iter_len((bio)->bi_io_vec, (iter))
      32  #define bio_iter_offset(bio, iter)     \
      33          bvec_iter_offset((bio)->bi_io_vec, (iter))
     [all …]

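The bio_iter_* macros above are the building blocks of the segment iterators in bio.h: bio_for_each_segment() expands to bio_iter_iovec() applied to a private copy of bi_iter. A minimal sketch of walking a bio that way; count_bio_bytes() is a hypothetical helper, not a kernel function:

    #include <linux/bio.h>

    /* Hypothetical helper: sum the data bytes a bio currently covers. */
    static unsigned int count_bio_bytes(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            unsigned int bytes = 0;

            /*
             * bio_for_each_segment() is built on bio_iter_iovec() above:
             * each pass yields one bio_vec and advances a private copy of
             * bio->bi_iter, so the bio itself is left untouched.
             */
            bio_for_each_segment(bv, bio, iter)
                    bytes += bv.bv_len;

            return bytes;   /* equals bio->bi_iter.bi_size */
    }
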
/linux-6.6.21/block/

  bio.c
      32  struct bio *free_list;
      33  struct bio *free_list_irq;
     115  return bs->front_pad + sizeof(struct bio) + bs->back_pad;   in bs_bio_slab_size()
     214  void bio_uninit(struct bio *bio)   in bio_uninit()
     217  if (bio->bi_blkg) {   in bio_uninit()
     218  blkg_put(bio->bi_blkg);   in bio_uninit()
     219  bio->bi_blkg = NULL;   in bio_uninit()
     222  if (bio_integrity(bio))   in bio_uninit()
     223  bio_integrity_free(bio);   in bio_uninit()
     225  bio_crypt_free_ctx(bio);   in bio_uninit()
     [all …]

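bio_uninit() is the teardown side of the bio life cycle: the final bio_put() releases the blkcg association, integrity payload and crypto context before the bio returns to its bio_set. A minimal sketch of the usual allocate/submit/put sequence; read_one_page() is a hypothetical example, not a kernel function:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Hypothetical example: synchronously read one page from a block device. */
    static int read_one_page(struct block_device *bdev, sector_t sector,
                             struct page *page)
    {
            struct bio *bio;
            int ret;

            bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
            if (!bio)
                    return -ENOMEM;

            bio->bi_iter.bi_sector = sector;
            __bio_add_page(bio, page, PAGE_SIZE, 0);

            ret = submit_bio_wait(bio);     /* sleeps until the read completes */

            /*
             * bio_put() drops the last reference; the final put runs
             * bio_uninit() (shown above), which releases the blkcg
             * association, integrity payload and crypto context.
             */
            bio_put(bio);
            return ret;
    }
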
  blk-map.c
      48  static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)   in bio_copy_from_iter()
      53  bio_for_each_segment_all(bvec, bio, iter_all) {   in bio_copy_from_iter()
      79  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)   in bio_copy_to_iter()
      84  bio_for_each_segment_all(bvec, bio, iter_all) {   in bio_copy_to_iter()
     109  static int bio_uncopy_user(struct bio *bio)   in bio_uncopy_user()
     111  struct bio_map_data *bmd = bio->bi_private;   in bio_uncopy_user()
     122  else if (bio_data_dir(bio) == READ)   in bio_uncopy_user()
     123  ret = bio_copy_to_iter(bio, bmd->iter);   in bio_uncopy_user()
     125  bio_free_pages(bio);   in bio_uncopy_user()
     136  struct bio *bio;   in bio_copy_user_iov()
     [all …]

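bio_copy_from_iter() and bio_copy_to_iter() move data between a bio's pages and a user iov_iter when blk-map has to bounce the buffer instead of mapping it directly. A sketch of the copy-in direction, modeled on the loop above; it assumes the bio's pages were already allocated and added:

    #include <linux/bio.h>
    #include <linux/uio.h>

    /* Sketch of the copy-in loop used when user pages cannot be mapped directly. */
    static int copy_iter_into_bio(struct bio *bio, struct iov_iter *iter)
    {
            struct bio_vec *bvec;
            struct bvec_iter_all iter_all;

            bio_for_each_segment_all(bvec, bio, iter_all) {
                    size_t copied;

                    copied = copy_page_from_iter(bvec->bv_page, bvec->bv_offset,
                                                 bvec->bv_len, iter);
                    if (copied != bvec->bv_len)
                            return -EFAULT; /* short copy from the user buffer */
            }
            return 0;
    }
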
  blk-merge.c
      21  static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)   in bio_get_first_bvec()
      23  *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);   in bio_get_first_bvec()
      26  static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)   in bio_get_last_bvec()
      28  struct bvec_iter iter = bio->bi_iter;   in bio_get_last_bvec()
      31  bio_get_first_bvec(bio, bv);   in bio_get_last_bvec()
      32  if (bv->bv_len == bio->bi_iter.bi_size)   in bio_get_last_bvec()
      35  bio_advance_iter(bio, &iter, iter.bi_size);   in bio_get_last_bvec()
      42  *bv = bio->bi_io_vec[idx];   in bio_get_last_bvec()
      53  struct request *prev_rq, struct bio *prev, struct bio *next)   in bio_will_gap()
      66  bio_get_first_bvec(prev_rq->bio, &pb);   in bio_will_gap()
     [all …]

  bounce.c
      76  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)   in copy_to_high_bio_irq()
     102  static void bounce_end_io(struct bio *bio)   in bounce_end_io()
     104  struct bio *bio_orig = bio->bi_private;   in bounce_end_io()
     112  bio_for_each_segment_all(bvec, bio, iter_all) {   in bounce_end_io()
     121  bio_orig->bi_status = bio->bi_status;   in bounce_end_io()
     123  bio_put(bio);   in bounce_end_io()
     126  static void bounce_end_io_write(struct bio *bio)   in bounce_end_io_write()
     128  bounce_end_io(bio);   in bounce_end_io_write()
     131  static void bounce_end_io_read(struct bio *bio)   in bounce_end_io_read()
     133  struct bio *bio_orig = bio->bi_private;   in bounce_end_io_read()
     [all …]

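bounce_end_io() shows the standard completion-forwarding pattern for a privately cloned bio: the original is stashed in bi_private, its status is copied over, the clone is dropped with bio_put(), and the original is finished with bio_endio(). A generic sketch of that pattern, with illustrative names and minimal error handling:

    #include <linux/bio.h>

    /* End_io of a privately cloned bio: forward completion to the original. */
    static void my_clone_end_io(struct bio *clone)
    {
            struct bio *orig = clone->bi_private;

            orig->bi_status = clone->bi_status;     /* propagate any error */
            bio_put(clone);
            bio_endio(orig);        /* runs the original submitter's bi_end_io */
    }

    static void submit_via_clone(struct bio *orig, struct block_device *bdev)
    {
            struct bio *clone = bio_alloc_clone(bdev, orig, GFP_NOIO,
                                                &fs_bio_set);

            if (!clone) {
                    bio_io_error(orig);     /* error handling kept minimal */
                    return;
            }
            clone->bi_private = orig;
            clone->bi_end_io = my_clone_end_io;
            submit_bio(clone);
    }
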
  blk-crypto-internal.h
      31  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
      37  struct bio *bio)   in bio_crypt_ctx_back_mergeable()
      40  bio->bi_crypt_context);   in bio_crypt_ctx_back_mergeable()
      44  struct bio *bio)   in bio_crypt_ctx_front_mergeable()
      46  return bio_crypt_ctx_mergeable(bio->bi_crypt_context,   in bio_crypt_ctx_front_mergeable()
      47  bio->bi_iter.bi_size, req->crypt_ctx);   in bio_crypt_ctx_front_mergeable()
      97  struct bio *bio)   in bio_crypt_rq_ctx_compatible()
     103  struct bio *bio)   in bio_crypt_ctx_front_mergeable()
     109  struct bio *bio)   in bio_crypt_ctx_back_mergeable()
     134  void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
     [all …]

  blk-core.c
     338  int __bio_queue_enter(struct request_queue *q, struct bio *bio)   in __bio_queue_enter()
     341  struct gendisk *disk = bio->bi_bdev->bd_disk;   in __bio_queue_enter()
     343  if (bio->bi_opf & REQ_NOWAIT) {   in __bio_queue_enter()
     346  bio_wouldblock_error(bio);   in __bio_queue_enter()
     368  bio_io_error(bio);   in __bio_queue_enter()
     499  static inline void bio_check_ro(struct bio *bio)   in bio_check_ro()
     501  if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {   in bio_check_ro()
     502  if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))   in bio_check_ro()
     505  if (bio->bi_bdev->bd_ro_warned)   in bio_check_ro()
     508  bio->bi_bdev->bd_ro_warned = true;   in bio_check_ro()
     [all …]

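__bio_queue_enter() is where a REQ_NOWAIT bio is failed with BLK_STS_AGAIN (via bio_wouldblock_error()) instead of sleeping on queue entry. A sketch of a submitter that opts in to that behaviour; submit_nowait_read() is illustrative, not a kernel API:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /*
     * Sketch: a caller that must not block sets REQ_NOWAIT; if the queue
     * cannot be entered without sleeping, the block layer completes the bio
     * with BLK_STS_AGAIN, as __bio_queue_enter() does above.
     */
    static void submit_nowait_read(struct block_device *bdev, sector_t sector,
                                   struct page *page, bio_end_io_t *done)
    {
            struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ | REQ_NOWAIT,
                                        GFP_NOWAIT);

            if (!bio)
                    return;         /* caller must handle allocation failure */

            bio->bi_iter.bi_sector = sector;
            __bio_add_page(bio, page, PAGE_SIZE, 0);
            bio->bi_end_io = done;  /* may be invoked with BLK_STS_AGAIN */
            submit_bio(bio);
    }
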
  bio-integrity.c
      48  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,   in bio_integrity_alloc()
      53  struct bio_set *bs = bio->bi_pool;   in bio_integrity_alloc()
      56  if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))   in bio_integrity_alloc()
      83  bip->bip_bio = bio;   in bio_integrity_alloc()
      84  bio->bi_integrity = bip;   in bio_integrity_alloc()
      85  bio->bi_opf |= REQ_INTEGRITY;   in bio_integrity_alloc()
     101  void bio_integrity_free(struct bio *bio)   in bio_integrity_free()
     103  struct bio_integrity_payload *bip = bio_integrity(bio);   in bio_integrity_free()
     104  struct bio_set *bs = bio->bi_pool;   in bio_integrity_free()
     110  bio->bi_integrity = NULL;   in bio_integrity_free()
     [all …]

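bio_integrity_alloc() hangs a bio_integrity_payload off the bio and sets REQ_INTEGRITY; the caller then adds the pages that hold the protection information. A hedged sketch of the usual sequence, with buffer management and error unwinding elided; attach_pi() is an illustrative name:

    #include <linux/bio.h>

    /* Sketch: attach protection-information pages to an already-built bio. */
    static int attach_pi(struct bio *bio, struct page *pi_page,
                         unsigned int pi_len)
    {
            struct bio_integrity_payload *bip;

            bip = bio_integrity_alloc(bio, GFP_NOIO, 1);    /* room for one vec */
            if (IS_ERR(bip))
                    return PTR_ERR(bip);

            /* bio_integrity_alloc() already set bi_integrity and REQ_INTEGRITY. */
            if (bio_integrity_add_page(bio, pi_page, pi_len, 0) != pi_len)
                    return -ENOMEM; /* unwinding elided in this sketch */

            return 0;
    }
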
  blk-lib.c
      39  sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)   in __blkdev_issue_discard()
      41  struct bio *bio = *biop;   in __blkdev_issue_discard()
      67  bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);   in __blkdev_issue_discard()
      68  bio->bi_iter.bi_sector = sector;   in __blkdev_issue_discard()
      69  bio->bi_iter.bi_size = req_sects << 9;   in __blkdev_issue_discard()
      82  *biop = bio;   in __blkdev_issue_discard()
     100  struct bio *bio = NULL;   in blkdev_issue_discard()
     105  ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);   in blkdev_issue_discard()
     106  if (!ret && bio) {   in blkdev_issue_discard()
     107  ret = submit_bio_wait(bio);   in blkdev_issue_discard()
     [all …]

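blkdev_issue_discard() is the synchronous wrapper around __blkdev_issue_discard() above: it builds as many REQ_OP_DISCARD bios as the device limits require and waits for the last one with submit_bio_wait(). A small sketch of a caller (in 6.6 the function takes no flags argument); discard_range() is illustrative:

    #include <linux/blkdev.h>

    /* Sketch: discard a byte range of a block device, if it supports discard. */
    static int discard_range(struct block_device *bdev, loff_t start, loff_t len)
    {
            sector_t sector = start >> SECTOR_SHIFT;
            sector_t nr_sects = len >> SECTOR_SHIFT;

            if (!bdev_max_discard_sectors(bdev))
                    return -EOPNOTSUPP;     /* device has no discard support */

            /* Builds and submits the discard bios, sleeping until they finish. */
            return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
    }
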
  blk-rq-qos.h
      38  void (*throttle)(struct rq_qos *, struct bio *);
      39  void (*track)(struct rq_qos *, struct request *, struct bio *);
      40  void (*merge)(struct rq_qos *, struct request *, struct bio *);
      44  void (*done_bio)(struct rq_qos *, struct bio *);
      45  void (*cleanup)(struct rq_qos *, struct bio *);
     103  void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
     107  void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
     108  void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
     109  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
     110  void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
     [all …]

  blk.h
      37  int __bio_queue_enter(struct request_queue *q, struct bio *bio);
      38  void submit_bio_noacct_nocheck(struct bio *bio);
      64  static inline int bio_queue_enter(struct bio *bio)   in bio_queue_enter()
      66  struct request_queue *q = bdev_get_queue(bio->bi_bdev);   in bio_queue_enter()
      70  return __bio_queue_enter(q, bio);   in bio_queue_enter()
     184  bool __bio_integrity_endio(struct bio *);
     185  void bio_integrity_free(struct bio *bio);
     186  static inline bool bio_integrity_endio(struct bio *bio)   in bio_integrity_endio()
     188  if (bio_integrity(bio))   in bio_integrity_endio()
     189  return __bio_integrity_endio(bio);   in bio_integrity_endio()
     [all …]

  blk-crypto-fallback.c
      52  struct bio *bio;
     144  static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)   in blk_crypto_fallback_encrypt_endio()
     146  struct bio *src_bio = enc_bio->bi_private;   in blk_crypto_fallback_encrypt_endio()
     160  static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)   in blk_crypto_fallback_clone_bio()
     165  struct bio *bio;   in blk_crypto_fallback_clone_bio()
     167  bio = bio_kmalloc(nr_segs, GFP_NOIO);   in blk_crypto_fallback_clone_bio()
     168  if (!bio)   in blk_crypto_fallback_clone_bio()
     170  bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,   in blk_crypto_fallback_clone_bio()
     173  bio_set_flag(bio, BIO_REMAPPED);   in blk_crypto_fallback_clone_bio()
     174  bio->bi_ioprio = bio_src->bi_ioprio;   in blk_crypto_fallback_clone_bio()
     [all …]

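blk_crypto_fallback_clone_bio() uses the bio_kmalloc() flavour of allocation: such bios are not backed by a bio_set, so the caller runs bio_init() itself and frees with bio_uninit() plus kfree() rather than bio_put(). A minimal sketch of that pairing, with illustrative helper names:

    #include <linux/bio.h>
    #include <linux/slab.h>

    /* Sketch: allocate and initialize a kmalloc-backed bio. */
    static struct bio *alloc_kmalloc_bio(struct block_device *bdev,
                                         unsigned short nr_vecs)
    {
            struct bio *bio = bio_kmalloc(nr_vecs, GFP_NOIO);

            if (!bio)
                    return NULL;
            /* The inline vector table doubles as the bio's bvec array. */
            bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_WRITE);
            return bio;
    }

    /* kmalloc-backed bios are torn down by hand, not via bio_put(). */
    static void free_kmalloc_bio(struct bio *bio)
    {
            bio_uninit(bio);
            kfree(bio);
    }
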
/linux-6.6.21/fs/btrfs/

  bio.c
      38  return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;   in bbio_has_ordered_extent()
      48  memset(bbio, 0, offsetof(struct btrfs_bio, bio));   in btrfs_bio_init()
      67  struct bio *bio;   in btrfs_bio_alloc()
      69  bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);   in btrfs_bio_alloc()
      70  bbio = btrfs_bio(bio);   in btrfs_bio_alloc()
      80  struct bio *bio;   in btrfs_split_bio()
      85  bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,   in btrfs_split_bio()
      88  bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,   in btrfs_split_bio()
      91  bbio = btrfs_bio(bio);   in btrfs_split_bio()
     109  bio_put(&bbio->bio);   in btrfs_cleanup_bio()
     [all …]

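btrfs_bio() works because struct btrfs_bio embeds its struct bio as the last member and btrfs_bioset reserves offsetof(struct btrfs_bio, bio) bytes of front padding, so container_of() recovers the wrapper from any bio allocated there. A generic sketch of the same pattern; struct my_bio and my_bioset are illustrative names, not btrfs code:

    #include <linux/bio.h>

    struct my_bio {
            u64 logical;            /* per-I/O private state ... */
            struct bio bio;         /* must be the last member */
    };

    static struct bio_set my_bioset;

    static int my_bioset_init(void)
    {
            /* Reserve front padding equal to the private part of my_bio. */
            return bioset_init(&my_bioset, BIO_POOL_SIZE,
                               offsetof(struct my_bio, bio), BIOSET_NEED_BVECS);
    }

    static struct my_bio *my_bio_alloc(struct block_device *bdev,
                                       unsigned short nr_vecs, blk_opf_t opf)
    {
            struct bio *bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOFS,
                                               &my_bioset);

            /* Recover the embedding structure, as btrfs_bio() does. */
            return container_of(bio, struct my_bio, bio);
    }
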
/linux-6.6.21/drivers/md/bcache/

  request.c
      40  static void bio_csum(struct bio *bio, struct bkey *k)   in bio_csum()
      46  bio_for_each_segment(bv, bio, iter) {   in bio_csum()
     111  struct bio *bio = op->bio;   in bch_data_invalidate()
     114  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);   in bch_data_invalidate()
     116  while (bio_sectors(bio)) {   in bch_data_invalidate()
     117  unsigned int sectors = min(bio_sectors(bio),   in bch_data_invalidate()
     123  bio->bi_iter.bi_sector += sectors;   in bch_data_invalidate()
     124  bio->bi_iter.bi_size -= sectors << 9;   in bch_data_invalidate()
     128  bio->bi_iter.bi_sector,   in bch_data_invalidate()
     134  bio_put(bio);   in bch_data_invalidate()
     [all …]

  io.c
      17  void bch_bbio_free(struct bio *bio, struct cache_set *c)   in bch_bbio_free()
      19  struct bbio *b = container_of(bio, struct bbio, bio);   in bch_bbio_free()
      24  struct bio *bch_bbio_alloc(struct cache_set *c)   in bch_bbio_alloc()
      27  struct bio *bio = &b->bio;   in bch_bbio_alloc()
      29  bio_init(bio, NULL, bio->bi_inline_vecs,   in bch_bbio_alloc()
      32  return bio;   in bch_bbio_alloc()
      35  void __bch_submit_bbio(struct bio *bio, struct cache_set *c)   in __bch_submit_bbio()
      37  struct bbio *b = container_of(bio, struct bbio, bio);   in __bch_submit_bbio()
      39  bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);   in __bch_submit_bbio()
      40  bio_set_dev(bio, c->cache->bdev);   in __bch_submit_bbio()
     [all …]

  movinggc.c
      19  struct bbio bio;
      48  struct bio *bio = &io->bio.bio;   in write_moving_finish()
      50  bio_free_pages(bio);   in write_moving_finish()
      62  static void read_moving_endio(struct bio *bio)   in read_moving_endio()
      64  struct bbio *b = container_of(bio, struct bbio, bio);   in read_moving_endio()
      65  struct moving_io *io = container_of(bio->bi_private,   in read_moving_endio()
      68  if (bio->bi_status)   in read_moving_endio()
      69  io->op.status = bio->bi_status;   in read_moving_endio()
      75  bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");   in read_moving_endio()
      80  struct bio *bio = &io->bio.bio;   in moving_init()
     [all …]

/linux-6.6.21/fs/ext4/

  readpage.c
      63  struct bio *bio;
      69  static void __read_end_io(struct bio *bio)   in __read_end_io()
      73  bio_for_each_folio_all(fi, bio) {   in __read_end_io()
      76  if (bio->bi_status)   in __read_end_io()
      82  if (bio->bi_private)   in __read_end_io()
      83  mempool_free(bio->bi_private, bio_post_read_ctx_pool);   in __read_end_io()
      84  bio_put(bio);   in __read_end_io()
      93  struct bio *bio = ctx->bio;   in decrypt_work()
      95  if (fscrypt_decrypt_bio(bio))   in decrypt_work()
      98  __read_end_io(bio);   in decrypt_work()
     [all …]

/linux-6.6.21/fs/

  mpage.c
      46  static void mpage_read_end_io(struct bio *bio)   in mpage_read_end_io()
      49  int err = blk_status_to_errno(bio->bi_status);   in mpage_read_end_io()
      51  bio_for_each_folio_all(fi, bio) {   in mpage_read_end_io()
      59  bio_put(bio);   in mpage_read_end_io()
      62  static void mpage_write_end_io(struct bio *bio)   in mpage_write_end_io()
      65  int err = blk_status_to_errno(bio->bi_status);   in mpage_write_end_io()
      67  bio_for_each_folio_all(fi, bio) {   in mpage_write_end_io()
      75  bio_put(bio);   in mpage_write_end_io()
      78  static struct bio *mpage_bio_submit_read(struct bio *bio)   in mpage_bio_submit_read()
      80  bio->bi_end_io = mpage_read_end_io;   in mpage_bio_submit_read()
     [all …]

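mpage_read_end_io() walks the completed bio folio by folio and unlocks each one. A simplified sketch of such a completion handler, modeled on the code above (per-folio error reporting omitted); my_read_end_io() is an illustrative name:

    #include <linux/bio.h>
    #include <linux/pagemap.h>

    /* Sketch of a folio-granular read completion. */
    static void my_read_end_io(struct bio *bio)
    {
            struct folio_iter fi;
            int err = blk_status_to_errno(bio->bi_status);

            bio_for_each_folio_all(fi, bio) {
                    if (!err)
                            folio_mark_uptodate(fi.folio);
                    folio_unlock(fi.folio); /* wakes waiters on the folio lock */
            }
            bio_put(bio);
    }
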
/linux-6.6.21/fs/squashfs/

  block.c
      33  static int copy_bio_to_actor(struct bio *bio,   in copy_bio_to_actor()
      46  if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))   in copy_bio_to_actor()
      70  if (!bio_next_segment(bio, &iter_all))   in copy_bio_to_actor()
      79  static int squashfs_bio_read_cached(struct bio *fullbio,   in squashfs_bio_read_cached()
      87  struct bio *bio = NULL;   in squashfs_bio_read_cached()
     114  if (!bio || idx != end_idx) {   in squashfs_bio_read_cached()
     115  struct bio *new = bio_alloc_clone(bdev, fullbio,   in squashfs_bio_read_cached()
     118  if (bio) {   in squashfs_bio_read_cached()
     119  bio_trim(bio, start_idx * PAGE_SECTORS,   in squashfs_bio_read_cached()
     121  bio_chain(bio, new);   in squashfs_bio_read_cached()
     [all …]

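squashfs_bio_read_cached() builds its sub-reads with bio_alloc_clone(), narrows each clone with bio_trim() and ties them together with bio_chain(). The sketch below shows the simpler canonical combination of bio_split() and bio_chain() that the same chaining primitive is usually paired with; it is a related pattern, not the squashfs code itself, and error handling is elided:

    #include <linux/bio.h>

    /*
     * Sketch: carve the first `sectors` sectors off a bio (sectors must be
     * less than bio_sectors(bio)), chain the remainder to the split so the
     * original end_io runs only once everything finishes, then submit both.
     */
    static void submit_in_two_pieces(struct bio *bio, unsigned int sectors,
                                     struct bio_set *bs)
    {
            struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

            /* `bio` now covers only the remainder and keeps the original end_io. */
            bio_chain(split, bio);  /* bio completes only after split does */
            submit_bio(split);
            submit_bio(bio);
    }
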
/linux-6.6.21/drivers/md/

  raid1-10.c
      16  #define IO_BLOCKED ((struct bio *)1)
      21  #define IO_MADE_GOOD ((struct bio *)2)
      23  #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
      90  static inline struct resync_pages *get_resync_pages(struct bio *bio)   in get_resync_pages()
      92  return bio->bi_private;   in get_resync_pages()
      96  static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,   in md_bio_reset_resync_pages()
     106  if (WARN_ON(!bio_add_page(bio, page, len, 0))) {   in md_bio_reset_resync_pages()
     107  bio->bi_status = BLK_STS_RESOURCE;   in md_bio_reset_resync_pages()
     108  bio_endio(bio);   in md_bio_reset_resync_pages()
     117  static inline void raid1_submit_write(struct bio *bio)   in raid1_submit_write()
     [all …]

  dm-io-rewind.c
      56  static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)   in dm_bio_integrity_rewind()
      58  struct bio_integrity_payload *bip = bio_integrity(bio);   in dm_bio_integrity_rewind()
      59  struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);   in dm_bio_integrity_rewind()
      68  static inline void dm_bio_integrity_rewind(struct bio *bio,   in dm_bio_integrity_rewind()
      94  static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)   in dm_bio_crypt_rewind()
      96  struct bio_crypt_ctx *bc = bio->bi_crypt_context;   in dm_bio_crypt_rewind()
     104  static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)   in dm_bio_crypt_rewind()
     110  static inline void dm_bio_rewind_iter(const struct bio *bio,   in dm_bio_rewind_iter()
     116  if (bio_no_advance_iter(bio))   in dm_bio_rewind_iter()
     119  dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);   in dm_bio_rewind_iter()
     [all …]

  dm-raid1.c
     126  static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)   in queue_bio()
     135  bio_list_add(bl, bio);   in queue_bio()
     145  struct bio *bio;   in dispatch_bios()
     147  while ((bio = bio_list_pop(bio_list)))   in dispatch_bios()
     148  queue_bio(ms, bio, WRITE);   in dispatch_bios()
     168  static struct mirror *bio_get_m(struct bio *bio)   in bio_get_m()
     170  return (struct mirror *) bio->bi_next;   in bio_get_m()
     173  static void bio_set_m(struct bio *bio, struct mirror *m)   in bio_set_m()
     175  bio->bi_next = (struct bio *) m;   in bio_set_m()
     445  static int mirror_available(struct mirror_set *ms, struct bio *bio)   in mirror_available()
     [all …]

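queue_bio() and dispatch_bios() defer bios on a struct bio_list and replay them later from a worker. A generic sketch of that queueing pattern; struct my_dev and the helpers are illustrative names, not dm code:

    #include <linux/bio.h>
    #include <linux/spinlock.h>

    struct my_dev {
            spinlock_t lock;
            struct bio_list deferred;       /* set up with bio_list_init() */
    };

    /* Append a bio to the deferred list (FIFO order). */
    static void my_defer_bio(struct my_dev *d, struct bio *bio)
    {
            unsigned long flags;

            spin_lock_irqsave(&d->lock, flags);
            bio_list_add(&d->deferred, bio);
            spin_unlock_irqrestore(&d->lock, flags);
    }

    /* Worker (process context): steal the whole list, then resubmit each bio. */
    static void my_run_deferred(struct my_dev *d)
    {
            struct bio_list todo;
            struct bio *bio;

            bio_list_init(&todo);
            spin_lock_irq(&d->lock);
            bio_list_merge(&todo, &d->deferred);
            bio_list_init(&d->deferred);
            spin_unlock_irq(&d->lock);

            while ((bio = bio_list_pop(&todo)))
                    submit_bio_noacct(bio);
    }
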
  dm-flakey.c
      21  #define all_corrupt_bio_flags_match(bio, fc) \
      22          (((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
     333  static void flakey_map_bio(struct dm_target *ti, struct bio *bio)   in flakey_map_bio()
     337  bio_set_dev(bio, fc->dev->bdev);   in flakey_map_bio()
     338  bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);   in flakey_map_bio()
     341  static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,   in corrupt_bio_common()
     351  bio_for_each_segment(bvec, bio, iter) {   in corrupt_bio_common()
     352  if (bio_iter_len(bio, iter) > corrupt_bio_byte) {   in corrupt_bio_common()
     358  bio, corrupt_bio_value, corrupt_bio_byte,   in corrupt_bio_common()
     359  (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,   in corrupt_bio_common()
     [all …]

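corrupt_bio_common() locates the segment that holds a given data byte, maps it with bvec_kmap_local() and overwrites it. A sketch of the same walk, modeled on the code above; poke_bio_byte() is an illustrative name:

    #include <linux/bio.h>
    #include <linux/highmem.h>

    /* Sketch: overwrite the byte at `offset` within a bio's data. */
    static void poke_bio_byte(struct bio *bio, unsigned int offset, u8 value)
    {
            struct bio_vec bvec;
            struct bvec_iter iter;

            bio_for_each_segment(bvec, bio, iter) {
                    if (bio_iter_len(bio, iter) > offset) {
                            u8 *p = bvec_kmap_local(&bvec);

                            p[offset] = value;
                            kunmap_local(p);
                            break;
                    }
                    /* Not in this segment; skip its length and keep walking. */
                    offset -= bio_iter_len(bio, iter);
            }
    }
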
/linux-6.6.21/drivers/nvme/target/

  io-cmd-bdev.c
     179  static void nvmet_bio_done(struct bio *bio)   in nvmet_bio_done()
     181  struct nvmet_req *req = bio->bi_private;   in nvmet_bio_done()
     183  nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));   in nvmet_bio_done()
     184  nvmet_req_bio_put(req, bio);   in nvmet_bio_done()
     188  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,   in nvmet_bdev_alloc_bip()
     202  bip = bio_integrity_alloc(bio, GFP_NOIO,   in nvmet_bdev_alloc_bip()
     210  bip_set_seed(bip, bio->bi_iter.bi_sector >>   in nvmet_bdev_alloc_bip()
     213  resid = bio_integrity_bytes(bi, bio_sectors(bio));   in nvmet_bdev_alloc_bip()
     216  rc = bio_integrity_add_page(bio, miter->page, len,   in nvmet_bdev_alloc_bip()
     233  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,   in nvmet_bdev_alloc_bip()
     [all …]

/linux-6.6.21/fs/f2fs/

  iostat.h
      45  static inline void iostat_update_submit_ctx(struct bio *bio,   in iostat_update_submit_ctx()
      48  struct bio_iostat_ctx *iostat_ctx = bio->bi_private;   in iostat_update_submit_ctx()
      54  static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)   in get_post_read_ctx()
      56  struct bio_iostat_ctx *iostat_ctx = bio->bi_private;   in get_post_read_ctx()
      61  extern void iostat_update_and_unbind_ctx(struct bio *bio);
      63  struct bio *bio, struct bio_post_read_ctx *ctx);
      71  static inline void iostat_update_and_unbind_ctx(struct bio *bio) {}   in iostat_update_and_unbind_ctx()
      73  struct bio *bio, struct bio_post_read_ctx *ctx) {}   in iostat_alloc_and_bind_ctx()
      74  static inline void iostat_update_submit_ctx(struct bio *bio,   in iostat_update_submit_ctx()
      76  static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)   in get_post_read_ctx()
     [all …]