Lines Matching refs:bio
32 struct bio *free_list;
33 struct bio *free_list_irq;
115 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
214 void bio_uninit(struct bio *bio) in bio_uninit() argument
217 if (bio->bi_blkg) { in bio_uninit()
218 blkg_put(bio->bi_blkg); in bio_uninit()
219 bio->bi_blkg = NULL; in bio_uninit()
222 if (bio_integrity(bio)) in bio_uninit()
223 bio_integrity_free(bio); in bio_uninit()
225 bio_crypt_free_ctx(bio); in bio_uninit()
229 static void bio_free(struct bio *bio) in bio_free() argument
231 struct bio_set *bs = bio->bi_pool; in bio_free()
232 void *p = bio; in bio_free()
236 bio_uninit(bio); in bio_free()
237 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
246 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, in bio_init() argument
249 bio->bi_next = NULL; in bio_init()
250 bio->bi_bdev = bdev; in bio_init()
251 bio->bi_opf = opf; in bio_init()
252 bio->bi_flags = 0; in bio_init()
253 bio->bi_ioprio = 0; in bio_init()
254 bio->bi_status = 0; in bio_init()
255 bio->bi_iter.bi_sector = 0; in bio_init()
256 bio->bi_iter.bi_size = 0; in bio_init()
257 bio->bi_iter.bi_idx = 0; in bio_init()
258 bio->bi_iter.bi_bvec_done = 0; in bio_init()
259 bio->bi_end_io = NULL; in bio_init()
260 bio->bi_private = NULL; in bio_init()
262 bio->bi_blkg = NULL; in bio_init()
263 bio->bi_issue.value = 0; in bio_init()
265 bio_associate_blkg(bio); in bio_init()
267 bio->bi_iocost_cost = 0; in bio_init()
271 bio->bi_crypt_context = NULL; in bio_init()
274 bio->bi_integrity = NULL; in bio_init()
276 bio->bi_vcnt = 0; in bio_init()
278 atomic_set(&bio->__bi_remaining, 1); in bio_init()
279 atomic_set(&bio->__bi_cnt, 1); in bio_init()
280 bio->bi_cookie = BLK_QC_T_NONE; in bio_init()
282 bio->bi_max_vecs = max_vecs; in bio_init()
283 bio->bi_io_vec = table; in bio_init()
284 bio->bi_pool = NULL; in bio_init()
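
bio_init() hand-initializes every field, which is what makes on-stack bios possible: the caller supplies both the bio and its bvec table. A minimal sketch of that pattern, assuming a valid bdev and page are in scope:

	struct bio bio;
	struct bio_vec bvec;
	int ret;

	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = 0;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);	/* pairs with bio_init() for on-stack bios */
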
300 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) in bio_reset() argument
302 bio_uninit(bio); in bio_reset()
303 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
304 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
305 bio->bi_bdev = bdev; in bio_reset()
306 if (bio->bi_bdev) in bio_reset()
307 bio_associate_blkg(bio); in bio_reset()
308 bio->bi_opf = opf; in bio_reset()
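
bio_reset() wipes only the first BIO_RESET_BYTES of the structure, so bi_max_vecs, bi_io_vec and bi_pool survive and the allocation can be recycled. A hedged sketch of reuse after the previous I/O has fully completed (next_sector is a hypothetical cursor):

	bio_reset(bio, bdev, REQ_OP_WRITE | REQ_SYNC);
	bio->bi_iter.bi_sector = next_sector;	/* hypothetical */
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
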
312 static struct bio *__bio_chain_endio(struct bio *bio) in __bio_chain_endio() argument
314 struct bio *parent = bio->bi_private; in __bio_chain_endio()
316 if (bio->bi_status && !parent->bi_status) in __bio_chain_endio()
317 parent->bi_status = bio->bi_status; in __bio_chain_endio()
318 bio_put(bio); in __bio_chain_endio()
322 static void bio_chain_endio(struct bio *bio) in bio_chain_endio() argument
324 bio_endio(__bio_chain_endio(bio)); in bio_chain_endio()
338 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
340 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
342 bio->bi_private = parent; in bio_chain()
343 bio->bi_end_io = bio_chain_endio; in bio_chain()
348 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, in blk_next_bio() argument
351 struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp); in blk_next_bio()
353 if (bio) { in blk_next_bio()
354 bio_chain(bio, new); in blk_next_bio()
355 submit_bio(bio); in blk_next_bio()
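
blk_next_bio() packages the chain-and-submit idiom: a caller emitting a long run of bios chains each finished bio to its successor, so the last completion waits for all of them and the first error is propagated forward. Roughly, under assumed names (more_work, nr_pages, opf are hypothetical):

	/* Sketch: issue a run of chained bios; mirrors blk_next_bio(). */
	struct bio *prev = NULL, *new;

	while (more_work) {
		new = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
		if (prev) {
			bio_chain(prev, new);	/* new completes after prev */
			submit_bio(prev);
		}
		/* ... fill 'new' ... */
		prev = new;
	}
	submit_bio(prev);	/* completes only after the whole chain */
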
365 struct bio *bio; in bio_alloc_rescue() local
369 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
372 if (!bio) in bio_alloc_rescue()
375 submit_bio_noacct(bio); in bio_alloc_rescue()
382 struct bio *bio; in punt_bios_to_rescuer() local
400 while ((bio = bio_list_pop(&current->bio_list[0]))) in punt_bios_to_rescuer()
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
405 while ((bio = bio_list_pop(&current->bio_list[1]))) in punt_bios_to_rescuer()
406 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
432 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev, in bio_alloc_percpu_cache()
437 struct bio *bio; in bio_alloc_percpu_cache() local
448 bio = cache->free_list; in bio_alloc_percpu_cache()
449 cache->free_list = bio->bi_next; in bio_alloc_percpu_cache()
453 bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf); in bio_alloc_percpu_cache()
454 bio->bi_pool = bs; in bio_alloc_percpu_cache()
455 return bio; in bio_alloc_percpu_cache()
492 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, in bio_alloc_bioset()
497 struct bio *bio; in bio_alloc_bioset() local
506 bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, in bio_alloc_bioset()
508 if (bio) in bio_alloc_bioset()
509 return bio; in bio_alloc_bioset()
554 bio = p + bs->front_pad; in bio_alloc_bioset()
567 bio_init(bio, bdev, bvl, nr_vecs, opf); in bio_alloc_bioset()
569 bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); in bio_alloc_bioset()
571 bio_init(bio, bdev, NULL, 0, opf); in bio_alloc_bioset()
574 bio->bi_pool = bs; in bio_alloc_bioset()
575 return bio; in bio_alloc_bioset()
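
bio_alloc_bioset() draws from a bio_set whose mempools guarantee forward progress under memory pressure, which is why stacking drivers keep a private set rather than using fs_bio_set. A sketch of the setup, with my_bioset and my_init as hypothetical driver-local names:

	/* Sketch: a driver-private bio_set. BIOSET_NEED_BVECS sizes the
	 * bvec mempool so allocations with nr_vecs > 0 can always make
	 * progress. */
	static struct bio_set my_bioset;

	int my_init(void)
	{
		return bioset_init(&my_bioset, BIO_POOL_SIZE, 0,
				   BIOSET_NEED_BVECS);
	}

	/* later, on the I/O path: */
	struct bio *bio = bio_alloc_bioset(bdev, 4, REQ_OP_READ, GFP_NOIO,
					   &my_bioset);
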
599 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) in bio_kmalloc()
601 struct bio *bio; in bio_kmalloc() local
605 return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); in bio_kmalloc()
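
Note the body above: bio_kmalloc() is nothing but a kmalloc() sized for nr_vecs inline vecs. No bio_set is involved, so the caller owns initialization and teardown. Sketch:

	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

	if (!bio)
		return -ENOMEM;
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	/* ... build, submit, wait ... */
	bio_uninit(bio);
	kfree(bio);	/* not bio_put(): this bio never had a bi_pool */
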
609 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) in zero_fill_bio_iter() argument
614 __bio_for_each_segment(bv, bio, iter, start) in zero_fill_bio_iter()
629 static void bio_truncate(struct bio *bio, unsigned new_size) in bio_truncate() argument
636 if (new_size >= bio->bi_iter.bi_size) in bio_truncate()
639 if (bio_op(bio) != REQ_OP_READ) in bio_truncate()
642 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
666 bio->bi_iter.bi_size = new_size; in bio_truncate()
681 void guard_bio_eod(struct bio *bio) in guard_bio_eod() argument
683 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in guard_bio_eod()
693 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
696 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
697 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) in guard_bio_eod()
700 bio_truncate(bio, maxsector << 9); in guard_bio_eod()
707 struct bio *bio; in __bio_alloc_cache_prune() local
709 while ((bio = cache->free_list) != NULL) { in __bio_alloc_cache_prune()
710 cache->free_list = bio->bi_next; in __bio_alloc_cache_prune()
712 bio_free(bio); in __bio_alloc_cache_prune()
760 static inline void bio_put_percpu_cache(struct bio *bio) in bio_put_percpu_cache() argument
764 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); in bio_put_percpu_cache()
767 bio_free(bio); in bio_put_percpu_cache()
771 bio_uninit(bio); in bio_put_percpu_cache()
773 if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) { in bio_put_percpu_cache()
774 bio->bi_next = cache->free_list; in bio_put_percpu_cache()
775 bio->bi_bdev = NULL; in bio_put_percpu_cache()
776 cache->free_list = bio; in bio_put_percpu_cache()
782 bio->bi_next = cache->free_list_irq; in bio_put_percpu_cache()
783 cache->free_list_irq = bio; in bio_put_percpu_cache()
798 void bio_put(struct bio *bio) in bio_put() argument
800 if (unlikely(bio_flagged(bio, BIO_REFFED))) { in bio_put()
801 BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
802 if (!atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
805 if (bio->bi_opf & REQ_ALLOC_CACHE) in bio_put()
806 bio_put_percpu_cache(bio); in bio_put()
808 bio_free(bio); in bio_put()
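
bio_put() only consults __bi_cnt once BIO_REFFED has been set by a prior bio_get(); an unreferenced bio is freed on its first put. A hedged sketch of the classic extra-reference pattern around an asynchronous submit:

	/* Sketch: hold a reference across submit_bio() so the bio pointer
	 * stays valid even if completion has already run; every bio_get()
	 * pairs with a bio_put(). */
	bio_get(bio);
	submit_bio(bio);
	/* ... 'bio' may still be dereferenced here ... */
	bio_put(bio);	/* the last put frees it */
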
812 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) in __bio_clone() argument
814 bio_set_flag(bio, BIO_CLONED); in __bio_clone()
815 bio->bi_ioprio = bio_src->bi_ioprio; in __bio_clone()
816 bio->bi_iter = bio_src->bi_iter; in __bio_clone()
818 if (bio->bi_bdev) { in __bio_clone()
819 if (bio->bi_bdev == bio_src->bi_bdev && in __bio_clone()
821 bio_set_flag(bio, BIO_REMAPPED); in __bio_clone()
822 bio_clone_blkg_association(bio, bio_src); in __bio_clone()
825 if (bio_crypt_clone(bio, bio_src, gfp) < 0) in __bio_clone()
828 bio_integrity_clone(bio, bio_src, gfp) < 0) in __bio_clone()
845 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, in bio_alloc_clone()
848 struct bio *bio; in bio_alloc_clone() local
850 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
851 if (!bio) in bio_alloc_clone()
854 if (__bio_clone(bio, bio_src, gfp) < 0) { in bio_alloc_clone()
855 bio_put(bio); in bio_alloc_clone()
858 bio->bi_io_vec = bio_src->bi_io_vec; in bio_alloc_clone()
860 return bio; in bio_alloc_clone()
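
bio_alloc_clone() gives stacking drivers a clone that shares the source's bvec table (note bi_io_vec is pointed at bio_src's vecs and __bio_clone() sets BIO_CLONED). A hedged sketch of the usual remap-and-resubmit pattern; lower_bdev, my_bioset, map_sector and my_clone_endio are hypothetical:

	/* Sketch: stacking-driver remap. The clone shares bio's bvecs. */
	struct bio *clone = bio_alloc_clone(lower_bdev, bio, GFP_NOIO,
					    &my_bioset);

	if (!clone)
		return -ENOMEM;
	clone->bi_iter.bi_sector = map_sector(bio);
	clone->bi_private = bio;
	clone->bi_end_io = my_clone_endio;	/* completes 'bio', see below */
	submit_bio_noacct(clone);
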
876 int bio_init_clone(struct block_device *bdev, struct bio *bio, in bio_init_clone() argument
877 struct bio *bio_src, gfp_t gfp) in bio_init_clone()
881 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); in bio_init_clone()
882 ret = __bio_clone(bio, bio_src, gfp); in bio_init_clone()
884 bio_uninit(bio); in bio_init_clone()
897 static inline bool bio_full(struct bio *bio, unsigned len) in bio_full() argument
899 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_full()
901 if (bio->bi_iter.bi_size > UINT_MAX - len) in bio_full()
965 int bio_add_hw_page(struct request_queue *q, struct bio *bio, in bio_add_hw_page() argument
969 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_add_hw_page()
972 if (((bio->bi_iter.bi_size + len) >> SECTOR_SHIFT) > max_sectors) in bio_add_hw_page()
975 if (bio->bi_vcnt > 0) { in bio_add_hw_page()
976 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_hw_page()
980 bio->bi_iter.bi_size += len; in bio_add_hw_page()
984 if (bio->bi_vcnt >= in bio_add_hw_page()
985 min(bio->bi_max_vecs, queue_max_segments(q))) in bio_add_hw_page()
996 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset); in bio_add_hw_page()
997 bio->bi_vcnt++; in bio_add_hw_page()
998 bio->bi_iter.bi_size += len; in bio_add_hw_page()
1017 int bio_add_pc_page(struct request_queue *q, struct bio *bio, in bio_add_pc_page() argument
1021 return bio_add_hw_page(q, bio, page, len, offset, in bio_add_pc_page()
1042 int bio_add_zone_append_page(struct bio *bio, struct page *page, in bio_add_zone_append_page() argument
1045 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_add_zone_append_page()
1048 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) in bio_add_zone_append_page()
1051 if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) in bio_add_zone_append_page()
1054 return bio_add_hw_page(q, bio, page, len, offset, in bio_add_zone_append_page()
1069 void __bio_add_page(struct bio *bio, struct page *page, in __bio_add_page() argument
1072 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); in __bio_add_page()
1073 WARN_ON_ONCE(bio_full(bio, len)); in __bio_add_page()
1075 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off); in __bio_add_page()
1076 bio->bi_iter.bi_size += len; in __bio_add_page()
1077 bio->bi_vcnt++; in __bio_add_page()
1091 int bio_add_page(struct bio *bio, struct page *page, in bio_add_page() argument
1096 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_add_page()
1098 if (bio->bi_iter.bi_size > UINT_MAX - len) in bio_add_page()
1101 if (bio->bi_vcnt > 0 && in bio_add_page()
1102 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], in bio_add_page()
1104 bio->bi_iter.bi_size += len; in bio_add_page()
1108 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_add_page()
1110 __bio_add_page(bio, page, len, offset); in bio_add_page()
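
bio_add_page() first tries to merge into the current tail bvec and only then consumes a new slot, so callers must check the returned length rather than assume success. Sketch of filling a bio page by page (pages, nr and sector are hypothetical):

	struct bio *bio = bio_alloc(bdev, nr, REQ_OP_WRITE, GFP_KERNEL);
	int i;

	bio->bi_iter.bi_sector = sector;
	for (i = 0; i < nr; i++) {
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;	/* bio full: submit and start a new one */
	}
	submit_bio(bio);
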
1115 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, in bio_add_folio_nofail() argument
1120 __bio_add_page(bio, &folio->page, len, off); in bio_add_folio_nofail()
1137 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, in bio_add_folio() argument
1142 return bio_add_page(bio, &folio->page, len, off) > 0; in bio_add_folio()
1146 void __bio_release_pages(struct bio *bio, bool mark_dirty) in __bio_release_pages() argument
1150 bio_for_each_folio_all(fi, bio) { in __bio_release_pages()
1161 bio_release_page(bio, page++); in __bio_release_pages()
1168 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) in bio_iov_bvec_set() argument
1172 WARN_ON_ONCE(bio->bi_max_vecs); in bio_iov_bvec_set()
1174 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { in bio_iov_bvec_set()
1175 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_bvec_set()
1181 bio->bi_vcnt = iter->nr_segs; in bio_iov_bvec_set()
1182 bio->bi_io_vec = (struct bio_vec *)iter->bvec; in bio_iov_bvec_set()
1183 bio->bi_iter.bi_bvec_done = iter->iov_offset; in bio_iov_bvec_set()
1184 bio->bi_iter.bi_size = size; in bio_iov_bvec_set()
1185 bio_set_flag(bio, BIO_CLONED); in bio_iov_bvec_set()
1188 static int bio_iov_add_page(struct bio *bio, struct page *page, in bio_iov_add_page() argument
1193 if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len)) in bio_iov_add_page()
1196 if (bio->bi_vcnt > 0 && in bio_iov_add_page()
1197 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], in bio_iov_add_page()
1199 bio->bi_iter.bi_size += len; in bio_iov_add_page()
1201 bio_release_page(bio, page); in bio_iov_add_page()
1204 __bio_add_page(bio, page, len, offset); in bio_iov_add_page()
1208 static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page, in bio_iov_add_zone_append_page() argument
1211 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_add_zone_append_page()
1214 if (bio_add_hw_page(q, bio, page, len, offset, in bio_iov_add_zone_append_page()
1218 bio_release_page(bio, page); in bio_iov_add_zone_append_page()
1234 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_iter_get_pages() argument
1237 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1238 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1239 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_iter_get_pages()
1254 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) in __bio_iov_iter_get_pages()
1265 UINT_MAX - bio->bi_iter.bi_size, in __bio_iov_iter_get_pages()
1272 if (bio->bi_bdev) { in __bio_iov_iter_get_pages()
1273 size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1); in __bio_iov_iter_get_pages()
1287 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { in __bio_iov_iter_get_pages()
1288 ret = bio_iov_add_zone_append_page(bio, page, len, in __bio_iov_iter_get_pages()
1293 bio_iov_add_page(bio, page, len, offset); in __bio_iov_iter_get_pages()
1301 bio_release_page(bio, pages[i++]); in __bio_iov_iter_get_pages()
1326 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in bio_iov_iter_get_pages() argument
1330 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_iov_iter_get_pages()
1334 bio_iov_bvec_set(bio, iter); in bio_iov_iter_get_pages()
1335 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_iov_iter_get_pages()
1340 bio_set_flag(bio, BIO_PAGE_PINNED); in bio_iov_iter_get_pages()
1342 ret = __bio_iov_iter_get_pages(bio, iter); in bio_iov_iter_get_pages()
1343 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); in bio_iov_iter_get_pages()
1345 return bio->bi_vcnt ? 0 : ret; in bio_iov_iter_get_pages()
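
bio_iov_iter_get_pages() pins user pages into the bio (or, for ITER_BVEC, simply borrows the caller's bvec array and marks the bio BIO_CLONED). The direct-I/O style loop it supports looks roughly like the sketch below, with pos a hypothetical file cursor and error handling trimmed:

	while (iov_iter_count(iter)) {
		struct bio *bio = bio_alloc(bdev,
				bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				REQ_OP_READ, GFP_KERNEL);

		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		if (bio_iov_iter_get_pages(bio, iter) < 0)
			break;
		pos += bio->bi_iter.bi_size;
		submit_bio(bio);
	}
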
1349 static void submit_bio_wait_endio(struct bio *bio) in submit_bio_wait_endio() argument
1351 complete(bio->bi_private); in submit_bio_wait_endio()
1365 int submit_bio_wait(struct bio *bio) in submit_bio_wait() argument
1368 bio->bi_bdev->bd_disk->lockdep_map); in submit_bio_wait()
1371 bio->bi_private = &done; in submit_bio_wait()
1372 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
1373 bio->bi_opf |= REQ_SYNC; in submit_bio_wait()
1374 submit_bio(bio); in submit_bio_wait()
1385 return blk_status_to_errno(bio->bi_status); in submit_bio_wait()
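
submit_bio_wait() is the synchronous wrapper: its end_io completes an on-stack completion and bi_status is translated to an errno on return. Sketch of a one-off synchronous read, assuming bdev, page and sector are in scope:

	/* Sketch: submit_bio_wait() sleeps, so process context only. */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	int err;

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	err = submit_bio_wait(bio);
	bio_put(bio);
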
1389 void __bio_advance(struct bio *bio, unsigned bytes) in __bio_advance() argument
1391 if (bio_integrity(bio)) in __bio_advance()
1392 bio_integrity_advance(bio, bytes); in __bio_advance()
1394 bio_crypt_advance(bio, bytes); in __bio_advance()
1395 bio_advance_iter(bio, &bio->bi_iter, bytes); in __bio_advance()
1399 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, in bio_copy_data_iter()
1400 struct bio *src, struct bvec_iter *src_iter) in bio_copy_data_iter()
1428 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1437 void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1442 bio_for_each_segment_all(bvec, bio, iter_all) in bio_free_pages()
1470 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1474 bio_for_each_folio_all(fi, bio) { in bio_set_pages_dirty()
1497 static struct bio *bio_dirty_list;
1504 struct bio *bio, *next; in bio_dirty_fn() local
1511 while ((bio = next) != NULL) { in bio_dirty_fn()
1512 next = bio->bi_private; in bio_dirty_fn()
1514 bio_release_pages(bio, true); in bio_dirty_fn()
1515 bio_put(bio); in bio_dirty_fn()
1519 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1524 bio_for_each_folio_all(fi, bio) { in bio_check_pages_dirty()
1529 bio_release_pages(bio, false); in bio_check_pages_dirty()
1530 bio_put(bio); in bio_check_pages_dirty()
1534 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1535 bio_dirty_list = bio; in bio_check_pages_dirty()
1541 static inline bool bio_remaining_done(struct bio *bio) in bio_remaining_done() argument
1547 if (!bio_flagged(bio, BIO_CHAIN)) in bio_remaining_done()
1550 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1552 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1553 bio_clear_flag(bio, BIO_CHAIN); in bio_remaining_done()
1573 void bio_endio(struct bio *bio) in bio_endio() argument
1576 if (!bio_remaining_done(bio)) in bio_endio()
1578 if (!bio_integrity_endio(bio)) in bio_endio()
1581 rq_qos_done_bio(bio); in bio_endio()
1583 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { in bio_endio()
1584 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); in bio_endio()
1585 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in bio_endio()
1596 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1597 bio = __bio_chain_endio(bio); in bio_endio()
1601 blk_throtl_bio_endio(bio); in bio_endio()
1603 bio_uninit(bio); in bio_endio()
1604 if (bio->bi_end_io) in bio_endio()
1605 bio->bi_end_io(bio); in bio_endio()
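
bio_endio() is what a driver's completion path ultimately calls; for the clone pattern sketched earlier, the clone's end_io hands the status back and completes the original. A hedged sketch (my_clone_endio is hypothetical):

	static void my_clone_endio(struct bio *clone)
	{
		struct bio *orig = clone->bi_private;

		orig->bi_status = clone->bi_status;
		bio_put(clone);
		bio_endio(orig);	/* complete the original bio */
	}
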
1623 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1626 struct bio *split; in bio_split()
1629 BUG_ON(sectors >= bio_sectors(bio)); in bio_split()
1632 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) in bio_split()
1635 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
1644 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
1646 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) in bio_split()
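
bio_split() clones the front 'sectors' of bio into the split and advances the original past them; callers then chain the two so the parent cannot complete before the split does. A sketch of the usual submission-path idiom, with max_sectors and my_bioset hypothetical:

	/* Sketch: the split-and-chain idiom used by submission paths. */
	struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
				      &my_bioset);

	bio_chain(split, bio);
	submit_bio_noacct(bio);		/* requeue the remainder */
	/* ... then send 'split' down the stack ... */
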
1662 void bio_trim(struct bio *bio, sector_t offset, sector_t size) in bio_trim() argument
1665 offset + size > bio_sectors(bio))) in bio_trim()
1669 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1672 bio_advance(bio, offset << 9); in bio_trim()
1673 bio->bi_iter.bi_size = size; in bio_trim()
1675 if (bio_integrity(bio)) in bio_trim()
1676 bio_integrity_trim(bio); in bio_trim()
1785 BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags)); in init_bio()