
Searched refs:bv_offset (Results 1 – 25 of 77) sorted by relevance


/linux-6.1.9/include/linux/
bvec.h
35 unsigned int bv_offset; member
70 (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
79 .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
98 .bv_offset = bvec_iter_offset((bvec), (iter)), \
172 bv->bv_offset = 0; in bvec_advance()
174 bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT); in bvec_advance()
175 bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; in bvec_advance()
177 bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, in bvec_advance()
196 return kmap_local_page(bvec->bv_page) + bvec->bv_offset; in bvec_kmap_local()
207 memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len); in memcpy_from_bvec()
[all …]
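
The bvec.h hits above cover the core pattern: struct bio_vec stores a page pointer, a length, and bv_offset, and helpers such as bvec_kmap_local() and memcpy_from_bvec() add bv_offset to the mapped page address before touching the data. Below is a minimal userspace sketch of that addressing model; struct bio_vec is re-declared with a plain byte pointer in place of struct page *, and bvec_data() is a hypothetical stand-in for kmap_local_page() plus bv_offset, so none of this is kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* Userspace model of struct bio_vec: a segment of data that lives
 * bv_offset bytes into bv_page and is bv_len bytes long. */
struct bio_vec {
	unsigned char *bv_page;   /* stands in for struct page * */
	unsigned int   bv_len;
	unsigned int   bv_offset;
};

/* Stand-in for kmap_local_page(bv->bv_page) + bv->bv_offset,
 * the expression in the bvec_kmap_local() hit above. */
static unsigned char *bvec_data(const struct bio_vec *bv)
{
	return bv->bv_page + bv->bv_offset;
}

int main(void)
{
	unsigned char *page = malloc(PAGE_SIZE);
	if (!page)
		return 1;
	memset(page, 0, PAGE_SIZE);
	memcpy(page + 512, "hello bvec", 10);

	/* Describe the 10 bytes that start 512 bytes into the page. */
	struct bio_vec bv = { .bv_page = page, .bv_len = 10, .bv_offset = 512 };

	char out[11] = { 0 };
	/* Equivalent of the memcpy_from_bvec() hit: copy bv_len bytes
	 * starting at bv_offset within the page. */
	memcpy(out, bvec_data(&bv), bv.bv_len);
	printf("%s\n", out);

	free(page);
	return 0;
}

bvec_advance() (lines 174-177 above) applies the same arithmetic in reverse: it folds a multi-page bv_offset into bv_page and clamps bv_len to what remains of the current page.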
/linux-6.1.9/block/
blk.h
88 phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset; in biovec_phys_mergeable()
89 phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset; in biovec_phys_mergeable()
111 ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask); in __bvec_gap_to_prev()
201 bip_next->bip_vec[0].bv_offset); in integrity_req_gap_back_merge()
212 bip_next->bip_vec[0].bv_offset); in integrity_req_gap_front_merge()
320 bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; in bio_may_exceed_limits()
blk-merge.c
69 if (pb.bv_offset & queue_virt_boundary(q)) in bio_will_gap()
85 return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset); in bio_will_gap()
233 bv->bv_offset + total_len); in bvec_split_segs()
240 if ((bv->bv_offset + total_len) & lim->virt_boundary_mask) in bvec_split_segs()
282 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset)) in bio_split_rw()
287 bv.bv_offset + bv.bv_len <= PAGE_SIZE) { in bio_split_rw()
459 unsigned offset = bvec->bv_offset + total; in blk_bvec_map_sg()
490 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset); in __blk_bvec_map_sg()
536 if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE) in __blk_bios_map_sg()
blk-map.c
55 bvec->bv_offset, in bio_copy_from_iter()
86 bvec->bv_offset, in bio_copy_to_iter()
585 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) { in blk_rq_map_user_bvec()
594 if (bv->bv_offset + bv->bv_len > PAGE_SIZE) in blk_rq_map_user_bvec()
blk-crypto-fallback.c
330 enc_bvec->bv_offset); in blk_crypto_fallback_encrypt_bio()
332 enc_bvec->bv_offset); in blk_crypto_fallback_encrypt_bio()
420 sg_set_page(&sg, page, data_unit_size, bv.bv_offset); in blk_crypto_fallback_decrypt_bio()
bio.c
626 zero_user(bv.bv_page, bv.bv_offset + offset, in bio_truncate()
858 size_t bv_end = bv->bv_offset + bv->bv_len; in page_is_mergeable()
924 phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset; in bio_try_merge_hw_seg()
981 bvec->bv_offset = offset; in bio_add_hw_page()
1063 bv->bv_offset = off; in __bio_add_page()
bounce.c
96 tovec.bv_offset); in copy_to_high_bio_irq()
blk-crypto.c
214 if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size)) in bio_crypt_check_alignment()
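
Several of the block-layer hits above use bv_offset for segment-merging decisions: biovec_phys_mergeable() (blk.h:88-89) builds physical addresses as page_to_phys(bv_page) + bv_offset, and __bvec_gap_to_prev() (blk.h:111) rejects a merge when the previous vector does not end, or the next one does not start, on the queue's virtual boundary. A simplified userspace sketch of that gap test follows; queue_limits is cut down to the one field involved and the page_to_phys() side is left out, so treat it as a model of the check rather than the kernel function.

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the queue limit consulted by the gap test. */
struct queue_limits {
	unsigned long virt_boundary_mask;  /* e.g. 0xfff for a 4 KiB boundary */
};

struct bio_vec {
	unsigned int bv_len;
	unsigned int bv_offset;
};

/* Mirrors the expression quoted at blk.h:111: there is a "gap" (the two
 * vectors cannot share one hardware segment) if either the previous
 * vector does not end on the boundary or the next one does not start
 * on it. */
static bool bvec_gap_to_prev(const struct queue_limits *lim,
			     const struct bio_vec *bprv,
			     unsigned int next_offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return (next_offset & lim->virt_boundary_mask) ||
	       ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

int main(void)
{
	struct queue_limits lim = { .virt_boundary_mask = 0xfff };
	struct bio_vec prev = { .bv_len = 4096, .bv_offset = 0 };

	/* prev ends on a 4 KiB boundary and next starts on one: no gap. */
	printf("aligned:   %d\n", bvec_gap_to_prev(&lim, &prev, 0));
	/* next starts 512 bytes into a page: gap, so the bio must be split. */
	printf("unaligned: %d\n", bvec_gap_to_prev(&lim, &prev, 512));
	return 0;
}

The bio_split_rw() and blk_rq_map_user_bvec() hits above call this same kind of test before deciding whether a bio can be passed through whole.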
/linux-6.1.9/drivers/md/bcache/
util.c
239 bv->bv_offset = base ? offset_in_page(base) : 0; in bch_bio_map()
243 bv->bv_offset = 0; in bch_bio_map()
244 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
/linux-6.1.9/drivers/xen/
biomerge.c
15 return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; in xen_biovec_phys_mergeable()
/linux-6.1.9/fs/cifs/
fscache.c
151 bvec[0].bv_offset = 0; in fscache_fallback_read_page()
181 bvec[0].bv_offset = 0; in fscache_fallback_write_page()
/linux-6.1.9/fs/btrfs/
compression.c
187 bv.bv_page, bv.bv_offset))) { in end_compressed_bio_read()
189 bv.bv_offset); in end_compressed_bio_read()
195 bv.bv_page, bv.bv_offset, in end_compressed_bio_read()
498 return page_offset(last->bv_page) + last->bv_len + last->bv_offset; in bio_end_offset()
683 file_offset = bio_first_bvec_all(bio)->bv_offset + in btrfs_submit_compressed_read()
1313 bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start; in btrfs_decompress_buf2page()
1329 memcpy_to_page(bvec.bv_page, bvec.bv_offset, in btrfs_decompress_buf2page()
extent_io.c
130 btrfs_bio(bio)->file_offset = page_offset(bv->bv_page) + bv->bv_offset; in submit_one_bio()
919 const unsigned int pgoff = bvec->bv_offset; in submit_data_read_repair()
922 const u64 start = page_offset(bvec->bv_page) + bvec->bv_offset; in submit_data_read_repair()
1031 if (!IS_ALIGNED(bvec->bv_offset, sectorsize)) in end_bio_extent_writepage()
1034 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
1038 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
1040 start = page_offset(page) + bvec->bv_offset; in end_bio_extent_writepage()
1213 if (!IS_ALIGNED(bvec->bv_offset, sectorsize)) in end_bio_extent_readpage()
1216 bvec->bv_offset, bvec->bv_len); in end_bio_extent_readpage()
1217 else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len, in end_bio_extent_readpage()
[all …]
/linux-6.1.9/fs/nfs/
fscache.c
253 bvec[0].bv_offset = 0; in fscache_fallback_read_page()
283 bvec[0].bv_offset = 0; in fscache_fallback_write_page()
/linux-6.1.9/drivers/block/
n64cart.c
68 WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) || in n64cart_do_bvec()
brd.c
297 WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) || in brd_submit_bio()
300 err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, in brd_submit_bio()
/linux-6.1.9/fs/crypto/
bio.c
39 bv->bv_offset); in fscrypt_decrypt_bio()
/linux-6.1.9/net/ceph/
messenger_v2.c
153 bv.bv_offset = it->bvec->bv_offset + it->iov_offset; in do_try_sendpage()
168 bv.bv_offset, bv.bv_len, in do_try_sendpage()
290 con->v2.out_bvec.bv_offset = 0; in set_out_bvec_zero()
868 bv->bv_offset = off; in get_bvec_at()
952 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset); in init_sgs_cursor()
1775 bv.bv_offset = 0; in prepare_read_data()
1792 memcpy_to_page(bv.bv_page, bv.bv_offset, in prepare_read_data_cont()
1798 con->v2.in_bvec.bv_offset, in prepare_read_data_cont()
1807 bv.bv_offset = 0; in prepare_read_data_cont()
1859 bv.bv_offset = 0; in prepare_read_enc_page()
[all …]
/linux-6.1.9/arch/m68k/emu/
nfblock.c
74 page_to_phys(bvec.bv_page) + bvec.bv_offset); in nfhd_submit_bio()
/linux-6.1.9/drivers/block/zram/
zram_drv.c
616 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { in read_from_bdev_async()
690 bvec.bv_offset = 0; in writeback_store()
744 bvec.bv_offset); in writeback_store()
1265 bvec.bv_offset = 0; in __zram_bvec_read()
1497 vec.bv_offset = 0; in zram_bvec_write()
1614 bv.bv_offset += bv.bv_len; in __zram_make_request()
1682 bv.bv_offset = 0; in zram_rw_page()
/linux-6.1.9/mm/
page_io.c
323 sio->bvec[sio->pages].bv_offset = 0; in swap_writepage_fs()
437 sio->bvec[sio->pages].bv_offset = 0; in swap_readpage_fs()
/linux-6.1.9/fs/orangefs/
inode.c
54 bv.bv_offset = off % PAGE_SIZE; in orangefs_writepage_locked()
110 ow->bv[i].bv_offset = ow->off - in orangefs_writepages_work()
113 ow->bv[i].bv_offset = 0; in orangefs_writepages_work()
305 bv.bv_offset = 0; in orangefs_read_folio()
/linux-6.1.9/drivers/target/
target_core_file.c
287 aio_cmd->bvecs[i].bv_offset = sg->offset; in fd_execute_rw_aio()
335 bvec[i].bv_offset = sg->offset; in fd_do_rw()
471 bvec[i].bv_offset = cmd->t_data_sg[0].offset; in fd_execute_write_same()
/linux-6.1.9/Documentation/block/
biovecs.rst
13 ended up partway through a biovec, it would increment bv_offset and decrement
18 bi_size and bi_idx have been moved there; and instead of modifying bv_offset
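
The biovecs.rst excerpt describes the switch to immutable biovecs: completion no longer bumps bv_offset and shrinks bv_len in place, because the iteration state (bi_size, bi_idx, bi_bvec_done) lives in struct bvec_iter and the effective offset is computed on the fly, as in the bvec.h:70 hit above. Below is a small userspace model of that idea; the struct layouts are trimmed and iter_offset()/iter_advance() are illustrative names, not kernel APIs.

#include <stdio.h>

struct bio_vec {
	unsigned int bv_len;
	unsigned int bv_offset;
};

/* Userspace model of struct bvec_iter: progress is tracked here, and the
 * bio_vec array itself is never modified. */
struct bvec_iter {
	unsigned int bi_size;       /* bytes left to process */
	unsigned int bi_idx;        /* current index into the bio_vec array */
	unsigned int bi_bvec_done;  /* bytes consumed of the current bio_vec */
};

/* Mirrors the bvec.h:70 expression: the effective offset is the stored
 * bv_offset plus how far the iterator has advanced into this vector. */
static unsigned int iter_offset(const struct bio_vec *bv,
				const struct bvec_iter *iter)
{
	return bv[iter->bi_idx].bv_offset + iter->bi_bvec_done;
}

static void iter_advance(const struct bio_vec *bv, struct bvec_iter *iter,
			 unsigned int bytes)
{
	iter->bi_size -= bytes;
	iter->bi_bvec_done += bytes;
	/* Move to the next bio_vec once this one is fully consumed. */
	while (iter->bi_size && iter->bi_bvec_done >= bv[iter->bi_idx].bv_len) {
		iter->bi_bvec_done -= bv[iter->bi_idx].bv_len;
		iter->bi_idx++;
	}
}

int main(void)
{
	struct bio_vec bv[2] = {
		{ .bv_len = 1024, .bv_offset = 512 },
		{ .bv_len = 4096, .bv_offset = 0 },
	};
	struct bvec_iter iter = { .bi_size = 5120, .bi_idx = 0, .bi_bvec_done = 0 };

	printf("offset before advance: %u\n", iter_offset(bv, &iter));       /* 512 */
	iter_advance(bv, &iter, 256);
	printf("offset after 256 bytes: %u\n", iter_offset(bv, &iter));      /* 768 */
	/* bv[0].bv_offset is still 512: nothing in the array was touched. */
	printf("bv[0].bv_offset unchanged: %u\n", bv[0].bv_offset);
	return 0;
}

The stored bv_offset staying at 512 is the point of the scheme: the same bio_vec array can be walked by several iterators at once without anyone rewinding modified fields.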
/linux-6.1.9/lib/
iov_iter.c
55 unsigned offset = p->bv_offset + skip; \
1133 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) in iov_iter_aligned_bvec()
1225 res |= (unsigned long)i->bvec[k].bv_offset + skip; in iov_iter_alignment_bvec()
1425 skip += i->bvec->bv_offset; in first_bvec_segment()
1620 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; in bvec_npages()
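
The iov_iter.c hits (1133, 1225), together with the alignment checks at blk-crypto.c:214 and brd.c:297 earlier in the list, show the other recurring use of bv_offset: verifying that every segment starts and ends on a required boundary. Here is a condensed sketch of that OR-accumulation idiom; the helper name bvecs_aligned() and the single combined mask are simplifications, and the kernel versions also honour the iterator's starting skip and, in iov_iter_aligned_bvec(), separate address and length masks.

#include <stdbool.h>
#include <stdio.h>

struct bio_vec {
	unsigned int bv_len;
	unsigned int bv_offset;
};

/* OR together every segment's offset and length, then test the low bits
 * once: if any segment starts or ends off the required alignment, a low
 * bit survives and the whole iterator is treated as unaligned. */
static bool bvecs_aligned(const struct bio_vec *bv, unsigned int nr,
			  unsigned long addr_mask)
{
	unsigned long res = 0;
	unsigned int k;

	for (k = 0; k < nr; k++)
		res |= (unsigned long)bv[k].bv_offset | bv[k].bv_len;

	return (res & addr_mask) == 0;
}

int main(void)
{
	struct bio_vec ok[2]  = { { 512, 0 }, { 1024, 512 } };
	struct bio_vec bad[2] = { { 512, 0 }, { 1000, 3 } };

	/* 512-byte alignment check (mask of the low 9 bits). */
	printf("ok:  %d\n", bvecs_aligned(ok, 2, 511));   /* 1 */
	printf("bad: %d\n", bvecs_aligned(bad, 2, 511));  /* 0 */
	return 0;
}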
