Searched refs:bv (Results 1 – 25 of 115) sorted by relevance

/linux-6.1.9/drivers/gpu/drm/i915/gt/
gen7_renderclear.c
44 static int num_primitives(const struct batch_vals *bv) in num_primitives() argument
52 return bv->max_threads; in num_primitives()
56 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv) in batch_get_defaults() argument
62 bv->max_threads = 70; in batch_get_defaults()
65 bv->max_threads = 140; in batch_get_defaults()
68 bv->max_threads = 280; in batch_get_defaults()
71 bv->surface_height = 16 * 16; in batch_get_defaults()
72 bv->surface_width = 32 * 2 * 16; in batch_get_defaults()
77 bv->max_threads = 36; in batch_get_defaults()
80 bv->max_threads = 128; in batch_get_defaults()
[all …]
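
A minimal sketch of the pattern in these hits, with a trimmed struct and a hypothetical gt_variant selector standing in for the real i915 platform checks that batch_get_defaults() performs:

    #include <linux/types.h>

    struct batch_vals_sketch {      /* trimmed; the real struct has more fields */
        u32 max_threads;
        u32 surface_width;
        u32 surface_height;
    };

    /* Hypothetical selector: pick the per-GT thread budget the way
     * batch_get_defaults() does, then fix the clear surface geometry. */
    static void batch_defaults_sketch(unsigned int gt_variant,
                                      struct batch_vals_sketch *bv)
    {
        switch (gt_variant) {
        case 1:  bv->max_threads = 70;  break;
        case 2:  bv->max_threads = 140; break;
        default: bv->max_threads = 280; break;
        }
        bv->surface_height = 16 * 16;
        bv->surface_width  = 32 * 2 * 16;
    }

num_primitives() then simply returns bv->max_threads for the configurations shown.
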
/linux-6.1.9/include/linux/
bvec.h
50 struct bio_vec bv; member
101 static inline bool bvec_iter_advance(const struct bio_vec *bv, in bvec_iter_advance() argument
115 while (bytes && bytes >= bv[idx].bv_len) { in bvec_iter_advance()
116 bytes -= bv[idx].bv_len; in bvec_iter_advance()
129 static inline void bvec_iter_advance_single(const struct bio_vec *bv, in bvec_iter_advance_single() argument
134 if (done == bv[iter->bi_idx].bv_len) { in bvec_iter_advance_single()
162 return &iter_all->bv; in bvec_init_iter_all()
168 struct bio_vec *bv = &iter_all->bv; in bvec_advance() local
171 bv->bv_page++; in bvec_advance()
172 bv->bv_offset = 0; in bvec_advance()
[all …]
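
These hits are the core of the bvec iterator: bvec_iter_advance() consumes whole bio_vec entries until fewer than bv_len bytes remain, then records the remainder in bi_bvec_done. A simplified sketch of that walk (the real helper also WARNs and bails out if bytes exceeds bi_size):

    #include <linux/bvec.h>

    /* Simplified bvec_iter_advance(): consume whole entries, leave the
     * partial remainder in bi_bvec_done. No overflow check here. */
    static void bvec_advance_sketch(const struct bio_vec *bv,
                                    struct bvec_iter *iter, unsigned int bytes)
    {
        unsigned int idx = iter->bi_idx;

        iter->bi_size -= bytes;
        bytes += iter->bi_bvec_done;
        while (bytes && bytes >= bv[idx].bv_len) {
            bytes -= bv[idx].bv_len;
            idx++;
        }
        iter->bi_idx = idx;
        iter->bi_bvec_done = bytes;
    }
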
/linux-6.1.9/drivers/md/bcache/
util.c
234 struct bio_vec *bv = bio->bi_io_vec; in bch_bio_map() local
239 bv->bv_offset = base ? offset_in_page(base) : 0; in bch_bio_map()
242 for (; size; bio->bi_vcnt++, bv++) { in bch_bio_map()
243 bv->bv_offset = 0; in bch_bio_map()
244 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
247 bv->bv_page = is_vmalloc_addr(base) in bch_bio_map()
251 base += bv->bv_len; in bch_bio_map()
254 size -= bv->bv_len; in bch_bio_map()
271 struct bio_vec *bv; in bch_bio_alloc_pages() local
277 for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) { in bch_bio_alloc_pages()
[all …]
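
bch_bio_map() is bcache-internal (declared in drivers/md/bcache/util.h): it points bio->bi_io_vec at an existing kernel buffer page by page, taking the is_vmalloc_addr() branch above because vmalloc memory needs vmalloc_to_page(). A hedged usage sketch, assuming the bio already has enough vecs allocated:

    /* Sketch: bind a kernel buffer to a bio, bcache-style. bi_size must
     * be set first because bch_bio_map() consumes it to size the vecs. */
    static void map_buffer_sketch(struct bio *bio, void *buf, unsigned int len)
    {
        bio->bi_iter.bi_size = len;
        bch_bio_map(bio, buf);   /* buf may be kmalloc or vmalloc memory */
    }
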
debug.c
112 struct bio_vec bv, cbv; in bch_data_verify() local
130 bio_for_each_segment(bv, bio, iter) { in bch_data_verify()
131 void *p1 = bvec_kmap_local(&bv); in bch_data_verify()
137 cache_set_err_on(memcmp(p1, p2, bv.bv_len), in bch_data_verify()
145 bio_advance_iter(check, &citer, bv.bv_len); in bch_data_verify()
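
bch_data_verify() walks two bios in lockstep: bio_for_each_segment() drives the primary iterator while bio_advance_iter() manually advances the check bio. A sketch of the same dual-iterator compare, assuming both bios have identical segment boundaries:

    #include <linux/bio.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Sketch: compare two bios' payloads segment by segment. */
    static bool bios_equal_sketch(struct bio *a, struct bio *b)
    {
        struct bvec_iter aiter, biter = b->bi_iter;
        struct bio_vec av;
        bool same = true;

        bio_for_each_segment(av, a, aiter) {
            struct bio_vec bv2 = bio_iter_iovec(b, biter);
            void *p1 = bvec_kmap_local(&av);
            void *p2 = bvec_kmap_local(&bv2);

            if (memcmp(p1, p2, av.bv_len))
                same = false;
            kunmap_local(p2);
            kunmap_local(p1);
            bio_advance_iter(b, &biter, av.bv_len);
        }
        return same;
    }
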
/linux-6.1.9/arch/parisc/kernel/
entry.S
142 bv,n 0(%r3)
760 bv %r0(%r2)
769 bv %r0(%r2)
928 bv %r0(%r20)
957 bv %r0(%r1)
1680 bv %r0(%r2)
1876 bv %r0(%r19) /* jumps to schedule() */
1907 bv %r0(%rp)
1975 bv,n (%r1)
2071 bv,n (%r1)
[all …]
real2.S
92 bv 0(%r31)
114 bv 0(%rp)
134 bv 0(%r2)
149 bv 0(%r2)
190 bv 0(%r2)
227 bv 0(%r2)
274 bv 0(%r31)
287 bv 0(%rp)
302 bv %r0(%r2)
hpmc.S
154 bv (r3) /* call pdce_proc */
169 bv (%r3) /* call pdce_proc */
199 bv (%r3) /* call pdce_proc */
221 bv (%r5)
269 bv (%r3) /* call pdce_proc */
head.S
102 bv,n (%r1)
209 bv,n (%rp)
251 bv (%rp)
264 bv (%r3)
387 bv (%rp)
pacache.S
175 2: bv %r0(%r2)
184 bv,n %r0(%r2)
245 bv %r0(%r2)
306 bv %r0(%r2)
369 bv %r0(%r2)
480 bv %r0(%r2)
660 bv %r0(%r2)
728 bv %r0(%r2)
777 bv %r0(%r2)
826 bv %r0(%r2)
[all …]
/linux-6.1.9/block/
blk-merge.c
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_first_bvec() argument
23 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in bio_get_first_bvec()
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_last_bvec() argument
31 bio_get_first_bvec(bio, bv); in bio_get_last_bvec()
32 if (bv->bv_len == bio->bi_iter.bi_size) in bio_get_last_bvec()
42 *bv = bio->bi_io_vec[idx]; in bio_get_last_bvec()
49 bv->bv_len = iter.bi_bvec_done; in bio_get_last_bvec()
222 static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv, in bvec_split_segs() argument
227 unsigned len = min(bv->bv_len, max_len); in bvec_split_segs()
232 seg_size = get_max_segment_size(lim, bv->bv_page, in bvec_split_segs()
[all …]
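
bio_get_last_bvec() short-circuits when the first bvec already covers bi_size and otherwise advances to the final element. The same answer can be had, more slowly, with just the public iterator macro; a sketch for a non-empty bio:

    #include <linux/bio.h>

    /* Sketch: find the last multi-page bvec the obvious way; the
     * blk-merge.c version avoids walking the whole bio. */
    static void last_bvec_sketch(struct bio *bio, struct bio_vec *out)
    {
        struct bvec_iter iter;
        struct bio_vec bv;

        bio_for_each_bvec(bv, bio, iter)
            *out = bv;           /* last assignment wins */
    }
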
bio.c
158 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) in bvec_free() argument
163 mempool_free(bv, pool); in bvec_free()
165 kmem_cache_free(biovec_slab(nr_vecs)->slab, bv); in bvec_free()
587 struct bio_vec bv; in zero_fill_bio() local
590 bio_for_each_segment(bv, bio, iter) in zero_fill_bio()
591 memzero_bvec(&bv); in zero_fill_bio()
607 struct bio_vec bv; in bio_truncate() local
618 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
619 if (done + bv.bv_len > new_size) { in bio_truncate()
626 zero_user(bv.bv_page, bv.bv_offset + offset, in bio_truncate()
[all …]
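
zero_fill_bio() is nothing more than the segment iterator plus memzero_bvec(), which kmaps, memsets, and flushes each segment. The same loop shape works for any per-segment transform over a bio's unconsumed range:

    #include <linux/bio.h>
    #include <linux/bvec.h>

    /* Sketch: the zero_fill_bio() loop shape. */
    static void zero_bio_sketch(struct bio *bio)
    {
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter)
            memzero_bvec(&bv);   /* kmap + memset + flush + kunmap */
    }
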
blk-crypto-fallback.c
164 struct bio_vec bv; in blk_crypto_fallback_clone_bio() local
178 bio_for_each_segment(bv, bio_src, iter) in blk_crypto_fallback_clone_bio()
179 bio->bi_io_vec[bio->bi_vcnt++] = bv; in blk_crypto_fallback_clone_bio()
215 struct bio_vec bv; in blk_crypto_fallback_split_bio_if_needed() local
218 bio_for_each_segment(bv, bio, iter) { in blk_crypto_fallback_split_bio_if_needed()
219 num_sectors += bv.bv_len >> SECTOR_SHIFT; in blk_crypto_fallback_split_bio_if_needed()
388 struct bio_vec bv; in blk_crypto_fallback_decrypt_bio() local
417 __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) { in blk_crypto_fallback_decrypt_bio()
418 struct page *page = bv.bv_page; in blk_crypto_fallback_decrypt_bio()
420 sg_set_page(&sg, page, data_unit_size, bv.bv_offset); in blk_crypto_fallback_decrypt_bio()
[all …]
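
The split check accumulates each segment's length in 512-byte sectors. A sketch of the same accounting; for a well-formed bio the result matches bio_sectors(bio):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Sketch: sum a bio's payload in 512-byte sectors, per segment. */
    static unsigned int count_sectors_sketch(struct bio *bio)
    {
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned int sectors = 0;

        bio_for_each_segment(bv, bio, iter)
            sectors += bv.bv_len >> SECTOR_SHIFT;
        return sectors;
    }
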
blk-map.c
530 struct bio_vec bv; in blk_rq_append_bio() local
533 bio_for_each_bvec(bv, bio, iter) in blk_rq_append_bio()
579 struct bio_vec *bv = &bvecs[i]; in blk_rq_map_user_bvec() local
585 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) { in blk_rq_map_user_bvec()
590 if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len) in blk_rq_map_user_bvec()
592 if (bytes + bv->bv_len > nr_iter) in blk_rq_map_user_bvec()
594 if (bv->bv_offset + bv->bv_len > PAGE_SIZE) in blk_rq_map_user_bvec()
598 bytes += bv->bv_len; in blk_rq_map_user_bvec()
599 bvprvp = bv; in blk_rq_map_user_bvec()
blk-crypto.c
211 struct bio_vec bv; in bio_crypt_check_alignment() local
213 bio_for_each_segment(bv, bio, iter) { in bio_crypt_check_alignment()
214 if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size)) in bio_crypt_check_alignment()
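
The alignment test ORs bv_len and bv_offset so a single IS_ALIGNED() covers both; this relies on the data unit size being a power of two. A sketch:

    #include <linux/align.h>
    #include <linux/bio.h>

    /* Sketch: every segment must start and end on a data-unit boundary;
     * OR-ing len and offset lets one IS_ALIGNED() test both at once. */
    static bool bio_crypt_aligned_sketch(struct bio *bio, unsigned int dus)
    {
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter)
            if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, dus))
                return false;
        return true;
    }
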
/linux-6.1.9/drivers/net/ethernet/netronome/nfp/bpf/
main.c
66 struct nfp_bpf_vnic *bv; in nfp_bpf_vnic_alloc() local
79 bv = kzalloc(sizeof(*bv), GFP_KERNEL); in nfp_bpf_vnic_alloc()
80 if (!bv) in nfp_bpf_vnic_alloc()
82 nn->app_priv = bv; in nfp_bpf_vnic_alloc()
88 bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); in nfp_bpf_vnic_alloc()
89 bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE); in nfp_bpf_vnic_alloc()
99 struct nfp_bpf_vnic *bv = nn->app_priv; in nfp_bpf_vnic_free() local
101 WARN_ON(bv->tc_prog); in nfp_bpf_vnic_free()
102 kfree(bv); in nfp_bpf_vnic_free()
111 struct nfp_bpf_vnic *bv; in nfp_bpf_setup_tc_block_cb() local
[all …]
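
The nfp hits show the usual per-device private-context pairing: kzalloc into nn->app_priv on alloc, WARN on leftover state and kfree on free. A sketch with a hypothetical private struct:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Hypothetical private struct; nfp's nfp_bpf_vnic also caches the
     * NFP_NET_CFG_BPF_START/DONE offsets read from the control BAR. */
    struct vnic_priv_sketch { u16 start_off; u16 tgt_done; };

    static int vnic_alloc_sketch(void **app_priv)
    {
        struct vnic_priv_sketch *bv = kzalloc(sizeof(*bv), GFP_KERNEL);

        if (!bv)
            return -ENOMEM;
        *app_priv = bv;
        return 0;
    }

    static void vnic_free_sketch(void *app_priv)
    {
        kfree(app_priv);         /* kfree(NULL) is a no-op */
    }
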
/linux-6.1.9/drivers/block/
n64cart.c
62 static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos) in n64cart_do_bvec() argument
68 WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) || in n64cart_do_bvec()
69 (bv->bv_len & (MIN_ALIGNMENT - 1))); in n64cart_do_bvec()
71 dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0); in n64cart_do_bvec()
79 n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1); in n64cart_do_bvec()
83 dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE); in n64cart_do_bvec()
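
dma_map_bvec() is a one-line wrapper around dma_map_page_attrs() for a single bio_vec; the mapping must be checked with dma_mapping_error() and undone with dma_unmap_page(), as the driver does. A sketch:

    #include <linux/bvec.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Sketch: map one bio_vec for a device-to-memory transfer. */
    static int dma_one_bvec_sketch(struct device *dev, struct bio_vec *bv)
    {
        dma_addr_t addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0);

        if (dma_mapping_error(dev, addr))
            return -ENOMEM;
        /* ... program the device with addr, wait for the transfer ... */
        dma_unmap_page(dev, addr, bv->bv_len, DMA_FROM_DEVICE);
        return 0;
    }
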
/linux-6.1.9/drivers/s390/block/
dasd_fba.c
440 struct bio_vec bv; in dasd_fba_build_cp_regular() local
461 rq_for_each_segment(bv, req, iter) { in dasd_fba_build_cp_regular()
462 if (bv.bv_len & (blksize - 1)) in dasd_fba_build_cp_regular()
465 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_fba_build_cp_regular()
466 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp_regular()
467 cidaw += bv.bv_len / blksize; in dasd_fba_build_cp_regular()
503 rq_for_each_segment(bv, req, iter) { in dasd_fba_build_cp_regular()
504 dst = bvec_virt(&bv); in dasd_fba_build_cp_regular()
509 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_fba_build_cp_regular()
511 dst = copy + bv.bv_offset; in dasd_fba_build_cp_regular()
[all …]
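
rq_for_each_segment() iterates every bio segment of a whole request; the dasd code uses it once to size the channel program and again to build it. A sketch of the sizing pass, rejecting fractional blocks as the driver does:

    #include <linux/blk-mq.h>
    #include <linux/errno.h>

    /* Sketch: count device blocks covered by a request; blksize must be
     * a power of two for the fractional-block test below. */
    static int count_blocks_sketch(struct request *req, unsigned int blksize)
    {
        struct req_iterator iter;
        struct bio_vec bv;
        int count = 0;

        rq_for_each_segment(bv, req, iter) {
            if (bv.bv_len & (blksize - 1))
                return -EINVAL;
            count += bv.bv_len / blksize;
        }
        return count;
    }
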
/linux-6.1.9/fs/crypto/
bio.c
33 struct bio_vec *bv; in fscrypt_decrypt_bio() local
36 bio_for_each_segment_all(bv, bio, iter_all) { in fscrypt_decrypt_bio()
37 struct page *page = bv->bv_page; in fscrypt_decrypt_bio()
38 int err = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len, in fscrypt_decrypt_bio()
39 bv->bv_offset); in fscrypt_decrypt_bio()
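
bio_for_each_segment_all() visits every bvec of a bio regardless of iterator state, which is why completion-time processing (here, decryption of just-read pagecache blocks) uses it rather than bio_for_each_segment(). A sketch with a hypothetical per-page hook:

    #include <linux/bio.h>

    /* Sketch: post-I/O walk over all segments; process_page() is a
     * hypothetical callback standing in for the decrypt call above. */
    static void post_read_sketch(struct bio *bio,
                                 void (*process_page)(struct page *page,
                                                      unsigned int len,
                                                      unsigned int offset))
    {
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all)
            process_page(bv->bv_page, bv->bv_len, bv->bv_offset);
    }
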
/linux-6.1.9/drivers/md/
dm-io-rewind.c
12 static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv, in dm_bvec_iter_rewind() argument
27 while (idx >= 0 && bytes && bytes > bv[idx].bv_len) { in dm_bvec_iter_rewind()
28 bytes -= bv[idx].bv_len; in dm_bvec_iter_rewind()
41 iter->bi_bvec_done = bv[idx].bv_len - bytes; in dm_bvec_iter_rewind()
dm-ebs-target.c
64 static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, in __ebs_rw_bvec() argument
70 unsigned int bv_len = bv->bv_len; in __ebs_rw_bvec()
75 if (unlikely(!bv->bv_page || !bv_len)) in __ebs_rw_bvec()
78 pa = bvec_virt(bv); in __ebs_rw_bvec()
101 flush_dcache_page(bv->bv_page); in __ebs_rw_bvec()
103 flush_dcache_page(bv->bv_page); in __ebs_rw_bvec()
124 struct bio_vec bv; in __ebs_rw_bio() local
127 bio_for_each_bvec(bv, bio, iter) { in __ebs_rw_bio()
128 rr = __ebs_rw_bvec(ec, op, &bv, &iter); in __ebs_rw_bio()
/linux-6.1.9/fs/cifs/
misc.c
972 if (ctx->bv) { in cifs_aio_ctx_release()
977 set_page_dirty(ctx->bv[i].bv_page); in cifs_aio_ctx_release()
978 put_page(ctx->bv[i].bv_page); in cifs_aio_ctx_release()
980 kvfree(ctx->bv); in cifs_aio_ctx_release()
1001 struct bio_vec *bv = NULL; in setup_aio_ctx_iter() local
1010 if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT) in setup_aio_ctx_iter()
1011 bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL); in setup_aio_ctx_iter()
1013 if (!bv) { in setup_aio_ctx_iter()
1014 bv = vmalloc(array_size(max_pages, sizeof(*bv))); in setup_aio_ctx_iter()
1015 if (!bv) in setup_aio_ctx_iter()
[all …]
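
The cifs hits hand-roll the "try kmalloc_array(), fall back to vmalloc()" policy behind an array_size() overflow guard; kvmalloc_array() expresses the same policy in one call, and kvfree() (already used in cifs_aio_ctx_release()) frees either kind. A sketch:

    #include <linux/bvec.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Sketch: one-call replacement for the kmalloc-then-vmalloc dance. */
    static struct bio_vec *alloc_bvecs_sketch(unsigned int max_pages)
    {
        return kvmalloc_array(max_pages, sizeof(struct bio_vec), GFP_KERNEL);
    }

Free the result with kvfree(), as the release path above already does.
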
/linux-6.1.9/fs/orangefs/
inode.c
25 struct bio_vec bv; in orangefs_writepage_locked() local
52 bv.bv_page = page; in orangefs_writepage_locked()
53 bv.bv_len = wlen; in orangefs_writepage_locked()
54 bv.bv_offset = off % PAGE_SIZE; in orangefs_writepage_locked()
56 iov_iter_bvec(&iter, WRITE, &bv, 1, wlen); in orangefs_writepage_locked()
87 struct bio_vec *bv; member
105 ow->bv[i].bv_page = ow->pages[i]; in orangefs_writepages_work()
106 ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE, in orangefs_writepages_work()
110 ow->bv[i].bv_offset = ow->off - in orangefs_writepages_work()
113 ow->bv[i].bv_offset = 0; in orangefs_writepages_work()
[all …]
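
A single on-stack bio_vec plus iov_iter_bvec() is enough to point the generic iov_iter machinery at one page range; in 6.1 the direction argument is still READ/WRITE. The bvec must outlive every use of the iter, hence the caller-provided storage in this sketch:

    #include <linux/bvec.h>
    #include <linux/uio.h>

    /* Sketch: wrap one page range in an iov_iter for a write, as
     * orangefs_writepage_locked() does. */
    static void one_page_iter_sketch(struct iov_iter *iter, struct bio_vec *bv,
                                     struct page *page, unsigned int off,
                                     unsigned int len)
    {
        bv->bv_page = page;
        bv->bv_offset = off;
        bv->bv_len = len;
        iov_iter_bvec(iter, WRITE, bv, 1, len);
    }
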
/linux-6.1.9/net/ceph/
messenger_v2.c
144 struct bio_vec bv; in do_try_sendpage() local
152 bv.bv_page = it->bvec->bv_page; in do_try_sendpage()
153 bv.bv_offset = it->bvec->bv_offset + it->iov_offset; in do_try_sendpage()
154 bv.bv_len = min(iov_iter_count(it), in do_try_sendpage()
166 if (sendpage_ok(bv.bv_page)) { in do_try_sendpage()
167 ret = sock->ops->sendpage(sock, bv.bv_page, in do_try_sendpage()
168 bv.bv_offset, bv.bv_len, in do_try_sendpage()
171 iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len); in do_try_sendpage()
231 static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv) in set_in_bvec() argument
235 con->v2.in_bvec = *bv; in set_in_bvec()
[all …]
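
do_try_sendpage() only passes a page to the zero-copy sendpage() path when sendpage_ok() approves it (not a slab page, reference count sane); otherwise it copies via sendmsg() over a one-entry bvec iterator. A sketch of that decision (sock->ops->sendpage still exists in 6.1):

    #include <linux/bvec.h>
    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    /* Sketch: zero-copy when safe, copying sendmsg() fallback otherwise. */
    static int send_bvec_sketch(struct socket *sock, struct bio_vec *bv)
    {
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

        if (sendpage_ok(bv->bv_page))
            return sock->ops->sendpage(sock, bv->bv_page, bv->bv_offset,
                                       bv->bv_len, MSG_DONTWAIT);
        iov_iter_bvec(&msg.msg_iter, WRITE, bv, 1, bv->bv_len);
        return sock_sendmsg(sock, &msg);
    }
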
/linux-6.1.9/fs/verity/
verify.c
219 struct bio_vec *bv; in fsverity_verify_bio() local
236 bio_for_each_segment_all(bv, bio, iter_all) in fsverity_verify_bio()
241 bio_for_each_segment_all(bv, bio, iter_all) { in fsverity_verify_bio()
242 struct page *page = bv->bv_page; in fsverity_verify_bio()
/linux-6.1.9/arch/parisc/boot/compressed/
head.S
71 bv,n 0(%r3)
84 bv,n 0(%ret0)
