
Searched refs:bv (Results 1 – 25 of 120) sorted by relevance

/linux-6.6.21/drivers/gpu/drm/i915/gt/
gen7_renderclear.c
44 static int num_primitives(const struct batch_vals *bv) in num_primitives() argument
52 return bv->max_threads; in num_primitives()
56 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv) in batch_get_defaults() argument
62 bv->max_threads = 70; in batch_get_defaults()
65 bv->max_threads = 140; in batch_get_defaults()
68 bv->max_threads = 280; in batch_get_defaults()
71 bv->surface_height = 16 * 16; in batch_get_defaults()
72 bv->surface_width = 32 * 2 * 16; in batch_get_defaults()
77 bv->max_threads = 36; in batch_get_defaults()
80 bv->max_threads = 128; in batch_get_defaults()
[all …]
/linux-6.6.21/include/linux/
bvec.h
44 static inline void bvec_set_page(struct bio_vec *bv, struct page *page, in bvec_set_page() argument
47 bv->bv_page = page; in bvec_set_page()
48 bv->bv_len = len; in bvec_set_page()
49 bv->bv_offset = offset; in bvec_set_page()
59 static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio, in bvec_set_folio() argument
62 bvec_set_page(bv, &folio->page, len, offset); in bvec_set_folio()
71 static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr, in bvec_set_virt() argument
74 bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr)); in bvec_set_virt()
89 struct bio_vec bv; member
140 static inline bool bvec_iter_advance(const struct bio_vec *bv, in bvec_iter_advance() argument
[all …]
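The bvec.h hits above are the struct bio_vec setter helpers. As a rough sketch (the wrapper function and its name are hypothetical, not from the tree), a caller might fill a single-segment bio_vec and point an iov_iter at it like this:

#include <linux/bvec.h>
#include <linux/uio.h>

/* Hypothetical example: wrap one page in a bio_vec and build an
 * ITER_SOURCE iov_iter over it.  bvec_set_page() and iov_iter_bvec()
 * are the in-tree helpers seen in the hits above; this wrapper is
 * illustrative only. */
static void example_iter_from_page(struct iov_iter *iter, struct bio_vec *bv,
				   struct page *page, unsigned int len,
				   unsigned int offset)
{
	bvec_set_page(bv, page, len, offset);		/* sets bv_page, bv_len, bv_offset */
	iov_iter_bvec(iter, ITER_SOURCE, bv, 1, len);	/* one-segment iterator over it */
}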
/linux-6.6.21/drivers/md/bcache/
util.c
234 struct bio_vec *bv = bio->bi_io_vec; in bch_bio_map() local
239 bv->bv_offset = base ? offset_in_page(base) : 0; in bch_bio_map()
242 for (; size; bio->bi_vcnt++, bv++) { in bch_bio_map()
243 bv->bv_offset = 0; in bch_bio_map()
244 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
247 bv->bv_page = is_vmalloc_addr(base) in bch_bio_map()
251 base += bv->bv_len; in bch_bio_map()
254 size -= bv->bv_len; in bch_bio_map()
271 struct bio_vec *bv; in bch_bio_alloc_pages() local
277 for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) { in bch_bio_alloc_pages()
[all …]
debug.c
112 struct bio_vec bv, cbv; in bch_data_verify() local
130 bio_for_each_segment(bv, bio, iter) { in bch_data_verify()
131 void *p1 = bvec_kmap_local(&bv); in bch_data_verify()
137 cache_set_err_on(memcmp(p1, p2, bv.bv_len), in bch_data_verify()
145 bio_advance_iter(check, &citer, bv.bv_len); in bch_data_verify()
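bch_data_verify() above pairs each segment with a temporary kernel mapping. A minimal sketch of that bvec_kmap_local() pattern (the zero-checking helper below is made up for illustration):

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: walk a bio segment by segment and check that the
 * payload is zero-filled, using the same bvec_kmap_local()/kunmap_local()
 * pairing as bch_data_verify() above. */
static bool example_bio_is_zeroed(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		void *p = bvec_kmap_local(&bv);
		bool zeroed = memchr_inv(p, 0, bv.bv_len) == NULL;

		kunmap_local(p);
		if (!zeroed)
			return false;
	}
	return true;
}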
/linux-6.6.21/fs/netfs/
iterator.c
40 struct bio_vec *bv = NULL; in netfs_extract_user_iter() local
54 bv_size = array_size(max_pages, sizeof(*bv)); in netfs_extract_user_iter()
55 bv = kvmalloc(bv_size, GFP_KERNEL); in netfs_extract_user_iter()
56 if (!bv) in netfs_extract_user_iter()
64 pages = (void *)bv + bv_size - pg_size; in netfs_extract_user_iter()
92 bvec_set_page(bv + npages + i, *pages++, len - offset, offset); in netfs_extract_user_iter()
100 iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count); in netfs_extract_user_iter()
/linux-6.6.21/arch/parisc/kernel/
entry.S
161 bv,n 0(%r3)
772 bv %r0(%r2)
781 bv %r0(%r2)
940 bv %r0(%r20)
969 bv %r0(%r1)
1701 bv %r0(%r2)
1897 bv %r0(%r19) /* jumps to schedule() */
1928 bv %r0(%rp)
1996 bv,n (%r1)
2092 bv,n (%r1)
[all …]
real2.S
79 bv 0(%r31)
101 bv 0(%rp)
121 bv 0(%r2)
136 bv 0(%r2)
177 bv 0(%r2)
214 bv 0(%r2)
260 bv 0(%r31)
273 bv 0(%rp)
288 bv %r0(%r2)
hpmc.S
154 bv (r3) /* call pdce_proc */
169 bv (%r3) /* call pdce_proc */
199 bv (%r3) /* call pdce_proc */
221 bv (%r5)
269 bv (%r3) /* call pdce_proc */
head.S
101 bv,n (%r1)
208 bv,n (%rp)
250 bv (%rp)
263 bv (%r3)
277 bv (%r3)
396 bv (%rp)
pacache.S
175 2: bv %r0(%r2)
184 bv,n %r0(%r2)
245 bv %r0(%r2)
306 bv %r0(%r2)
369 bv %r0(%r2)
480 bv %r0(%r2)
660 bv %r0(%r2)
728 bv %r0(%r2)
777 bv %r0(%r2)
826 bv %r0(%r2)
[all …]
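Note that the parisc assembly hits (entry.S, real2.S, hpmc.S, head.S and pacache.S above, plus boot/compressed/head.S below) are not the bio_vec variable at all: on PA-RISC, bv is the "branch vectored" instruction, an indirect branch through a register, so lines like bv %r0(%r2) are ordinary returns and indirect calls, and the ,n completer nullifies the instruction in the delay slot.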
/linux-6.6.21/drivers/net/ethernet/netronome/nfp/bpf/
main.c
66 struct nfp_bpf_vnic *bv; in nfp_bpf_vnic_alloc() local
79 bv = kzalloc(sizeof(*bv), GFP_KERNEL); in nfp_bpf_vnic_alloc()
80 if (!bv) in nfp_bpf_vnic_alloc()
82 nn->app_priv = bv; in nfp_bpf_vnic_alloc()
88 bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); in nfp_bpf_vnic_alloc()
89 bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE); in nfp_bpf_vnic_alloc()
99 struct nfp_bpf_vnic *bv = nn->app_priv; in nfp_bpf_vnic_free() local
101 WARN_ON(bv->tc_prog); in nfp_bpf_vnic_free()
102 kfree(bv); in nfp_bpf_vnic_free()
111 struct nfp_bpf_vnic *bv; in nfp_bpf_setup_tc_block_cb() local
[all …]
/linux-6.6.21/block/
blk-merge.c
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_first_bvec() argument
23 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in bio_get_first_bvec()
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_last_bvec() argument
31 bio_get_first_bvec(bio, bv); in bio_get_last_bvec()
32 if (bv->bv_len == bio->bi_iter.bi_size) in bio_get_last_bvec()
42 *bv = bio->bi_io_vec[idx]; in bio_get_last_bvec()
49 bv->bv_len = iter.bi_bvec_done; in bio_get_last_bvec()
232 const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes, in bvec_split_segs() argument
236 unsigned len = min(bv->bv_len, max_len); in bvec_split_segs()
241 seg_size = get_max_segment_size(lim, bv->bv_page, in bvec_split_segs()
[all …]
bio.c
163 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) in bvec_free() argument
168 mempool_free(bv, pool); in bvec_free()
170 kmem_cache_free(biovec_slab(nr_vecs)->slab, bv); in bvec_free()
611 struct bio_vec bv; in zero_fill_bio_iter() local
614 __bio_for_each_segment(bv, bio, iter, start) in zero_fill_bio_iter()
615 memzero_bvec(&bv); in zero_fill_bio_iter()
631 struct bio_vec bv; in bio_truncate() local
642 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
643 if (done + bv.bv_len > new_size) { in bio_truncate()
650 zero_user(bv.bv_page, bv.bv_offset + offset, in bio_truncate()
[all …]
blk-crypto-fallback.c
164 struct bio_vec bv; in blk_crypto_fallback_clone_bio() local
178 bio_for_each_segment(bv, bio_src, iter) in blk_crypto_fallback_clone_bio()
179 bio->bi_io_vec[bio->bi_vcnt++] = bv; in blk_crypto_fallback_clone_bio()
215 struct bio_vec bv; in blk_crypto_fallback_split_bio_if_needed() local
218 bio_for_each_segment(bv, bio, iter) { in blk_crypto_fallback_split_bio_if_needed()
219 num_sectors += bv.bv_len >> SECTOR_SHIFT; in blk_crypto_fallback_split_bio_if_needed()
388 struct bio_vec bv; in blk_crypto_fallback_decrypt_bio() local
417 __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) { in blk_crypto_fallback_decrypt_bio()
418 struct page *page = bv.bv_page; in blk_crypto_fallback_decrypt_bio()
420 sg_set_page(&sg, page, data_unit_size, bv.bv_offset); in blk_crypto_fallback_decrypt_bio()
[all …]
bio-integrity.c
134 struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1]; in bio_integrity_add_page() local
137 if (bvec_try_merge_hw_page(q, bv, page, len, offset, in bio_integrity_add_page()
151 if (bvec_gap_to_prev(&q->limits, bv, offset)) in bio_integrity_add_page()
175 struct bio_vec bv; in bio_integrity_process() local
185 __bio_for_each_segment(bv, bio, bviter, *proc_iter) { in bio_integrity_process()
186 void *kaddr = bvec_kmap_local(&bv); in bio_integrity_process()
189 iter.data_size = bv.bv_len; in bio_integrity_process()
blk-map.c
540 struct bio_vec bv; in blk_rq_append_bio() local
543 bio_for_each_bvec(bv, bio, iter) in blk_rq_append_bio()
589 struct bio_vec *bv = &bvecs[i]; in blk_rq_map_user_bvec() local
595 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) { in blk_rq_map_user_bvec()
600 if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len) in blk_rq_map_user_bvec()
602 if (bytes + bv->bv_len > nr_iter) in blk_rq_map_user_bvec()
604 if (bv->bv_offset + bv->bv_len > PAGE_SIZE) in blk_rq_map_user_bvec()
608 bytes += bv->bv_len; in blk_rq_map_user_bvec()
609 bvprvp = bv; in blk_rq_map_user_bvec()
blk-crypto.c
218 struct bio_vec bv; in bio_crypt_check_alignment() local
220 bio_for_each_segment(bv, bio, iter) { in bio_crypt_check_alignment()
221 if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size)) in bio_crypt_check_alignment()
bounce.c
144 struct bio_vec bv; in bounce_clone_bio() local
181 bio_for_each_segment(bv, bio_src, iter) in bounce_clone_bio()
182 bio->bi_io_vec[bio->bi_vcnt++] = bv; in bounce_clone_bio()
/linux-6.6.21/drivers/block/
n64cart.c
62 static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos) in n64cart_do_bvec() argument
68 WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) || in n64cart_do_bvec()
69 (bv->bv_len & (MIN_ALIGNMENT - 1))); in n64cart_do_bvec()
71 dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0); in n64cart_do_bvec()
79 n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1); in n64cart_do_bvec()
83 dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE); in n64cart_do_bvec()
/linux-6.6.21/drivers/s390/block/
dasd_fba.c
440 struct bio_vec bv; in dasd_fba_build_cp_regular() local
461 rq_for_each_segment(bv, req, iter) { in dasd_fba_build_cp_regular()
462 if (bv.bv_len & (blksize - 1)) in dasd_fba_build_cp_regular()
465 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_fba_build_cp_regular()
466 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp_regular()
467 cidaw += bv.bv_len / blksize; in dasd_fba_build_cp_regular()
503 rq_for_each_segment(bv, req, iter) { in dasd_fba_build_cp_regular()
504 dst = bvec_virt(&bv); in dasd_fba_build_cp_regular()
509 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_fba_build_cp_regular()
511 dst = copy + bv.bv_offset; in dasd_fba_build_cp_regular()
[all …]
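dasd_fba_build_cp_regular() above walks a whole request rather than a single bio; rq_for_each_segment() visits every bio_vec of every bio attached to the request. A minimal sketch of that idiom (the byte-counting helper is hypothetical):

#include <linux/blk-mq.h>

/* Illustrative only: total the payload bytes of a request with the
 * rq_for_each_segment() idiom used by dasd_fba_build_cp_regular(). */
static unsigned int example_rq_payload_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bv;
	unsigned int bytes = 0;

	rq_for_each_segment(bv, rq, iter)
		bytes += bv.bv_len;

	return bytes;
}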
/linux-6.6.21/net/ceph/
messenger_v2.c
146 struct bio_vec bv; in do_try_sendpage() local
154 bvec_set_page(&bv, it->bvec->bv_page, in do_try_sendpage()
168 if (sendpage_ok(bv.bv_page)) in do_try_sendpage()
173 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len); in do_try_sendpage()
232 static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv) in set_in_bvec() argument
236 con->v2.in_bvec = *bv; in set_in_bvec()
237 iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len); in set_in_bvec()
273 static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv, in set_out_bvec() argument
279 con->v2.out_bvec = *bv; in set_out_bvec()
864 struct bio_vec *bv) in get_bvec_at() argument
[all …]
/linux-6.6.21/drivers/md/
dm-io-rewind.c
12 static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv, in dm_bvec_iter_rewind() argument
27 while (idx >= 0 && bytes && bytes > bv[idx].bv_len) { in dm_bvec_iter_rewind()
28 bytes -= bv[idx].bv_len; in dm_bvec_iter_rewind()
41 iter->bi_bvec_done = bv[idx].bv_len - bytes; in dm_bvec_iter_rewind()
dm-ebs-target.c
65 static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, in __ebs_rw_bvec() argument
71 unsigned int bv_len = bv->bv_len; in __ebs_rw_bvec()
76 if (unlikely(!bv->bv_page || !bv_len)) in __ebs_rw_bvec()
79 pa = bvec_virt(bv); in __ebs_rw_bvec()
102 flush_dcache_page(bv->bv_page); in __ebs_rw_bvec()
104 flush_dcache_page(bv->bv_page); in __ebs_rw_bvec()
125 struct bio_vec bv; in __ebs_rw_bio() local
128 bio_for_each_bvec(bv, bio, iter) { in __ebs_rw_bio()
129 rr = __ebs_rw_bvec(ec, op, &bv, &iter); in __ebs_rw_bio()
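__ebs_rw_bio() above uses bio_for_each_bvec() rather than bio_for_each_segment(): the former yields the (possibly multi-page) bvecs as stored in bi_io_vec, while the latter caps each step at a single page. A minimal sketch of the multi-page walk (the counting helper is hypothetical):

#include <linux/bio.h>

/* Illustrative only: count the multi-page bvecs of a bio, the same
 * bio_for_each_bvec() walk used by __ebs_rw_bio() above. */
static unsigned int example_count_bvecs(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr++;

	return nr;
}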
/linux-6.6.21/fs/btrfs/
bio.c
194 struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio); in btrfs_end_repair_bio() local
198 !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) { in btrfs_end_repair_bio()
218 bv->bv_page, bv->bv_offset, mirror); in btrfs_end_repair_bio()
235 struct bio_vec *bv, in repair_one_sector() argument
269 __bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset); in repair_one_sector()
308 struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter); in btrfs_check_read_bio() local
310 bv.bv_len = min(bv.bv_len, sectorsize); in btrfs_check_read_bio()
311 if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv)) in btrfs_check_read_bio()
312 fbio = repair_one_sector(bbio, offset, &bv, fbio); in btrfs_check_read_bio()
/linux-6.6.21/arch/parisc/boot/compressed/
head.S
71 bv,n 0(%r3)
84 bv,n 0(%ret0)
