Home
last modified time | relevance | path

Searched refs:nr_segs (Results 1 – 25 of 47) sorted by relevance

12

/linux-6.6.21/lib/
iov_iter.c 132 i->nr_segs -= iov - iter_iov(i); \
140 i->nr_segs -= bvec - i->bvec; \
148 i->nr_segs -= kvec - i->kvec; \
285 const struct iovec *iov, unsigned long nr_segs, in iov_iter_init() argument
296 .nr_segs = nr_segs, in iov_iter_init()
613 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { in iov_iter_bvec_advance()
619 i->nr_segs -= bvec - i->bvec; in iov_iter_bvec_advance()
632 for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { in iov_iter_iovec_advance()
638 i->nr_segs -= iov - iter_iov(i); in iov_iter_iovec_advance()
683 i->nr_segs++; in iov_iter_revert()
[all …]
kunit_iov_iter.c 126 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0); in iov_kunit_copy_to_kvec()
176 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0); in iov_kunit_copy_from_kvec()
285 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0); in iov_kunit_copy_to_bvec()
339 KUNIT_EXPECT_EQ(test, iter.nr_segs, 0); in iov_kunit_copy_from_bvec()
/linux-6.6.21/include/linux/
uio.h 38 unsigned long nr_segs; member
81 unsigned long nr_segs; member
106 state->nr_segs = iter->nr_segs; in iov_iter_save_state()
156 static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) in iov_length() argument
161 for (seg = 0; seg < nr_segs; seg++) in iov_length()
279 unsigned long nr_segs, size_t count);
281 unsigned long nr_segs, size_t count);
283 unsigned long nr_segs, size_t count);
367 unsigned long nr_segs, unsigned long fast_segs,
370 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
[all …]
bio.h 17 static inline unsigned int bio_max_segs(unsigned int nr_segs) in bio_max_segs() argument
19 return min(nr_segs, BIO_MAX_VECS); in bio_max_segs()
blk-mq.h 956 unsigned int nr_segs) in blk_rq_bio_prep() argument
958 rq->nr_phys_segments = nr_segs; in blk_rq_bio_prep()
/linux-6.6.21/block/
blk-merge.c 356 unsigned int *nr_segs) in __bio_split_to_limits() argument
364 split = bio_split_discard(bio, lim, nr_segs, bs); in __bio_split_to_limits()
367 split = bio_split_write_zeroes(bio, lim, nr_segs, bs); in __bio_split_to_limits()
370 split = bio_split_rw(bio, lim, nr_segs, bs, in __bio_split_to_limits()
404 unsigned int nr_segs; in bio_split_to_limits() local
407 return __bio_split_to_limits(bio, lim, &nr_segs); in bio_split_to_limits()
636 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs) in ll_back_merge_fn() argument
651 return ll_new_hw_segment(req, bio, nr_segs); in ll_back_merge_fn()
655 unsigned int nr_segs) in ll_front_merge_fn() argument
670 return ll_new_hw_segment(req, bio, nr_segs); in ll_front_merge_fn()
[all …]
blk-map.c 26 if (data->nr_segs > UIO_MAXIOV) in bio_alloc_map_data()
29 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); in bio_alloc_map_data()
34 memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs); in bio_alloc_map_data()
541 unsigned int nr_segs = 0; in blk_rq_append_bio() local
544 nr_segs++; in blk_rq_append_bio()
547 blk_rq_bio_prep(rq, bio, nr_segs); in blk_rq_append_bio()
549 if (!ll_back_merge_fn(rq, bio, nr_segs)) in blk_rq_append_bio()
566 size_t nr_segs = iter->nr_segs; in blk_rq_map_user_bvec() local
575 if (nr_segs > queue_max_segments(q)) in blk_rq_map_user_bvec()
584 blk_rq_bio_prep(rq, bio, nr_segs); in blk_rq_map_user_bvec()
[all …]
blk-mq-sched.h 11 unsigned int nr_segs, struct request **merged_request);
13 unsigned int nr_segs);
blk.h 259 unsigned int nr_segs);
261 struct bio *bio, unsigned int nr_segs);
322 unsigned int *nr_segs);
324 unsigned int nr_segs);
blk-mq-sched.c 340 unsigned int nr_segs) in blk_mq_sched_bio_merge() argument
349 ret = e->type->ops.bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge()
367 if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) in blk_mq_sched_bio_merge()
blk-crypto-fallback.c 162 unsigned int nr_segs = bio_segments(bio_src); in blk_crypto_fallback_clone_bio() local
167 bio = bio_kmalloc(nr_segs, GFP_NOIO); in blk_crypto_fallback_clone_bio()
170 bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs, in blk_crypto_fallback_clone_bio()
blk-mq.c 2591 unsigned int nr_segs) in blk_mq_bio_to_request() argument
2599 blk_rq_bio_prep(rq, bio, nr_segs); in blk_mq_bio_to_request()
2888 struct bio *bio, unsigned int nr_segs) in blk_mq_attempt_bio_merge() argument
2891 if (blk_attempt_plug_merge(q, bio, nr_segs)) in blk_mq_attempt_bio_merge()
2893 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) in blk_mq_attempt_bio_merge()
2988 unsigned int nr_segs = 1; in blk_mq_submit_bio() local
3001 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); in blk_mq_submit_bio()
3007 if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) in blk_mq_submit_bio()
3016 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); in blk_mq_submit_bio()
3024 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); in blk_mq_submit_bio()
[all …]
/linux-6.6.21/fs/smb/server/
transport_tcp.c 117 unsigned int nr_segs, size_t bytes) in kvec_array_init() argument
128 nr_segs--; in kvec_array_init()
133 memcpy(new, iov, sizeof(*iov) * nr_segs); in kvec_array_init()
136 return nr_segs; in kvec_array_init()
146 static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs) in get_conn_iovec() argument
150 if (t->iov && nr_segs <= t->nr_iov) in get_conn_iovec()
154 new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL); in get_conn_iovec()
158 t->nr_iov = nr_segs; in get_conn_iovec()
305 unsigned int nr_segs, unsigned int to_read, in ksmbd_tcp_readv() argument
315 iov = get_conn_iovec(t, nr_segs); in ksmbd_tcp_readv()
[all …]
/linux-6.6.21/arch/powerpc/mm/
dma-noncoherent.c 65 int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; in __dma_sync_page_highmem() local
84 } while (seg_nr < nr_segs); in __dma_sync_page_highmem()
/linux-6.6.21/drivers/scsi/
xen-scsifront.c 1132 unsigned int sg_grant, nr_segs; in scsifront_read_backend_params() local
1136 nr_segs = min_t(unsigned int, sg_grant, SG_ALL); in scsifront_read_backend_params()
1137 nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE); in scsifront_read_backend_params()
1138 nr_segs = min_t(unsigned int, nr_segs, in scsifront_read_backend_params()
1143 dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs); in scsifront_read_backend_params()
1144 else if (info->pause && nr_segs < host->sg_tablesize) in scsifront_read_backend_params()
1147 host->sg_tablesize, nr_segs); in scsifront_read_backend_params()
1149 host->sg_tablesize = nr_segs; in scsifront_read_backend_params()
1150 host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512; in scsifront_read_backend_params()
/linux-6.6.21/drivers/hwtracing/intel_th/
msu.c 73 unsigned int nr_segs; member
330 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { in msc_win_total_sz()
425 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { in msc_win_oldest_sg()
662 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { in msc_buffer_clear_hw_header()
992 unsigned int nr_segs) in __msc_buffer_win_alloc() argument
998 ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL); in __msc_buffer_win_alloc()
1002 for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) { in __msc_buffer_win_alloc()
1012 return nr_segs; in __msc_buffer_win_alloc()
1037 for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) { in msc_buffer_set_uc()
1057 for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) { in msc_buffer_set_wb()
[all …]
/linux-6.6.21/drivers/md/bcache/
debug.c 110 unsigned int nr_segs = bio_segments(bio); in bch_data_verify() local
115 check = bio_kmalloc(nr_segs, GFP_NOIO); in bch_data_verify()
118 bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs, in bch_data_verify()
/linux-6.6.21/fs/fuse/
dev.c 651 unsigned long nr_segs; member
707 BUG_ON(!cs->nr_segs); in fuse_copy_fill()
713 cs->nr_segs--; in fuse_copy_fill()
715 if (cs->nr_segs >= cs->pipe->max_usage) in fuse_copy_fill()
731 cs->nr_segs++; in fuse_copy_fill()
807 BUG_ON(!cs->nr_segs); in fuse_try_move_page()
811 cs->nr_segs--; in fuse_try_move_page()
900 if (cs->nr_segs >= cs->pipe->max_usage) in fuse_ref_page()
918 cs->nr_segs++; in fuse_ref_page()
1395 if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) { in fuse_dev_splice_read()
[all …]
/linux-6.6.21/drivers/nvme/target/
io-cmd-file.c 77 unsigned long nr_segs, size_t count, int ki_flags) in nvmet_file_submit_bvec() argument
94 iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); in nvmet_file_submit_bvec()
/linux-6.6.21/drivers/block/xen-blkback/
blkback.c 710 invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs, in xen_blkbk_unmap_and_respond()
907 pending_req->nr_segs, in xen_blkbk_map_seg()
923 nseg = pending_req->nr_segs; in xen_blkbk_parse_indirect()
1361 pending_req->nr_segs = nseg; in dispatch_rw_block_io()
1475 pending_req->nr_segs); in dispatch_rw_block_io()
common.h 347 int nr_segs; member
/linux-6.6.21/drivers/usb/host/
xen-hcd.c 719 int nr_segs = 0; in xenhcd_gnttab_done() local
728 nr_segs = shadow->req.nr_buffer_segs; in xenhcd_gnttab_done()
731 nr_segs += shadow->req.u.isoc.nr_frame_desc_segs; in xenhcd_gnttab_done()
733 for (i = 0; i < nr_segs; i++) { in xenhcd_gnttab_done()
/linux-6.6.21/fs/smb/client/
cifsencrypt.c 39 for (i = 0; i < iter->nr_segs; i++) { in cifs_shash_bvec()
77 for (i = 0; i < iter->nr_segs; i++) { in cifs_shash_kvec()
/linux-6.6.21/fs/btrfs/
bio.c 83 unsigned int nr_segs; in btrfs_split_bio() local
85 bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs, in btrfs_split_bio()
/linux-6.6.21/sound/core/
pcm_native.c 3532 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels) in snd_pcm_readv()
3537 bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL); in snd_pcm_readv()
3540 for (i = 0; i < to->nr_segs; ++i) { in snd_pcm_readv()
3572 if (from->nr_segs > 128 || from->nr_segs != runtime->channels || in snd_pcm_writev()
3576 bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL); in snd_pcm_writev()
3579 for (i = 0; i < from->nr_segs; ++i) { in snd_pcm_writev()

12