Identifier search results for "nr_segs" in linux-6.1.9, grouped by directory and file.
Numbers are source line numbers; "[all …]" marks a truncated match list.

/linux-6.1.9/lib/
  iov_iter.c
      134   i->nr_segs -= iov - i->iov; \
      142   i->nr_segs -= bvec - i->bvec; \
      150   i->nr_segs -= kvec - i->kvec; \
      425   const struct iovec *iov, unsigned long nr_segs,    in iov_iter_init() argument
      435   .nr_segs = nr_segs,    in iov_iter_init()
      865   for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {    in iov_iter_bvec_advance()
      871   i->nr_segs -= bvec - i->bvec;    in iov_iter_bvec_advance()
      884   for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {    in iov_iter_iovec_advance()
      890   i->nr_segs -= iov - i->iov;    in iov_iter_iovec_advance()
      957   i->nr_segs++;    in iov_iter_revert()
      [all …]

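The iov_iter.c hits all come from one pattern: advancing an iterator first walks whole segments, then shrinks nr_segs by the number of segments that were consumed completely (the "i->nr_segs -= iov - i->iov" lines). Below is a minimal userspace sketch of that pattern; the name iovec_advance and its signature are invented for illustration, and this is not the kernel implementation.

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/uio.h>

    /*
     * Consume 'bytes' from an iovec array: whole segments are skipped first,
     * then 'skip' records how far into the current segment the walk stopped.
     * Userspace illustration of the pattern in iov_iter_iovec_advance().
     */
    static void iovec_advance(const struct iovec **iovp, unsigned long *nr_segs,
                              size_t *skip, size_t bytes)
    {
        const struct iovec *iov = *iovp;
        const struct iovec *end = iov + *nr_segs;
        size_t off = *skip + bytes;

        for (; iov < end; iov++) {
            if (off < iov->iov_len)
                break;                  /* stop inside this segment */
            off -= iov->iov_len;        /* this segment is fully consumed */
        }
        *nr_segs -= iov - *iovp;        /* mirrors i->nr_segs -= iov - i->iov */
        *skip = off;
        *iovp = iov;
    }

    int main(void)
    {
        char a[8], b[8], c[8];
        struct iovec v[3] = {
            { .iov_base = a, .iov_len = sizeof(a) },
            { .iov_base = b, .iov_len = sizeof(b) },
            { .iov_base = c, .iov_len = sizeof(c) },
        };
        const struct iovec *cur = v;
        unsigned long nr_segs = 3;
        size_t skip = 0;

        iovec_advance(&cur, &nr_segs, &skip, 12);    /* consume 12 of 24 bytes */
        printf("segments left: %lu, offset into current: %zu\n", nr_segs, skip);
        /* prints: segments left: 2, offset into current: 4 */
        return 0;
    }
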
/linux-6.1.9/include/linux/
  uio.h
      35    unsigned long nr_segs;    member
      57    unsigned long nr_segs;    member
      76    state->nr_segs = iter->nr_segs;    in iov_iter_save_state()
      131   static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)    in iov_length() argument
      136   for (seg = 0; seg < nr_segs; seg++)    in iov_length()
      240   unsigned long nr_segs, size_t count);
      242   unsigned long nr_segs, size_t count);
      244   unsigned long nr_segs, size_t count);
      330   unsigned long nr_segs, unsigned long fast_segs,
      333   unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
      [all …]

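In uio.h, nr_segs appears both as a struct member (the segment count carried by an iterator) and as the argument of iov_length(), whose loop sums segment lengths. A runnable userspace analogue follows, using the userspace struct iovec from <sys/uio.h>; only the signature and the loop header appear in the listing above, so the loop body here is an assumption based on that context.

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/uio.h>

    /* Total byte count across nr_segs segments, like the iov_length() inline. */
    static size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
    {
        size_t ret = 0;
        unsigned long seg;

        for (seg = 0; seg < nr_segs; seg++)
            ret += iov[seg].iov_len;    /* assumed body: accumulate each segment */
        return ret;
    }

    int main(void)
    {
        char hdr[16], payload[32];
        struct iovec iov[2] = {
            { .iov_base = hdr,     .iov_len = sizeof(hdr) },
            { .iov_base = payload, .iov_len = sizeof(payload) },
        };

        printf("%zu\n", iov_length(iov, 2));    /* prints 48 */
        return 0;
    }
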
  blk-mq.h
      955   unsigned int nr_segs)    in blk_rq_bio_prep() argument
      957   rq->nr_phys_segments = nr_segs;    in blk_rq_bio_prep()

  bio.h
      15    static inline unsigned int bio_max_segs(unsigned int nr_segs)    in bio_max_segs() argument
      17    return min(nr_segs, BIO_MAX_VECS);    in bio_max_segs()

/linux-6.1.9/block/
  blk-merge.c
      345   unsigned int *nr_segs)    in __bio_split_to_limits() argument
      353   split = bio_split_discard(bio, lim, nr_segs, bs);    in __bio_split_to_limits()
      356   split = bio_split_write_zeroes(bio, lim, nr_segs, bs);    in __bio_split_to_limits()
      359   split = bio_split_rw(bio, lim, nr_segs, bs,    in __bio_split_to_limits()
      393   unsigned int nr_segs;    in bio_split_to_limits() local
      396   return __bio_split_to_limits(bio, lim, &nr_segs);    in bio_split_to_limits()
      632   int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)    in ll_back_merge_fn() argument
      647   return ll_new_hw_segment(req, bio, nr_segs);    in ll_back_merge_fn()
      651   unsigned int nr_segs)    in ll_front_merge_fn() argument
      666   return ll_new_hw_segment(req, bio, nr_segs);    in ll_front_merge_fn()
      [all …]

  blk-map.c
      26    if (data->nr_segs > UIO_MAXIOV)    in bio_alloc_map_data()
      29    bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);    in bio_alloc_map_data()
      32    memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);    in bio_alloc_map_data()
      531   unsigned int nr_segs = 0;    in blk_rq_append_bio() local
      534   nr_segs++;    in blk_rq_append_bio()
      537   blk_rq_bio_prep(rq, bio, nr_segs);    in blk_rq_append_bio()
      539   if (!ll_back_merge_fn(rq, bio, nr_segs))    in blk_rq_append_bio()
      556   size_t nr_segs = iter->nr_segs;    in blk_rq_map_user_bvec() local
      565   if (nr_segs > queue_max_segments(q))    in blk_rq_map_user_bvec()
      574   blk_rq_bio_prep(rq, bio, nr_segs);    in blk_rq_map_user_bvec()
      [all …]

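blk-map.c bounds-checks the caller-supplied segment count before building a mapping: bio_alloc_map_data() rejects more than UIO_MAXIOV segments and blk_rq_map_user_bvec() rejects more than queue_max_segments(q). Userspace has the analogous per-call limit IOV_MAX; the helper below is a hypothetical illustration of checking a vector count against it, not anything from the kernel or a specific library.

    #include <stdio.h>
    #include <limits.h>
    #include <unistd.h>

    #ifndef IOV_MAX
    #define IOV_MAX 1024            /* fallback if <limits.h> does not define it */
    #endif

    /* Hypothetical check: is nr_segs acceptable for one vectored syscall? */
    static int vec_count_ok(unsigned long nr_segs)
    {
        long max = sysconf(_SC_IOV_MAX);

        if (max < 0)
            max = IOV_MAX;          /* sysconf gave no answer; use the constant */
        return nr_segs <= (unsigned long)max;
    }

    int main(void)
    {
        printf("1024 segments allowed: %s\n", vec_count_ok(1024) ? "yes" : "no");
        printf("100000 segments allowed: %s\n", vec_count_ok(100000) ? "yes" : "no");
        return 0;
    }
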
  blk-mq-sched.h
      12    unsigned int nr_segs, struct request **merged_request);
      14    unsigned int nr_segs);

  blk.h
      263   unsigned int nr_segs);
      265   struct bio *bio, unsigned int nr_segs);
      324   unsigned int *nr_segs);
      326   unsigned int nr_segs);

  blk-mq-sched.c
      346   unsigned int nr_segs)    in blk_mq_sched_bio_merge() argument
      355   ret = e->type->ops.bio_merge(q, bio, nr_segs);    in blk_mq_sched_bio_merge()
      373   if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))    in blk_mq_sched_bio_merge()

  blk-crypto-fallback.c
      162   unsigned int nr_segs = bio_segments(bio_src);    in blk_crypto_fallback_clone_bio() local
      167   bio = bio_kmalloc(nr_segs, GFP_NOIO);    in blk_crypto_fallback_clone_bio()
      170   bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,    in blk_crypto_fallback_clone_bio()

  blk-mq.c
      2530  unsigned int nr_segs)    in blk_mq_bio_to_request() argument
      2538  blk_rq_bio_prep(rq, bio, nr_segs);    in blk_mq_bio_to_request()
      2809  struct bio *bio, unsigned int nr_segs)    in blk_mq_attempt_bio_merge() argument
      2812  if (blk_attempt_plug_merge(q, bio, nr_segs))    in blk_mq_attempt_bio_merge()
      2814  if (blk_mq_sched_bio_merge(q, bio, nr_segs))    in blk_mq_attempt_bio_merge()
      2918  unsigned int nr_segs = 1;    in blk_mq_submit_bio() local
      2923  bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);    in blk_mq_submit_bio()
      2933  rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);    in blk_mq_submit_bio()
      2937  rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);    in blk_mq_submit_bio()
      2946  blk_mq_bio_to_request(rq, bio, nr_segs);    in blk_mq_submit_bio()

  kyber-iosched.c
      570   unsigned int nr_segs)    in kyber_bio_merge() argument
      581   merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);    in kyber_bio_merge()

/linux-6.1.9/fs/ksmbd/
  transport_tcp.c
      117   unsigned int nr_segs, size_t bytes)    in kvec_array_init() argument
      128   nr_segs--;    in kvec_array_init()
      133   memcpy(new, iov, sizeof(*iov) * nr_segs);    in kvec_array_init()
      136   return nr_segs;    in kvec_array_init()
      146   static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs)    in get_conn_iovec() argument
      150   if (t->iov && nr_segs <= t->nr_iov)    in get_conn_iovec()
      154   new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL);    in get_conn_iovec()
      158   t->nr_iov = nr_segs;    in get_conn_iovec()
      303   unsigned int nr_segs, unsigned int to_read)    in ksmbd_tcp_readv() argument
      313   iov = get_conn_iovec(t, nr_segs);    in ksmbd_tcp_readv()
      [all …]

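kvec_array_init() in the ksmbd TCP transport rebuilds the receive vector after a partial read: fully consumed segments are dropped (the nr_segs-- hit), the remaining entries are copied, and the first surviving segment is shifted by the leftover offset. A userspace sketch of that trimming idea over struct iovec follows; the function name iovec_trim and its exact behaviour are assumptions, not the kernel code.

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /*
     * Drop the segments already covered by 'bytes', copy the remainder into
     * 'out', and trim the first remaining segment by the leftover offset.
     * Returns the new segment count.  Illustration only.
     */
    static unsigned int iovec_trim(struct iovec *out, const struct iovec *in,
                                   unsigned int nr_segs, size_t bytes)
    {
        size_t off = 0;

        while (nr_segs && (bytes || in->iov_len == 0)) {
            size_t take = bytes < in->iov_len ? bytes : in->iov_len;

            bytes -= take;
            off += take;
            if (off == in->iov_len) {   /* this segment is fully consumed */
                in++;
                nr_segs--;
                off = 0;
            }
        }
        memcpy(out, in, sizeof(*in) * nr_segs);
        if (nr_segs && off) {           /* partially consumed first segment */
            out->iov_base = (char *)out->iov_base + off;
            out->iov_len -= off;
        }
        return nr_segs;
    }

    int main(void)
    {
        char a[8], b[8], c[8];
        struct iovec in[3] = {
            { .iov_base = a, .iov_len = sizeof(a) },
            { .iov_base = b, .iov_len = sizeof(b) },
            { .iov_base = c, .iov_len = sizeof(c) },
        };
        struct iovec out[3];
        unsigned int n = iovec_trim(out, in, 3, 12);

        printf("%u segments remain, first has %zu bytes\n", n, out[0].iov_len);
        /* prints: 2 segments remain, first has 4 bytes */
        return 0;
    }
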
/linux-6.1.9/arch/powerpc/mm/
  dma-noncoherent.c
      65    int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;    in __dma_sync_page_highmem() local
      84    } while (seg_nr < nr_segs);    in __dma_sync_page_highmem()

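The hit at line 65 is a round-up division: one segment covers the first seg_size bytes, and the rest of size is split into PAGE_SIZE chunks, rounded up. A tiny standalone check of that arithmetic; PAGE_SIZE is hard-coded to 4096 here purely for the example.

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL        /* example value; the real one is per-arch */

    /* 1 segment for the first seg_size bytes, then ceil((size - seg_size) / PAGE_SIZE). */
    static unsigned long count_segments(size_t size, size_t seg_size)
    {
        return 1 + ((size - seg_size) + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
        /* 10000 bytes with a 100-byte first piece: 1 + ceil(9900 / 4096) = 4 */
        printf("%lu\n", count_segments(10000, 100));
        return 0;
    }
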
/linux-6.1.9/drivers/scsi/
  xen-scsifront.c
      1134  unsigned int sg_grant, nr_segs;    in scsifront_read_backend_params() local
      1138  nr_segs = min_t(unsigned int, sg_grant, SG_ALL);    in scsifront_read_backend_params()
      1139  nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);    in scsifront_read_backend_params()
      1140  nr_segs = min_t(unsigned int, nr_segs,    in scsifront_read_backend_params()
      1145  dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);    in scsifront_read_backend_params()
      1146  else if (info->pause && nr_segs < host->sg_tablesize)    in scsifront_read_backend_params()
      1149  host->sg_tablesize, nr_segs);    in scsifront_read_backend_params()
      1151  host->sg_tablesize = nr_segs;    in scsifront_read_backend_params()
      1152  host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;    in scsifront_read_backend_params()

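scsifront_read_backend_params() clamps the backend-advertised grant count into a valid range before sizing the SCSI host, then derives max_sectors from the result. The sketch below reproduces only the clamp-then-derive shape; the constants are placeholders for this example, not the driver's real limits.

    #include <stdio.h>

    #define EXAMPLE_PAGE_SIZE   4096u
    #define UPPER_LIMIT          128u   /* placeholder for the SG_ALL-style cap */
    #define LOWER_LIMIT           26u   /* placeholder for the ring minimum */
    #define RING_LIMIT            64u   /* placeholder for the per-ring ceiling */

    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
    static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

    int main(void)
    {
        unsigned int sg_grant = 200;    /* value read from the backend (example) */
        unsigned int nr_segs;

        nr_segs = min_u(sg_grant, UPPER_LIMIT);   /* never above the global cap */
        nr_segs = max_u(nr_segs, LOWER_LIMIT);    /* never below the minimum */
        nr_segs = min_u(nr_segs, RING_LIMIT);     /* respect the per-ring ceiling */

        /* same derivation as the line-1152 hit: one segment reserved, 512-byte sectors */
        printf("sg_tablesize=%u max_sectors=%u\n",
               nr_segs, (nr_segs - 1) * EXAMPLE_PAGE_SIZE / 512);
        return 0;
    }
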
  scsi_lib.c
      1025  unsigned short nr_segs = blk_rq_nr_phys_segments(rq);    in scsi_alloc_sgtables() local
      1031  if (WARN_ON_ONCE(!nr_segs))    in scsi_alloc_sgtables()
      1039  nr_segs++;    in scsi_alloc_sgtables()
      1044  if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,    in scsi_alloc_sgtables()

/linux-6.1.9/drivers/hwtracing/intel_th/
  msu.c
      73    unsigned int nr_segs;    member
      330   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {    in msc_win_total_sz()
      425   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {    in msc_win_oldest_sg()
      662   for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {    in msc_buffer_clear_hw_header()
      992   unsigned int nr_segs)    in __msc_buffer_win_alloc() argument
      998   ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);    in __msc_buffer_win_alloc()
      1002  for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {    in __msc_buffer_win_alloc()
      1012  return nr_segs;    in __msc_buffer_win_alloc()
      1037  for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {    in msc_buffer_set_uc()
      1057  for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {    in msc_buffer_set_wb()
      [all …]

/linux-6.1.9/drivers/md/bcache/
  debug.c
      110   unsigned int nr_segs = bio_segments(bio);    in bch_data_verify() local
      115   check = bio_kmalloc(nr_segs, GFP_NOIO);    in bch_data_verify()
      118   bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,    in bch_data_verify()

/linux-6.1.9/fs/fuse/
  dev.c
      649   unsigned long nr_segs;    member
      705   BUG_ON(!cs->nr_segs);    in fuse_copy_fill()
      711   cs->nr_segs--;    in fuse_copy_fill()
      713   if (cs->nr_segs >= cs->pipe->max_usage)    in fuse_copy_fill()
      729   cs->nr_segs++;    in fuse_copy_fill()
      805   BUG_ON(!cs->nr_segs);    in fuse_try_move_page()
      809   cs->nr_segs--;    in fuse_try_move_page()
      897   if (cs->nr_segs >= cs->pipe->max_usage)    in fuse_ref_page()
      915   cs->nr_segs++;    in fuse_ref_page()
      1392  if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {    in fuse_dev_splice_read()
      [all …]

/linux-6.1.9/drivers/block/xen-blkback/
  blkback.c
      710   invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,    in xen_blkbk_unmap_and_respond()
      907   pending_req->nr_segs,    in xen_blkbk_map_seg()
      923   nseg = pending_req->nr_segs;    in xen_blkbk_parse_indirect()
      1257  pending_req->nr_segs = nseg;    in dispatch_rw_block_io()
      1371  pending_req->nr_segs);    in dispatch_rw_block_io()

  common.h
      347   int nr_segs;    member

/linux-6.1.9/drivers/nvme/target/
  io-cmd-file.c
      84    unsigned long nr_segs, size_t count, int ki_flags)    in nvmet_file_submit_bvec() argument
      101   iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);    in nvmet_file_submit_bvec()

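nvmet_file_submit_bvec() wraps nr_segs bio_vec entries in an iov_iter with iov_iter_bvec() and submits them as one vectored file I/O. The closest userspace counterpart of that segment count is the iovcnt argument of the vectored syscalls; a small runnable writev() example (writing to stdout just for demonstration):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/uio.h>

    int main(void)
    {
        char part1[] = "hello, ";
        char part2[] = "world\n";
        /* Two segments gathered into a single write; nr_segs would be 2 here. */
        struct iovec iov[2] = {
            { .iov_base = part1, .iov_len = sizeof(part1) - 1 },
            { .iov_base = part2, .iov_len = sizeof(part2) - 1 },
        };

        if (writev(STDOUT_FILENO, iov, 2) < 0)
            perror("writev");
        return 0;
    }
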
/linux-6.1.9/drivers/usb/host/
  xen-hcd.c
      719   int nr_segs = 0;    in xenhcd_gnttab_done() local
      728   nr_segs = shadow->req.nr_buffer_segs;    in xenhcd_gnttab_done()
      731   nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;    in xenhcd_gnttab_done()
      733   for (i = 0; i < nr_segs; i++) {    in xenhcd_gnttab_done()

/linux-6.1.9/mm/
  swap.c
      173   int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,    in get_kernel_pages() argument
      178   for (seg = 0; seg < nr_segs; seg++) {    in get_kernel_pages()

/linux-6.1.9/sound/core/
  pcm_native.c
      3529  if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)    in snd_pcm_readv()
      3534  bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);    in snd_pcm_readv()
      3537  for (i = 0; i < to->nr_segs; ++i)    in snd_pcm_readv()
      3566  if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||    in snd_pcm_writev()
      3570  bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);    in snd_pcm_writev()
      3573  for (i = 0; i < from->nr_segs; ++i)    in snd_pcm_writev()