Searched refs:queue_max_segments (Results 1 – 12 of 12) sorted by relevance
138 bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); in nvme_zns_alloc_report_buffer()
125 return queue_var_show(queue_max_segments(q), page); in queue_max_segments_show()
577 QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
565 if (nr_segs > queue_max_segments(q)) in blk_rq_map_user_bvec()
583 return queue_max_segments(rq->q); in blk_rq_get_max_segments()
975 if (bio->bi_vcnt >= queue_max_segments(q)) in bio_add_hw_page()
3007 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_insert_cloned_request()
3009 __func__, rq->nr_phys_segments, queue_max_segments(q)); in blk_insert_cloned_request()
1141 static inline unsigned short queue_max_segments(const struct request_queue *q) in queue_max_segments() function
1172 return queue_max_segments(bdev_get_queue(bdev)); in bdev_max_segments()
216 bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); in sd_zbc_alloc_report_buffer()
348 sdp->sg_tablesize = queue_max_segments(q); in sg_open()
1474 sdp->sg_tablesize = queue_max_segments(q); in sg_alloc()
4263 i = queue_max_segments(SDp->request_queue); in st_probe()
913 <= queue_max_segments(q)) { in pkt_set_segment_merging()
920 <= queue_max_segments(q)) { in pkt_set_segment_merging()
1074 blk_queue_max_segments(q, queue_max_segments(q) - 1); in ata_scsi_dev_config()