Searched refs:nr_requests (Results 1 – 14 of 14) sorted by relevance
/linux-6.1.9/drivers/s390/block/
  scm_blk.c
      28  static unsigned int nr_requests = 64;                             variable
      31  module_param(nr_requests, uint, S_IRUGO);
      32  MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
     455  bdev->tag_set.nr_hw_queues = nr_requests;                         in scm_blk_dev_setup()
     456  bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;     in scm_blk_dev_setup()
     551  ret = scm_alloc_rqs(nr_requests);                                 in scm_blk_init()

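The s390 SCM driver sizes its tag set from a module parameter rather than from the queue's nr_requests field. A minimal sketch of that parameter declaration, reconstructed from lines 28-32 above (the rest of the driver is omitted, and the sysfs parameters path mentioned in the comment is an assumption):

    /* Minimal sketch of the module-parameter pattern in scm_blk.c lines 28-32:
     * a default of 64 parallel requests, exposed read-only (S_IRUGO), likely
     * visible under /sys/module/<module>/parameters/.  The three declarations
     * themselves are taken from the listing. */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static unsigned int nr_requests = 64;
    module_param(nr_requests, uint, S_IRUGO);
    MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
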
/linux-6.1.9/block/
  blk-mq-sched.c
     103  max_dispatch = hctx->queue->nr_requests;                          in __blk_mq_do_dispatch_sched()
     508  q->nr_requests);                                                  in blk_mq_sched_alloc_map_and_rqs()
     569  q->nr_requests = q->tag_set->queue_depth;                         in blk_mq_init_sched()
     578  q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, in blk_mq_init_sched()

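Lines 569 and 578 show the two ways blk_mq_init_sched() derives the request pool from the hardware tag-set depth: taken verbatim, or doubled and clamped by a constant that the search hit truncates. A hedged sketch of the clamped form; DEPTH_CAP is a hypothetical stand-in for that truncated constant, not a name from the kernel source:

    /* Illustrative only: the shape of line 578 above. */
    #include <stdio.h>

    #define DEPTH_CAP 128u  /* hypothetical stand-in for the truncated constant */

    static unsigned int sched_nr_requests(unsigned int hw_queue_depth)
    {
            unsigned int d = hw_queue_depth < DEPTH_CAP ? hw_queue_depth : DEPTH_CAP;

            return 2 * d;   /* 2 * min(queue_depth, DEPTH_CAP) */
    }

    int main(void)
    {
            /* e.g. a hardware queue depth of 32 yields 64 schedulable requests */
            printf("%u\n", sched_nr_requests(32));
            return 0;
    }
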
  blk-iolatency.c
     333  unsigned long qd = blkiolat->rqos.q->nr_requests;                 in scale_cookie_change()
     373  unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;          in scale_change()
     997  iolat->rq_depth.queue_depth = blkg->q->nr_requests;               in iolatency_pd_init()

  blk-mq-tag.c
     656  q->nr_requests - q->tag_set->reserved_tags);                      in blk_mq_tag_update_sched_shared_tags()

  blk-core.c
     429  q->nr_requests = BLKDEV_DEFAULT_RQ;                               in blk_alloc_queue()

  blk-sysfs.c
      65  return queue_var_show(q->nr_requests, page);                      in queue_requests_show()

  mq-deadline.c
     626  dd->async_depth = max(1UL, 3 * q->nr_requests / 4);               in dd_depth_updated()

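Line 626 caps asynchronous requests at three quarters of the queue depth, never dropping below one tag. A small standalone example of that arithmetic, using a hypothetical nr_requests of 256:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical queue depth; mq-deadline lets async requests use
             * at most three quarters of it, but never fewer than one tag. */
            unsigned long nr_requests = 256;
            unsigned long async_depth = 3 * nr_requests / 4;

            if (async_depth < 1)
                    async_depth = 1;
            printf("async_depth = %lu\n", async_depth);  /* prints 192 */
            return 0;
    }
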
  blk-mq.c
    4224  q->nr_requests = set->queue_depth;                                in blk_mq_init_allocated_queue()
    4527  if (q->nr_requests == nr)                                         in blk_mq_update_nr_requests()
    4554  q->nr_requests = nr;                                              in blk_mq_update_nr_requests()

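Lines 4527 and 4554 bracket the runtime resize path: an unchanged value is a no-op, and the new depth is only recorded after the tag sets have been adjusted in between. A simplified stand-in (not the kernel function) showing that shape:

    /* Simplified stand-in for the flow suggested by lines 4527/4554; the
     * struct and function names are illustrative, and the tag resizing the
     * real code performs between the two points is elided. */
    struct queue_stub {
            unsigned long nr_requests;
    };

    static int stub_update_nr_requests(struct queue_stub *q, unsigned long nr)
    {
            if (q->nr_requests == nr)       /* line 4527: nothing to change */
                    return 0;

            /* ... resize scheduler / shared tags here ... */

            q->nr_requests = nr;            /* line 4554: commit the new depth */
            return 0;
    }
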
  bfq-iosched.c
     683  unsigned limit = data->q->nr_requests;                            in bfq_limit_depth()

/linux-6.1.9/drivers/dma/
  stm32-mdma.c
     256  u32 nr_requests;                                                  member
    1549  if (config.request >= dmadev->nr_requests) {                      in stm32_mdma_of_xlate()
    1585  u32 nr_channels, nr_requests;                                     in stm32_mdma_probe()  local
    1601  &nr_requests);                                                    in stm32_mdma_probe()
    1603  nr_requests = STM32_MDMA_MAX_REQUESTS;                            in stm32_mdma_probe()
    1605  nr_requests);                                                     in stm32_mdma_probe()
    1619  dmadev->nr_requests = nr_requests;                                in stm32_mdma_probe()

  owl-dma.c
    1094  int ret, i, nr_channels, nr_requests;                             in owl_dma_probe()  local
    1110  ret = of_property_read_u32(np, "dma-requests", &nr_requests);     in owl_dma_probe()
    1117  nr_channels, nr_requests);                                        in owl_dma_probe()
    1122  od->nr_vchans = nr_requests;                                      in owl_dma_probe()

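Both DMA drivers resolve their request count at probe time from the device tree, falling back to a driver maximum when the property is absent (stm32-mdma lines 1601-1619, owl-dma line 1110). A hedged kernel-style sketch of that pattern; only the "dma-requests" property name and the role of STM32_MDMA_MAX_REQUESTS come from the listing, the surrounding code is illustrative:

    /* Probe-time sketch: read the optional "dma-requests" property and fall
     * back to the driver's own maximum if it is missing.  The function name
     * and the driver_max parameter are illustrative stand-ins. */
    #include <linux/of.h>

    static u32 read_nr_requests(struct device_node *np, u32 driver_max)
    {
            u32 nr_requests;

            /* "dma-requests" is the property name shown at owl-dma.c line 1110;
             * driver_max plays the role of STM32_MDMA_MAX_REQUESTS (line 1603). */
            if (of_property_read_u32(np, "dma-requests", &nr_requests))
                    nr_requests = driver_max;

            return nr_requests;
    }
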
/linux-6.1.9/include/linux/
  blkdev.h
     463  unsigned long nr_requests;      /* Max # of requests */           member
     742  return q->nr_requests;                                            in blk_queue_depth()

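These two hits show where the field lives and how it is read back: an unsigned long member of the request queue (line 463) and the blk_queue_depth() accessor that simply returns it (line 742). A stand-in sketch of that pairing; the struct name and the accessor's exact prototype are assumptions, only the member and the return statement come from the listing:

    struct request_queue_stub {
            unsigned long nr_requests;      /* Max # of requests (line 463) */
    };

    static inline unsigned long stub_queue_depth(const struct request_queue_stub *q)
    {
            return q->nr_requests;          /* line 742 */
    }
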
/linux-6.1.9/Documentation/ABI/stable/
  sysfs-block
      49  This is related to /sys/block/<disk>/queue/nr_requests
     481  What:  /sys/block/<disk>/queue/nr_requests
     495  pools, each independently regulated by nr_requests.

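The stable ABI entry at line 481 is the userspace-visible face of q->nr_requests. A hedged sketch of reading it from C; "sda" is a hypothetical disk name, and writing a new value to the same file (as root) resizes the request pool through the blk-mq.c path listed above:

    #include <stdio.h>

    int main(void)
    {
            /* "sda" is a placeholder; any entry under /sys/block/ works the same way. */
            FILE *f = fopen("/sys/block/sda/queue/nr_requests", "r");
            unsigned long nr;

            if (!f) {
                    perror("nr_requests");
                    return 1;
            }
            if (fscanf(f, "%lu", &nr) == 1)
                    printf("nr_requests = %lu\n", nr);
            fclose(f);
            return 0;
    }
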
/linux-6.1.9/drivers/target/
  target_core_iblock.c
     128  dev->dev_attrib.hw_queue_depth = q->nr_requests;                  in iblock_configure_device()