/linux-6.1.9/include/linux/ |
D | blkdev.h |
    424 unsigned long queue_flags; member
    592 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
    593 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
    594 #define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
    595 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
    596 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
    598 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
    599 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
    601 test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
    602 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
    [all …]
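These blk_queue_*() predicates are thin wrappers around test_bit() on the queue_flags bitmap. A minimal sketch of how a caller might use them follows; the helper name and the read-ahead policy are purely illustrative, not from the kernel tree.

    #include <linux/blkdev.h>

    /* Illustrative helper (not from the tree): pick a read-ahead size
     * based on the predicate macros defined in blkdev.h. */
    static unsigned int example_readahead_kb(struct request_queue *q)
    {
            /* Queues that are dying or stopped should not be tuned at all. */
            if (blk_queue_dying(q) || blk_queue_stopped(q))
                    return 0;

            /* Non-rotational (flash) queues usually want less read-ahead. */
            if (blk_queue_nonrot(q))
                    return 128;

            return 512;
    }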
|
D | blk-mq.h | 908 test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) in blk_should_fake_timeout()
|
/linux-6.1.9/block/ |
D | blk-sysfs.c |
    287 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
    368 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
    369 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
    435 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
    441 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in queue_poll_store()
    522 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in queue_wc_show()
    552 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); in queue_fua_show()
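The sysfs attributes in blk-sysfs.c expose individual queue_flags bits by formatting the result of test_bit(). A sketch of that show pattern, modelled on queue_fua_show() at line 552; the helper name is illustrative.

    #include <linux/blkdev.h>
    #include <linux/kernel.h>

    /* Illustrative show helper: report one queue flag as "0\n" or "1\n"
     * in a sysfs page. */
    static ssize_t example_fua_show(struct request_queue *q, char *page)
    {
            return sprintf(page, "%u\n",
                           test_bit(QUEUE_FLAG_FUA, &q->queue_flags) ? 1 : 0);
    }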
|
D | blk-core.c |
    82 set_bit(flag, &q->queue_flags); in blk_queue_flag_set()
    93 clear_bit(flag, &q->queue_flags); in blk_queue_flag_clear()
    107 return test_and_set_bit(flag, &q->queue_flags); in blk_queue_flag_test_and_set()
    733 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { in submit_bio_noacct()
    741 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in submit_bio_noacct()
    849 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in bio_poll()
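blk-core.c provides blk_queue_flag_set(), blk_queue_flag_clear() and blk_queue_flag_test_and_set() (lines 82/93/107 above) as the atomic-bitop wrappers drivers are expected to use. A hedged sketch of a driver-side setup path; the function and the particular flag choices are hypothetical.

    #include <linux/blkdev.h>

    /* Hypothetical driver setup path using the wrappers exported by
     * block/blk-core.c. */
    static void example_configure_queue(struct request_queue *q)
    {
            blk_queue_flag_set(QUEUE_FLAG_NONROT, q);        /* set_bit() underneath */
            blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);  /* clear_bit() underneath */

            /* test_and_set_bit() variant: returns the previous value,
             * so this branch runs only on the first enable. */
            if (!blk_queue_flag_test_and_set(QUEUE_FLAG_IO_STAT, q))
                    pr_info("io_stat enabled for this queue\n");
    }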
|
D | blk-mq-tag.c |
    47 if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) in __blk_mq_tag_busy()
    49 set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags); in __blk_mq_tag_busy()
    84 &q->queue_flags)) in __blk_mq_tag_idle()
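__blk_mq_tag_busy() tests QUEUE_FLAG_HCTX_ACTIVE before setting it, so the common already-active case stays a read-only check instead of an atomic read-modify-write. A simplified sketch of that test-then-set pattern (helper name illustrative):

    #include <linux/blkdev.h>

    /* Skip the atomic set_bit() when the flag is already visible. */
    static void example_mark_hctx_active(struct request_queue *q)
    {
            if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                    return;
            set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
    }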
|
D | blk-timeout.c | 43 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags); in part_timeout_show()
|
D | blk-flush.c | 396 unsigned long fflags = q->queue_flags; /* may change, cache */ in blk_insert_flush()
|
D | blk-mq.h | 358 if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) in hctx_may_queue()
|
D | blk-settings.c | 836 wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); in blk_queue_write_cache()
|
D | blk-wbt.c | 844 rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags); in wbt_init()
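Both this hit and the blk-settings.c one above concern QUEUE_FLAG_WC: drivers publish write-cache/FUA capability through blk_queue_write_cache(), which updates the flags and (at blk-settings.c:836) propagates the cached state to the writeback throttle that wbt_init() samples. A minimal sketch of a driver-side call; the wrapper and its parameter are hypothetical.

    #include <linux/blkdev.h>

    /* Hypothetical probe-time call: the device has a volatile write cache
     * but no FUA support, so QUEUE_FLAG_WC ends up set and QUEUE_FLAG_FUA
     * cleared. */
    static void example_setup_write_cache(struct request_queue *q, bool has_wcache)
    {
            blk_queue_write_cache(q, has_wcache, false);
    }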
|
D | blk-mq-debugfs.c | 140 blk_flags_show(m, q->queue_flags, blk_queue_flag_name, in queue_state_show()
|
D | blk-mq.c |
    1123 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) in blk_mq_complete_need_ipi()
    1136 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) && in blk_mq_complete_need_ipi()
    1222 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { in blk_mq_start_request()
    4217 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; in blk_mq_init_allocated_queue()
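The SAME_COMP/SAME_FORCE hits implement the rq_affinity policy in blk_mq_complete_need_ipi(). Below is a condensed restatement of that in-tree helper, with the !CONFIG_SMP and force_irqthreads() cases omitted; it is illustration only and does not build out of tree, since blk_mq_ctx is private to block/.

    /* Condensed restatement of blk_mq_complete_need_ipi(). */
    static bool example_need_ipi(struct request *rq)
    {
            int cpu = raw_smp_processor_id();

            /* rq_affinity disabled: complete wherever the interrupt landed. */
            if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
                    return false;

            /* rq_affinity=1: a CPU sharing cache with the submitter may
             * complete locally; rq_affinity=2 (SAME_FORCE) insists on the
             * exact submitting CPU. */
            if (cpu == rq->mq_ctx->cpu ||
                (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
                 cpus_share_cache(cpu, rq->mq_ctx->cpu)))
                    return false;

            /* Never IPI an offline CPU. */
            return cpu_online(rq->mq_ctx->cpu);
    }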
|
/linux-6.1.9/drivers/nvme/host/ |
D | ioctl.c |
    712 if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev) in nvme_ns_chr_uring_cmd_iopoll()
    815 if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio in nvme_ns_head_chr_uring_cmd_iopoll()
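The NVMe uring_cmd iopoll paths only attempt polled completion when the queue advertises QUEUE_FLAG_POLL. A sketch of that gate, loosely modelled on nvme_ns_chr_uring_cmd_iopoll(); the function name is illustrative and locking/refcounting is omitted.

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Only poll when the bio has a device and its queue supports polling. */
    static int example_try_poll(struct bio *bio, struct io_comp_batch *iob,
                                unsigned int poll_flags)
    {
            struct request_queue *q;

            if (!bio || !bio->bi_bdev)
                    return 0;

            q = bdev_get_queue(bio->bi_bdev);
            if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                    return 0;

            return bio_poll(bio, iob, poll_flags);
    }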
|
/linux-6.1.9/kernel/sched/ |
D | core.c |
    7473 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; in __sched_setscheduler() local
    7630 queue_flags &= ~DEQUEUE_MOVE; in __sched_setscheduler()
    7636 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
    7654 queue_flags |= ENQUEUE_HEAD; in __sched_setscheduler()
    7656 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
    10243 int queued, running, queue_flags = in sched_move_task() local
    10255 dequeue_task(rq, tsk, queue_flags); in sched_move_task()
    10262 enqueue_task(rq, tsk, queue_flags); in sched_move_task()
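Note that these hits are a different queue_flags: a local variable in kernel/sched/core.c holding DEQUEUE_*/ENQUEUE_* flags, unrelated to the block layer's request_queue bitmap. A paraphrased sketch of how __sched_setscheduler() uses it; dequeue_task(), enqueue_task(), task_on_rq_queued() and the flag macros are scheduler-internal, so this is illustration only.

    /* Paraphrase of the dequeue/change/enqueue pattern in __sched_setscheduler(). */
    static void example_change_sched_attrs(struct rq *rq, struct task_struct *p,
                                           int oldprio)
    {
            int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
            bool queued = task_on_rq_queued(p);

            if (queued)
                    dequeue_task(rq, p, queue_flags);  /* take p off its runqueue */

            /* ... policy and priority are changed while p is dequeued ... */

            if (queued) {
                    if (oldprio < p->prio)             /* priority lowered: requeue at head */
                            queue_flags |= ENQUEUE_HEAD;
                    enqueue_task(rq, p, queue_flags);  /* put p back with new attributes */
            }
    }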
|
/linux-6.1.9/drivers/md/ |
D | dm-table.c |
    1495 return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags); in device_not_poll_capable()
    1776 return (q->queue_flags & flush); in device_flush_capable()
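dm-table.c answers capability questions by running an iterate_devices callout over every underlying device and inspecting its queue_flags. A sketch of such a callout, modelled on device_not_poll_capable(); the name is illustrative.

    #include <linux/device-mapper.h>
    #include <linux/blkdev.h>

    /* Report whether one underlying device lacks bio-polling support. */
    static int example_device_not_poll_capable(struct dm_target *ti,
                                               struct dm_dev *dev,
                                               sector_t start, sector_t len,
                                               void *data)
    {
            struct request_queue *q = bdev_get_queue(dev->bdev);

            return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
    }

A table is then treated as poll-capable only when no target's iterate_devices() pass finds a device for which such a callout returns nonzero.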
|
D | raid5-ppl.c | 1320 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in ppl_init_child_log()
|
D | raid5-cache.c | 3094 log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0; in r5l_init_log()
|
/linux-6.1.9/drivers/block/ |
D | loop.c | 1141 if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) in __loop_clr_fd()
|