/linux-6.1.9/block/

blk-mq-sched.h
    47  if (rq->rq_flags & RQF_ELV) {  in blk_mq_sched_allow_merge()
    58  if (rq->rq_flags & RQF_ELV) {  in blk_mq_sched_completed_request()
    68  if (rq->rq_flags & RQF_ELV) {  in blk_mq_sched_requeue_request()
    72  if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)  in blk_mq_sched_requeue_request()

blk-mq.c
   355  data->rq_flags |= RQF_PM;  in blk_mq_rq_ctx_init()
   357  data->rq_flags |= RQF_IO_STAT;  in blk_mq_rq_ctx_init()
   358  rq->rq_flags = data->rq_flags;  in blk_mq_rq_ctx_init()
   360  if (!(data->rq_flags & RQF_ELV)) {  in blk_mq_rq_ctx_init()
   392  if (rq->rq_flags & RQF_ELV) {  in blk_mq_rq_ctx_init()
   401  rq->rq_flags |= RQF_ELVPRIV;  in blk_mq_rq_ctx_init()
   457  data->rq_flags |= RQF_ELV;  in __blk_mq_alloc_requests()
   474  if (!(data->rq_flags & RQF_ELV))  in __blk_mq_alloc_requests()
   478  data->rq_flags |= RQF_RESV;  in __blk_mq_alloc_requests()
   655  data.rq_flags |= RQF_ELV;  in blk_mq_alloc_request_hctx()
   [all …]

blk-flush.c
   137  rq->rq_flags &= ~RQF_FLUSH_SEQ;  in blk_flush_restore_request()
   338  flush_rq->rq_flags |= RQF_MQ_INFLIGHT;  in blk_kick_flush()
   344  flush_rq->rq_flags |= RQF_FLUSH_SEQ;  in blk_kick_flush()
   445  rq->rq_flags |= RQF_FLUSH_SEQ;  in blk_insert_flush()

blk-mq.h
   156  req_flags_t rq_flags;  (member)
   174  if (!(data->rq_flags & RQF_ELV))  in blk_mq_tags_from_data()
   258  if (rq->rq_flags & RQF_MQ_INFLIGHT) {  in __blk_mq_put_driver_tag()
   259  rq->rq_flags &= ~RQF_MQ_INFLIGHT;  in __blk_mq_put_driver_tag()

blk-zoned.c
    80  WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);  in blk_req_zone_write_trylock()
    81  rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;  in blk_req_zone_write_trylock()
    93  WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);  in __blk_req_zone_write_lock()
    94  rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;  in __blk_req_zone_write_lock()
   100  rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;  in __blk_req_zone_write_unlock()

blk-pm.h
    21  if (rq->q->dev && !(rq->rq_flags & RQF_PM))  in blk_pm_mark_last_busy()

blk.h
   142  if (rq->rq_flags & RQF_NOMERGE_FLAGS)  in rq_mergeable()
   276  #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
   345  return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);  in blk_do_io_stat()

blk-merge.c
   561  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in __blk_rq_map_sg()
   734  if (rq->rq_flags & RQF_MIXED_MERGE)  in blk_rq_set_mixed_merge()
   747  rq->rq_flags |= RQF_MIXED_MERGE;  in blk_rq_set_mixed_merge()
   816  if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||  in attempt_merge()

blk-timeout.c
   140  req->rq_flags &= ~RQF_TIMED_OUT;  in blk_add_timer()

blk-mq-sched.c
   402  if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))  in blk_mq_sched_bypass_insert()
   440  at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;  in blk_mq_sched_insert_request()
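All of the block-layer hits above follow one convention: rq_flags is a plain bitmask (the req_flags_t typedef on a struct request) that is tested with &, set with |=, and cleared with &= ~. Below is a minimal userspace sketch of that idiom; the RQF_* bit values here are made up for illustration, the real ones are defined in include/linux/blk-mq.h.

#include <stdio.h>

typedef unsigned int req_flags_t;       /* the kernel typedef is __bitwise */

/* Illustrative bit values only; the real RQF_* flags live in blk-mq.h. */
#define RQF_ELV        (1u << 0)
#define RQF_ELVPRIV    (1u << 1)
#define RQF_FLUSH_SEQ  (1u << 2)

struct request { req_flags_t rq_flags; };

int main(void)
{
    struct request rq = { .rq_flags = 0 };

    rq.rq_flags |= RQF_ELV;            /* set, as in __blk_mq_alloc_requests() */
    if (rq.rq_flags & RQF_ELV)         /* test, as in blk_mq_sched_allow_merge() */
        rq.rq_flags |= RQF_ELVPRIV;
    rq.rq_flags &= ~RQF_FLUSH_SEQ;     /* clear, as in blk_flush_restore_request() */

    printf("rq_flags = %#x\n", rq.rq_flags);
    return 0;
}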
/linux-6.1.9/include/linux/

blk-mq.h
    89  req_flags_t rq_flags;  (member)
   840  return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));  in blk_mq_need_time_stamp()
   845  return rq->rq_flags & RQF_RESV;  in blk_mq_is_reserved_rq()
   856  if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||  in blk_mq_add_to_batch()
  1072  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in blk_rq_payload_bytes()
  1083  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in req_bvec()
  1122  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in blk_rq_nr_phys_segments()
  1171  if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)  in blk_req_zone_write_unlock()
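One of those flags carries a small locking protocol: RQF_ZONE_WRITE_LOCKED (set in blk-zoned.c above, dropped again in blk_req_zone_write_unlock()) marks the request that currently holds a zone's write lock, while a per-zone bit in a bitmap serializes the writers. A rough, non-atomic userspace sketch of the trylock shape; the kernel uses test_and_set_bit() on the zone bitmap and WARN_ON_ONCE() where this sketch uses assert().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int req_flags_t;
#define RQF_ZONE_WRITE_LOCKED (1u << 0)   /* illustrative bit value */

struct request { req_flags_t rq_flags; unsigned int zone; };

static unsigned long seq_zones_wlock;     /* one write-lock bit per zone */

/* Shape of blk_req_zone_write_trylock(): claim the zone, then mark the rq. */
static bool zone_write_trylock(struct request *rq)
{
    unsigned long bit = 1UL << rq->zone;

    if (seq_zones_wlock & bit)            /* kernel: test_and_set_bit() */
        return false;
    seq_zones_wlock |= bit;

    assert(!(rq->rq_flags & RQF_ZONE_WRITE_LOCKED));  /* WARN_ON_ONCE() */
    rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
    return true;
}

int main(void)
{
    struct request a = { .zone = 3 }, b = { .zone = 3 };

    printf("a: %d\n", zone_write_trylock(&a));   /* 1: got the zone */
    printf("b: %d\n", zone_write_trylock(&b));   /* 0: zone already locked */
    return 0;
}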
/linux-6.1.9/kernel/sched/

sched.h
  1540  struct rq_flags {  (struct)
  1565  static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)  in rq_pin_lock()
  1578  static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)  in rq_unpin_lock()
  1588  static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)  in rq_repin_lock()
  1600  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
  1603  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
  1607  static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)  in __task_rq_unlock()
  1615  task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)  in task_rq_unlock()
  1625  rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)  in rq_lock_irqsave()
  1633  rq_lock_irq(struct rq *rq, struct rq_flags *rf)  in rq_lock_irq()
  [all …]

core.c
   606  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)  in __task_rq_lock()
   630  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)  in task_rq_lock()
   768  struct rq_flags rf;  in hrtick()
   796  struct rq_flags rf;  in __hrtick_start()
  1438  struct rq_flags rf;  in uclamp_update_util_min_rt_default()
  1690  struct rq_flags rf;  in uclamp_update_active()
  2313  static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,  in move_queued_task()
  2359  static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,  in __migrate_task()
  2384  struct rq_flags rf;  in migration_cpu_stop()
  2720  static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,  in affine_move_task()
  [all …]

stop_task.c
    19  balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  in balance_stop()

psi.c
   958  struct rq_flags rf;  in psi_memstall_enter()
   989  struct rq_flags rf;  in psi_memstall_leave()
  1058  struct rq_flags rf;  in cgroup_move_task()
  1134  struct rq_flags rf;  in psi_cgroup_restart()
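Note that the kernel/sched/ hits are a name collision, not the block-layer flag word: here struct rq_flags is a small cookie that travels with a runqueue lock, holding saved IRQ state and a lockdep pin cookie so that the lock and unlock helpers stay balanced. A loose userspace analogue of that pairing discipline, assuming a pthread mutex in place of the rq spinlock; the real struct and helpers are in kernel/sched/sched.h.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the scheduler's runqueue; the real one is far larger. */
struct rq { pthread_mutex_t lock; int pinned; };

/* Analogue of struct rq_flags: saved state that must travel with the lock. */
struct rq_flags { int cookie; };

static void rq_lock(struct rq *rq, struct rq_flags *rf)
{
    pthread_mutex_lock(&rq->lock);
    rf->cookie = ++rq->pinned;      /* rq_pin_lock() records pin state here */
}

static void rq_unlock(struct rq *rq, struct rq_flags *rf)
{
    if (rf->cookie != rq->pinned)   /* analogue of the lockdep pin check */
        fprintf(stderr, "unbalanced rq lock/unlock\n");
    rq->pinned--;
    pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
    struct rq rq = { PTHREAD_MUTEX_INITIALIZER, 0 };
    struct rq_flags rf;

    rq_lock(&rq, &rf);
    /* critical section: task_rq_lock() callers operate on the rq here */
    rq_unlock(&rq, &rf);
    return 0;
}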
/linux-6.1.9/drivers/scsi/

scsi_lib.c
   118  if (rq->rq_flags & RQF_DONTPREP) {  in scsi_mq_requeue_cmd()
   119  rq->rq_flags &= ~RQF_DONTPREP;  in scsi_mq_requeue_cmd()
   211  req_flags_t rq_flags, int *resid)  in __scsi_execute()  (argument)
   220  rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);  in __scsi_execute()
   236  req->rq_flags |= rq_flags | RQF_QUIET;  in __scsi_execute()
   646  if (!(rq->rq_flags & RQF_MIXED_MERGE))  in scsi_rq_err_bytes()
   822  if (!(req->rq_flags & RQF_QUIET)) {  in scsi_io_completion_action()
   913  else if (req->rq_flags & RQF_QUIET)  in scsi_io_completion_nz_result()
  1153  if (rq->rq_flags & RQF_DONTPREP) {  in scsi_cleanup_rq()
  1155  rq->rq_flags &= ~RQF_DONTPREP;  in scsi_cleanup_rq()
  [all …]
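The scsi_lib.c hits show a second recurring pattern: RQF_DONTPREP marks a request whose command has already been prepared, so requeue and cleanup paths must clear the flag (and undo the preparation) before the request can be prepared again; drivers/mmc/core/queue.c further down uses the flag the same way. A compact sketch of that prepare-once lifecycle; driver_prep() and driver_unprep() are hypothetical stand-ins for the driver's real setup and teardown.

#include <stdio.h>

typedef unsigned int req_flags_t;
#define RQF_DONTPREP (1u << 0)            /* illustrative bit value */

struct request { req_flags_t rq_flags; };

/* Hypothetical stand-ins for the driver's real prep/teardown work. */
static void driver_prep(struct request *rq)   { (void)rq; puts("prep"); }
static void driver_unprep(struct request *rq) { (void)rq; puts("unprep"); }

static void queue_rq(struct request *rq)
{
    if (!(rq->rq_flags & RQF_DONTPREP)) {  /* as mmc_mq_queue_rq() checks */
        driver_prep(rq);
        rq->rq_flags |= RQF_DONTPREP;
    }
}

static void requeue(struct request *rq)
{
    if (rq->rq_flags & RQF_DONTPREP) {     /* as scsi_mq_requeue_cmd() does */
        rq->rq_flags &= ~RQF_DONTPREP;
        driver_unprep(rq);
    }
}

int main(void)
{
    struct request rq = { 0 };

    queue_rq(&rq);      /* prepares once */
    requeue(&rq);       /* clears the flag so the next dispatch re-preps */
    queue_rq(&rq);      /* prepares again */
    return 0;
}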
/linux-6.1.9/drivers/nvme/host/

ioctl.c
    73  struct nvme_command *cmd, blk_opf_t rq_flags,  in nvme_alloc_user_request()  (argument)
    78  req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);  in nvme_alloc_user_request()
   481  blk_opf_t rq_flags = 0;  in nvme_uring_cmd_io()  (local)
   517  rq_flags = REQ_NOWAIT;  in nvme_uring_cmd_io()
   521  rq_flags |= REQ_POLLED;  in nvme_uring_cmd_io()
   524  req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);  in nvme_uring_cmd_io()
   537  if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {  in nvme_uring_cmd_io()
   541  rq_flags &= ~REQ_POLLED;  in nvme_uring_cmd_io()
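In the NVMe ioctl path, rq_flags is a local variable of yet another type: blk_opf_t, which packs the request opcode into its low bits and REQ_* behavior flags such as REQ_NOWAIT and REQ_POLLED above them; the combined value is what blk_mq_alloc_request() receives. A tiny sketch of that packing, with illustrative bit positions rather than the real encodings from include/linux/blk_types.h.

#include <stdio.h>

typedef unsigned int blk_opf_t;   /* opcode in the low bits, REQ_* flags above */

/* Illustrative values only; the real encodings are in blk_types.h. */
#define REQ_OP_DRV_IN  8u
#define REQ_NOWAIT     (1u << 21)
#define REQ_POLLED     (1u << 24)

int main(void)
{
    blk_opf_t rq_flags = REQ_NOWAIT;
    rq_flags |= REQ_POLLED;                   /* as nvme_uring_cmd_io() builds it */

    /* opcode | flags, the shape passed to blk_mq_alloc_request() */
    blk_opf_t opf = REQ_OP_DRV_IN | rq_flags;

    printf("opf = %#x\n", opf);
    return 0;
}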
/linux-6.1.9/net/sunrpc/

svc_xprt.c
   394  if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {  in svc_xprt_reserve_slot()
   398  set_bit(RQ_DATA, &rqstp->rq_flags);  in svc_xprt_reserve_slot()
   406  if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {  in svc_xprt_release_slot()
   475  if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))  in svc_xprt_enqueue()
   585  if (test_bit(RQ_BUSY, &rqstp->rq_flags))  in svc_wake_up()
   755  clear_bit(RQ_BUSY, &rqstp->rq_flags);  in svc_get_next_xprt()
   765  set_bit(RQ_BUSY, &rqstp->rq_flags);  in svc_get_next_xprt()
  1210  if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))  in svc_defer()
  1241  set_bit(RQ_DROPME, &rqstp->rq_flags);  in svc_defer()

svc.c
   640  __set_bit(RQ_BUSY, &rqstp->rq_flags);  in svc_rqst_alloc()
   732  set_bit(RQ_VICTIM, &rqstp->rq_flags);  in choose_victim()
   869  if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))  in svc_exit_thread()
  1247  set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);  in svc_process_common()
  1249  set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);  in svc_process_common()
  1250  clear_bit(RQ_DROPME, &rqstp->rq_flags);  in svc_process_common()
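The SUNRPC hits follow a third convention: rqstp->rq_flags is an unsigned long used as a set of independent bits and touched only through the atomic bit helpers (set_bit, clear_bit, test_bit, test_and_set_bit, test_and_clear_bit), so several server threads can flip RQ_* bits concurrently without a lock. A compilable sketch of the idiom, substituting GCC/Clang atomic builtins for the kernel helpers and made-up bit numbers for the real RQ_* values.

#include <stdio.h>

/* Illustrative bit numbers; the real RQ_* values are in sunrpc/svc.h. */
enum { RQ_BUSY = 0, RQ_DATA = 1 };

/* Userspace stand-ins for the kernel's atomic bitops. */
static int test_and_set_bit(int nr, unsigned long *addr)
{
    return (__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST) >> nr) & 1;
}

static void clear_bit(int nr, unsigned long *addr)
{
    __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_SEQ_CST);
}

struct svc_rqst { unsigned long rq_flags; };

int main(void)
{
    struct svc_rqst rqstp = { 0 };

    /* svc_xprt_enqueue() skips threads that are already busy: */
    if (!test_and_set_bit(RQ_BUSY, &rqstp.rq_flags))
        puts("thread claimed");
    clear_bit(RQ_BUSY, &rqstp.rq_flags);
    return 0;
}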
/linux-6.1.9/drivers/net/ethernet/fungible/funcore/

fun_queue.h
    69  u16 rq_flags;  (member)
   120  u16 rq_flags;  (member)

fun_queue.c
   459  funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;  in fun_alloc_queue()
   528  rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,  in fun_create_rq()
/linux-6.1.9/drivers/mmc/core/

queue.c
   241  req->rq_flags |= RQF_QUIET;  in mmc_mq_queue_rq()
   293  if (!(req->rq_flags & RQF_DONTPREP)) {  in mmc_mq_queue_rq()
   295  req->rq_flags |= RQF_DONTPREP;  in mmc_mq_queue_rq()
/linux-6.1.9/include/scsi/

scsi_device.h
   462  req_flags_t rq_flags, int *resid);
   465  sshdr, timeout, retries, flags, rq_flags, resid) \  (argument)
   470  sense, sshdr, timeout, retries, flags, rq_flags, \
/linux-6.1.9/drivers/scsi/device_handler/

scsi_dh_hp_sw.c
   167  req->rq_flags |= RQF_QUIET;  in hp_sw_prep_fn()
/linux-6.1.9/drivers/md/

dm-rq.c
   264  if (rq->rq_flags & RQF_FAILED)  in dm_softirq_done()
   291  rq->rq_flags |= RQF_FAILED;  in dm_kill_unmapped_request()