Lines Matching refs:plug

519 struct blk_plug *plug, in blk_mq_rq_cache_fill() argument
527 .nr_tags = plug->nr_ios, in blk_mq_rq_cache_fill()
528 .cached_rq = &plug->cached_rq, in blk_mq_rq_cache_fill()
535 plug->nr_ios = 1; in blk_mq_rq_cache_fill()
547 struct blk_plug *plug = current->plug; in blk_mq_alloc_cached_request() local
550 if (!plug) in blk_mq_alloc_cached_request()
553 if (rq_list_empty(plug->cached_rq)) { in blk_mq_alloc_cached_request()
554 if (plug->nr_ios == 1) in blk_mq_alloc_cached_request()
556 rq = blk_mq_rq_cache_fill(q, plug, opf, flags); in blk_mq_alloc_cached_request()
560 rq = rq_list_peek(&plug->cached_rq); in blk_mq_alloc_cached_request()
569 plug->cached_rq = rq_list_next(rq); in blk_mq_alloc_cached_request()
739 void blk_mq_free_plug_rqs(struct blk_plug *plug) in blk_mq_free_plug_rqs() argument
743 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) in blk_mq_free_plug_rqs()
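Read together, the references above describe a small per-task request cache hanging off the plug: blk_mq_rq_cache_fill() batch-allocates plug->nr_ios requests onto plug->cached_rq, blk_mq_alloc_cached_request() pops from that list on later allocations, and blk_mq_free_plug_rqs() releases whatever is left when the plug is finished. The following is a minimal userspace sketch of that fill/pop/free pattern; fake_plug, fake_request and the helpers are made-up stand-ins rather than kernel types or APIs, and the real functions check considerably more (opf/flags compatibility, tag limits, and so on).

/* Illustrative userspace model only: fake_plug, fake_request and the
 * helpers below are made-up stand-ins, not kernel types or APIs. */
#include <stdio.h>
#include <stdlib.h>

struct fake_request {
	struct fake_request *next;
	int tag;
};

struct fake_plug {
	struct fake_request *cached_rq;	/* models plug->cached_rq */
	unsigned short nr_ios;		/* models plug->nr_ios */
};

/* Models blk_mq_rq_cache_fill(): batch-allocate plug->nr_ios requests,
 * park them on the plug and reset nr_ios to 1. */
static void cache_fill(struct fake_plug *plug)
{
	for (unsigned short i = 0; i < plug->nr_ios; i++) {
		struct fake_request *rq = malloc(sizeof(*rq));

		if (!rq)
			break;
		rq->tag = i;
		rq->next = plug->cached_rq;
		plug->cached_rq = rq;
	}
	plug->nr_ios = 1;
}

/* Models blk_mq_alloc_cached_request(): reuse a parked request when one
 * exists, refilling the cache on the first allocation; the real code also
 * checks that opf and flags are compatible with the cached request. */
static struct fake_request *alloc_cached(struct fake_plug *plug)
{
	struct fake_request *rq;

	if (!plug->cached_rq) {
		if (plug->nr_ios == 1)
			return NULL;	/* caller falls back to a plain allocation */
		cache_fill(plug);
	}
	rq = plug->cached_rq;
	if (!rq)
		return NULL;
	plug->cached_rq = rq->next;	/* models the rq_list_next() advance */
	return rq;
}

/* Models blk_mq_free_plug_rqs(): drop whatever is still cached when the
 * plug is finished. */
static void free_plug_rqs(struct fake_plug *plug)
{
	struct fake_request *rq;

	while ((rq = plug->cached_rq) != NULL) {
		plug->cached_rq = rq->next;
		free(rq);
	}
}

int main(void)
{
	struct fake_plug plug = { .cached_rq = NULL, .nr_ios = 4 };

	for (int i = 0; i < 3; i++) {
		struct fake_request *rq = alloc_cached(&plug);

		if (!rq)
			break;
		printf("got cached request, tag %d\n", rq->tag);
		free(rq);
	}
	free_plug_rqs(&plug);	/* the one unused spare is freed here */
	return 0;
}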
1282 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) in blk_plug_max_rq_count() argument
1284 if (plug->multiple_queues) in blk_plug_max_rq_count()
1289 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) in blk_add_rq_to_plug() argument
1291 struct request *last = rq_list_peek(&plug->mq_list); in blk_add_rq_to_plug()
1293 if (!plug->rq_count) { in blk_add_rq_to_plug()
1295 } else if (plug->rq_count >= blk_plug_max_rq_count(plug) || in blk_add_rq_to_plug()
1298 blk_mq_flush_plug_list(plug, false); in blk_add_rq_to_plug()
1303 if (!plug->multiple_queues && last && last->q != rq->q) in blk_add_rq_to_plug()
1304 plug->multiple_queues = true; in blk_add_rq_to_plug()
1309 if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS)) in blk_add_rq_to_plug()
1310 plug->has_elevator = true; in blk_add_rq_to_plug()
1312 rq_list_add(&plug->mq_list, rq); in blk_add_rq_to_plug()
1313 plug->rq_count++; in blk_add_rq_to_plug()
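blk_plug_max_rq_count() and blk_add_rq_to_plug() are the producer side: each new request is pushed onto plug->mq_list and rq_count is bumped, but if the plug already holds the per-plug maximum (a different cap applies once multiple_queues is set) the list is flushed first, and the multiple_queues / has_elevator hints are latched so the flush path knows what it holds. Below is a rough sketch of that threshold logic with stand-in types and a placeholder cap instead of the kernel's limits; note that the listed line 1295 ends in ||, so the kernel has an additional flush trigger that is not modelled here.

/* Illustrative model of the plug batching threshold; the types and the cap
 * below are stand-ins, not the kernel's definitions. */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_MAX_RQ	32	/* placeholder, not the kernel's limit */

struct fake_request {
	struct fake_request *next;
	int queue_id;			/* models rq->q identity */
	bool uses_sched_tags;		/* models RQF_SCHED_TAGS */
};

struct fake_plug {
	struct fake_request *mq_list;	/* models plug->mq_list (head = newest) */
	unsigned short rq_count;
	bool multiple_queues;
	bool has_elevator;
};

/* Models blk_plug_max_rq_count(): the cap differs once the plug spans
 * multiple queues (the actual values here are placeholders). */
static unsigned short plug_max_rq_count(struct fake_plug *plug)
{
	return plug->multiple_queues ? MODEL_MAX_RQ * 2 : MODEL_MAX_RQ;
}

static void flush_plug_list(struct fake_plug *plug)
{
	printf("flushing %hu plugged requests\n", plug->rq_count);
	plug->mq_list = NULL;		/* real code hands them to dispatch */
	plug->rq_count = 0;
}

/* Models blk_add_rq_to_plug(): flush first if the plug is full, latch the
 * multiple_queues / has_elevator hints, then append and count the request. */
static void add_rq_to_plug(struct fake_plug *plug, struct fake_request *rq)
{
	struct fake_request *last = plug->mq_list;

	if (plug->rq_count && plug->rq_count >= plug_max_rq_count(plug))
		flush_plug_list(plug);

	if (!plug->multiple_queues && last && last->queue_id != rq->queue_id)
		plug->multiple_queues = true;
	if (!plug->has_elevator && rq->uses_sched_tags)
		plug->has_elevator = true;

	rq->next = plug->mq_list;
	plug->mq_list = rq;
	plug->rq_count++;
}

int main(void)
{
	struct fake_plug plug = { 0 };
	struct fake_request rqs[3] = {
		{ .queue_id = 0 }, { .queue_id = 0 }, { .queue_id = 1 },
	};

	for (int i = 0; i < 3; i++)
		add_rq_to_plug(&plug, &rqs[i]);
	printf("multiple_queues=%d rq_count=%hu\n",
	       plug.multiple_queues, plug.rq_count);
	return 0;
}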
1342 if (current->plug && !at_head) { in blk_execute_rq_nowait()
1343 blk_add_rq_to_plug(current->plug, rq); in blk_execute_rq_nowait()
2711 static void blk_mq_plug_issue_direct(struct blk_plug *plug) in blk_mq_plug_issue_direct() argument
2718 while ((rq = rq_list_pop(&plug->mq_list))) { in blk_mq_plug_issue_direct()
2719 bool last = rq_list_empty(plug->mq_list); in blk_mq_plug_issue_direct()
2751 struct blk_plug *plug) in __blk_mq_flush_plug_list() argument
2755 q->mq_ops->queue_rqs(&plug->mq_list); in __blk_mq_flush_plug_list()
2758 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) in blk_mq_dispatch_plug_list() argument
2769 struct request *rq = rq_list_pop(&plug->mq_list); in blk_mq_dispatch_plug_list()
2782 } while (!rq_list_empty(plug->mq_list)); in blk_mq_dispatch_plug_list()
2784 plug->mq_list = requeue_list; in blk_mq_dispatch_plug_list()
2804 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) in blk_mq_flush_plug_list() argument
2815 if (plug->rq_count == 0) in blk_mq_flush_plug_list()
2817 plug->rq_count = 0; in blk_mq_flush_plug_list()
2819 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { in blk_mq_flush_plug_list()
2822 rq = rq_list_peek(&plug->mq_list); in blk_mq_flush_plug_list()
2838 __blk_mq_flush_plug_list(q, plug)); in blk_mq_flush_plug_list()
2839 if (rq_list_empty(plug->mq_list)) in blk_mq_flush_plug_list()
2844 blk_mq_plug_issue_direct(plug)); in blk_mq_flush_plug_list()
2845 if (rq_list_empty(plug->mq_list)) in blk_mq_flush_plug_list()
2850 blk_mq_dispatch_plug_list(plug, from_schedule); in blk_mq_flush_plug_list()
2851 } while (!rq_list_empty(plug->mq_list)); in blk_mq_flush_plug_list()
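blk_mq_flush_plug_list() empties the plug in a fixed order: when every plugged request targets a single queue with no elevator involved and the flush was not triggered from the scheduler, the whole list is first offered to the driver's ->queue_rqs() batch hook through __blk_mq_flush_plug_list(), whatever remains is then issued directly by blk_mq_plug_issue_direct(), and anything still left (or any plug that does not qualify for that fast path) goes through blk_mq_dispatch_plug_list() until mq_list is empty. The following condensed control-flow sketch uses made-up stand-in types and helpers rather than the kernel's.

/* Control-flow sketch of the unplug path; all types and helpers here are
 * simplified stand-ins, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

struct fake_plug {
	int mq_list_len;		/* models the length of plug->mq_list */
	unsigned short rq_count;
	bool multiple_queues;
	bool has_elevator;
};

/* Models __blk_mq_flush_plug_list() handing the whole list to the driver's
 * ->queue_rqs() hook; a driver may consume some, all, or none of it.  The
 * real code only tries this when the driver implements the hook. */
static void queue_rqs_batch(struct fake_plug *plug)
{
	int taken = plug->mq_list_len / 2;	/* pretend half are accepted */

	printf("->queue_rqs() took %d requests\n", taken);
	plug->mq_list_len -= taken;
}

/* Models blk_mq_plug_issue_direct(): issue the leftovers one by one. */
static void plug_issue_direct(struct fake_plug *plug)
{
	printf("issuing %d requests directly\n", plug->mq_list_len);
	plug->mq_list_len = 0;
}

/* Models blk_mq_dispatch_plug_list(): generic per-hctx dispatch. */
static void dispatch_plug_list(struct fake_plug *plug, bool from_schedule)
{
	printf("dispatching %d requests (from_schedule=%d)\n",
	       plug->mq_list_len, from_schedule);
	plug->mq_list_len = 0;
}

/* Models blk_mq_flush_plug_list(): batch hook first, then direct issue,
 * then the generic dispatch loop for whatever is left. */
static void flush_plug_list(struct fake_plug *plug, bool from_schedule)
{
	if (plug->rq_count == 0)
		return;
	plug->rq_count = 0;

	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
		queue_rqs_batch(plug);
		if (!plug->mq_list_len)
			return;
		plug_issue_direct(plug);
		if (!plug->mq_list_len)
			return;
	}
	do {
		dispatch_plug_list(plug, from_schedule);
	} while (plug->mq_list_len);
}

int main(void)
{
	struct fake_plug plug = { .mq_list_len = 8, .rq_count = 8 };

	flush_plug_list(&plug, false);
	return 0;
}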
2900 struct blk_plug *plug, in blk_mq_get_new_requests() argument
2916 if (plug) { in blk_mq_get_new_requests()
2917 data.nr_tags = plug->nr_ios; in blk_mq_get_new_requests()
2918 plug->nr_ios = 1; in blk_mq_get_new_requests()
2919 data.cached_rq = &plug->cached_rq; in blk_mq_get_new_requests()
2932 static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug, in blk_mq_can_use_cached_rq() argument
2938 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq); in blk_mq_can_use_cached_rq()
2951 plug->cached_rq = rq_list_next(rq); in blk_mq_can_use_cached_rq()
2984 struct blk_plug *plug = blk_mq_plug(bio); in blk_mq_submit_bio() local
2994 if (plug) { in blk_mq_submit_bio()
2995 rq = rq_list_peek(&plug->cached_rq); in blk_mq_submit_bio()
3009 if (blk_mq_can_use_cached_rq(rq, plug, bio)) in blk_mq_submit_bio()
3024 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); in blk_mq_submit_bio()
3049 if (plug) { in blk_mq_submit_bio()
3050 blk_add_rq_to_plug(plug, rq); in blk_mq_submit_bio()
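Finally, blk_mq_submit_bio() ties the two halves together: with a plug in place it peeks at plug->cached_rq, reuses that request via blk_mq_can_use_cached_rq() when it fits the bio, otherwise allocates through blk_mq_get_new_requests() (which uses plug->nr_ios to refill the cache), and then parks the request on the plug with blk_add_rq_to_plug() instead of dispatching it immediately. The stripped-down sketch below models only that plugged fast path; every name in it is a stand-in, and the real function additionally handles queue entering, bio splitting, merging and more.

/* Sketch of the plugged submission fast path; every type and helper is a
 * made-up stand-in, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bio {
	int op;				/* models the bio's operation */
};

struct fake_request {
	struct fake_request *next;
	int op;
};

struct fake_plug {
	struct fake_request *cached_rq;	/* spare requests from earlier batches */
	struct fake_request *mq_list;	/* plugged, not-yet-dispatched requests */
	unsigned short rq_count;
	unsigned short nr_ios;
};

/* Models blk_mq_can_use_cached_rq(): reuse the cached request only when it
 * is compatible with the bio (the kernel checks much more than the op),
 * popping it off plug->cached_rq on success. */
static bool can_use_cached_rq(struct fake_request *rq, struct fake_plug *plug,
			      struct fake_bio *bio)
{
	if (rq->op != bio->op)
		return false;
	plug->cached_rq = rq->next;
	return true;
}

/* Models blk_mq_get_new_requests(): allocate a request for this bio; the
 * real code uses plug->nr_ios to batch-allocate spares into
 * plug->cached_rq, which is omitted here. */
static struct fake_request *get_new_requests(struct fake_plug *plug,
					     struct fake_bio *bio)
{
	struct fake_request *rq = malloc(sizeof(*rq));

	if (!rq)
		return NULL;
	rq->op = bio->op;
	rq->next = NULL;
	plug->nr_ios = 1;
	return rq;
}

/* Same role as the add_rq_to_plug() sketch earlier: park the request. */
static void add_rq_to_plug(struct fake_plug *plug, struct fake_request *rq)
{
	rq->next = plug->mq_list;
	plug->mq_list = rq;
	plug->rq_count++;
}

/* Models the plugged half of blk_mq_submit_bio(). */
static void submit_bio_plugged(struct fake_plug *plug, struct fake_bio *bio)
{
	struct fake_request *rq = plug->cached_rq;	/* rq_list_peek() */

	if (!rq || !can_use_cached_rq(rq, plug, bio))
		rq = get_new_requests(plug, bio);
	if (!rq)
		return;
	add_rq_to_plug(plug, rq);	/* defer dispatch until the unplug */
	printf("plugged a request, %hu pending\n", plug->rq_count);
}

int main(void)
{
	struct fake_plug plug = { .nr_ios = 4 };
	struct fake_bio bio = { .op = 1 };
	struct fake_request *spare = malloc(sizeof(*spare));

	if (spare) {
		spare->op = 1;
		spare->next = NULL;
		plug.cached_rq = spare;	/* pretend an earlier batch left a spare */
	}
	submit_bio_plugged(&plug, &bio);	/* reuses the cached request */
	submit_bio_plugged(&plug, &bio);	/* allocates a fresh one */

	while (plug.mq_list) {			/* demo cleanup */
		struct fake_request *rq = plug.mq_list;

		plug.mq_list = rq->next;
		free(rq);
	}
	return 0;
}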