// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose the bi_dev comparisons; partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash support: requests are hashed by the sector at which they
 * end, so a back-merge candidate for a bio can be looked up directly
 * by the bio's start sector.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
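
/*
 * For example (illustrative numbers): a request starting at sector 100
 * and spanning 8 sectors hashes under key 108, so a bio with
 * bi_sector == 108 finds it as a back-merge candidate.
 */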

/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(unsigned int elv_features,
					unsigned int required_features)
{
	return (required_features & elv_features) == required_features;
}

/**
 * elevator_match - Test an elevator name and features
 * @e: Scheduler to test
 * @name: Elevator name to test
 * @required_features: Features that the elevator must provide
 *
 * Return true if the elevator @e name matches @name and if @e provides all
 * the features specified by @required_features.
 */
static bool elevator_match(const struct elevator_type *e, const char *name,
			   unsigned int required_features)
{
	if (!elv_support_features(e->elevator_features, required_features))
		return false;
	if (!strcmp(e->elevator_name, name))
		return true;
	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
		return true;

	return false;
}

/**
 * elevator_find - Find an elevator
 * @name: Name of the elevator to find
 * @required_features: Features that the elevator must provide
 *
 * Return the first registered scheduler with name @name that supports all
 * of the features @required_features, or NULL if none is found.
 */
static struct elevator_type *elevator_find(const char *name,
					   unsigned int required_features)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (elevator_match(e, name, required_features))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
					  const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name, q->required_elevator_features);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
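		/*
		 * Not compiled in or not yet loaded: ask modprobe for
		 * the scheduler module, e.g. "bfq-iosched" for "bfq".
		 */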
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name, q->required_elevator_features);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);
	return e;
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				      struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);

	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

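	/*
	 * Sort by start sector; requests with equal start sectors go to
	 * the right, preserving insertion order among equal keys.
	 */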
	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
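
/*
 * Illustrative sketch (not part of this file, names hypothetical): an io
 * scheduler typically keeps a sector-sorted tree with the helpers above
 * and probes it for a front merge, i.e. a request starting where the bio
 * ends:
 *
 *	static int example_request_merge(struct request_queue *q,
 *					 struct request **rq, struct bio *bio)
 *	{
 *		sector_t sector = bio_end_sector(bio);
 *		struct request *__rq = elv_rb_find(&example_sort_list, sector);
 *
 *		if (__rq && elv_bio_merge_ok(__rq, bio)) {
 *			*rq = __rq;
 *			return ELEVATOR_FRONT_MERGE;
 *		}
 *		return ELEVATOR_NO_MERGE;
 *	}
 */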

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
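
/*
 * Worked example of the retry loop above (illustrative sector ranges):
 * with A = [0,8) and B = [8,16) both on the hash, inserting C = [16,24)
 * first appends C to B, giving B = [8,24); the loop then continues with
 * rq = B, finds A ending at B's start sector, and appends B to A,
 * leaving a single request A = [0,24).
 */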

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show = elv_attr_show,
	.store = elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops = &elv_sysfs_ops,
	.release = elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		e->registered = 1;
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	if (e && e->registered) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);

		e->registered = 0;
	}
}

int elv_register(struct elevator_type *e)
{
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name, 0)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
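
/*
 * Illustrative sketch (not part of this file, all names hypothetical):
 * the minimal shape of a scheduler module registering with the helpers
 * above. insert_requests and dispatch_request are the two mandatory ops
 * checked by elv_register().
 *
 *	static struct elevator_type example_iosched = {
 *		.ops = {
 *			.insert_requests	= example_insert_requests,
 *			.dispatch_request	= example_dispatch_request,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&example_iosched);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		elv_unregister(&example_iosched);
 *	}
 */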

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	if (q->elevator) {
		elv_unregister_queue(q);
		elevator_exit(q);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out;

	if (new_e) {
		ret = elv_register_queue(q, true);
		if (ret) {
			elevator_exit(q);
			goto out;
		}
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

out:
	return ret;
}

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (!queue_is_mq(q) ||
	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
		return false;
	return true;
}

/*
 * For single queue devices, or multi-queue devices sharing a tag set,
 * default to using mq-deadline. Otherwise, or if mq-deadline is not
 * available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	if (q->nr_hw_queues != 1 &&
	    !blk_mq_is_shared_tags(q->tag_set->flags))
		return NULL;

	return elevator_get(q, "mq-deadline", false);
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
	struct elevator_type *e, *found = NULL;

	spin_lock(&elv_list_lock);

	list_for_each_entry(e, &elv_list, list) {
		if (elv_support_features(e->elevator_features,
					 q->required_elevator_features)) {
			found = e;
			break;
		}
	}

	if (found && !try_module_get(found->elevator_owner))
		found = NULL;

	spin_unlock(&elv_list_lock);
	return found;
}

/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first available elevator matching the required
 * features. If no suitable elevator is found, or if the chosen elevator's
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	if (!q->required_elevator_features)
		e = elevator_get_default(q);
	else
		e = elevator_get_by_features(q);
	if (!e)
		return;

	/*
	 * We are called before the disk is added, when there isn't any FS
	 * I/O, so freezing the queue plus canceling dispatch work is enough
	 * to drain any dispatch activity originating from passthrough
	 * requests; there is no need to quiesce the queue, which could add
	 * long boot latency, especially when lots of disks are involved.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_cancel_work_sync(q);

	err = blk_mq_init_sched(q, e);

	blk_mq_unfreeze_queue(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
		elevator_put(e);
	}
}

/*
 * Switch to the new_e io scheduler. Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we need
 * for the new one. This way we have a chance of going back to the old one
 * if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int err;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	err = elevator_switch_mq(q, new_e);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (!strncmp(name, "none", 4)) {
		if (!q->elevator)
			return 0;
		return elevator_switch(q, NULL);
	}

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(q, strstrip(elevator_name), true);
	if (!e)
		return -EINVAL;

	if (q->elevator &&
	    elevator_match(q->elevator->type, elevator_name, 0)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!elv_support_iosched(q))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!queue_is_mq(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && elevator_match(elv, __e->elevator_name, 0)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (elv_support_iosched(q) &&
		    elevator_match(__e, __e->elevator_name,
				   q->required_elevator_features))
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

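	/*
	 * "none" is always a valid choice: if a scheduler is active, list
	 * it unbracketed here; otherwise "[none]" was already emitted as
	 * the selected entry above.
	 */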
	if (q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}
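
/*
 * From userspace, the two functions above implement the scheduler sysfs
 * attribute, e.g. (device name and available schedulers are illustrative):
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	[mq-deadline] kyber bfq none
 *	# echo kyber > /sys/block/sda/queue/scheduler
 */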

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);