// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

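/*
 * Helpers to format and parse a single unsigned long queue variable for
 * sysfs.  Parsed values larger than UINT_MAX are rejected.
 */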
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

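/*
 * Queue depth (nr_requests) is only tunable for blk-mq queues; values
 * below BLKDEV_MIN_RQ are silently raised to that minimum.
 */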
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

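/*
 * read_ahead_kb is exposed in kilobytes while the backing_dev_info tracks
 * read-ahead in pages, hence the PAGE_SHIFT - 10 conversions.
 */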
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

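/*
 * discard_max_bytes must be written as a multiple of the discard
 * granularity (assumed to be a power of two by the mask check below) and
 * is capped at the hardware limit; it is stored in 512-byte sectors.
 */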
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

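/*
 * discard_zeroes_data and write_same_max_bytes are retained only for
 * backward compatibility and always report 0.
 */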
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

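/*
 * Writing 0 to max_sectors_kb drops the user limit and falls back to the
 * smaller of the hardware limit and the kernel default; any other value
 * must lie between one page and the hardware limit.
 */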
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long var;
	unsigned int max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&var, page, count);

	if (ret < 0)
		return ret;

	max_sectors_kb = (unsigned int)var;
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
					 q->limits.max_dev_sectors >> 1);
	if (max_sectors_kb == 0) {
		q->limits.max_user_sectors = 0;
		max_sectors_kb = min(max_hw_sectors_kb,
				     BLK_DEF_MAX_SECTORS >> 1);
	} else {
		if (max_sectors_kb > max_hw_sectors_kb ||
		    max_sectors_kb < page_kb)
			return -EINVAL;
		q->limits.max_user_sectors = max_sectors_kb << 1;
	}

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

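/*
 * Generate sysfs show/store handlers for a boolean queue flag.  @neg
 * inverts the exposed value, e.g. "rotational" is the negation of
 * QUEUE_FLAG_NONROT.
 */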
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		 return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

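/*
 * nomerges: 0 leaves all merging enabled, 1 disables only the extended
 * (lookup-based) merge attempts, 2 disables merging entirely.
 */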
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

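/*
 * rq_affinity: 0 completes requests wherever they happen to land, 1 steers
 * completions to a CPU in the submitter's group, 2 forces them onto the
 * exact submitting CPU.  The store is rejected (-EINVAL) on !CONFIG_SMP.
 */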
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

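/*
 * io_poll_delay is retained as a stub: reads always report -1 (classic
 * polling) and writes are accepted but have no effect.
 */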
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

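/*
 * write_cache: "write back" may only be enabled when the device itself has
 * a writeback cache (QUEUE_FLAG_HW_WC); "write through" or "none" clears
 * the flag again.
 */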
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	if (!strncmp(page, "write back", 10)) {
		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
			return -EINVAL;
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4)) {
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	} else {
		return -EINVAL;
	}

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

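/*
 * wbt_lat_usec: target latency for writeback throttling, in microseconds.
 * Writing -1 restores the default for the device type, 0 disables
 * throttling.
 */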
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

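/*
 * queue_attr_visible() hides the zone limits on non-zoned devices;
 * blk_mq_queue_attr_visible() hides all blk-mq attributes on non-mq queues
 * and io_timeout when the driver provides no ->timeout handler.
 */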
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

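/*
 * All per-attribute show/store methods are serialized against each other
 * by q->sysfs_lock, taken in the wrappers below.
 */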
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);
	blk_throtl_register(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}