Lines matching refs: q — block/blk-sysfs.c (request_queue sysfs show/store handlers)

63 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
65 return queue_var_show(q->nr_requests, page); in queue_requests_show()
69 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
74 if (!queue_is_mq(q)) in queue_requests_store()
84 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
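Nearly every handler in this file funnels through the queue_var_show()/queue_var_store() pair. For reference, a minimal sketch of their shape (matching what mainline carried in this era; verify against your tree):

    static ssize_t queue_var_show(unsigned long var, char *page)
    {
            /* sysfs convention: one value, newline-terminated */
            return sprintf(page, "%lu\n", var);
    }

    static ssize_t queue_var_store(unsigned long *var, const char *page,
                                   size_t count)
    {
            unsigned long v;
            int err;

            /* parse the decimal string userspace wrote to the attribute */
            err = kstrtoul(page, 10, &v);
            if (err || v > UINT_MAX)
                    return -EINVAL;

            *var = v;
            return count;
    }

queue_requests_store() parses the value this way, rejects non-mq queues up front, and hands the result to blk_mq_update_nr_requests() to resize the tag sets.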
91 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
95 if (!q->disk) in queue_ra_show()
97 ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10); in queue_ra_show()
102 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
107 if (!q->disk) in queue_ra_store()
112 q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10); in queue_ra_store()
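read_ahead_kb is stored internally as a page count, so the two handlers shift by (PAGE_SHIFT - 10) to convert between KiB and pages. A standalone demonstration of the arithmetic (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    int main(void)
    {
            unsigned long ra_kb = 128;
            unsigned long ra_pages = ra_kb >> (PAGE_SHIFT - 10);    /* KiB -> pages */

            /* prints: 128 KiB = 32 pages = 128 KiB round-tripped */
            printf("%lu KiB = %lu pages = %lu KiB round-tripped\n",
                   ra_kb, ra_pages, ra_pages << (PAGE_SHIFT - 10));
            return 0;
    }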
116 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
118 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
123 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
125 return queue_var_show(queue_max_segments(q), page); in queue_max_segments_show()
128 static ssize_t queue_max_discard_segments_show(struct request_queue *q, char *page) in queue_max_discard_segments_show() argument
131 return queue_var_show(queue_max_discard_segments(q), page); in queue_max_discard_segments_show()
134 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
136 return queue_var_show(q->limits.max_integrity_segments, page); in queue_max_integrity_segments_show()
139 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
141 return queue_var_show(queue_max_segment_size(q), page); in queue_max_segment_size_show()
144 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
146 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
149 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
151 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
154 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) in queue_chunk_sectors_show() argument
156 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
159 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
161 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
164 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
166 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
169 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
171 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
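Each of these one-line show handlers is bound to a sysfs attribute by entry-definition macros later in the file. A sketch of the read-only and read-write variants as they appear in mainline of this period (double-check the modes against your tree):

    #define QUEUE_RO_ENTRY(_prefix, _name)                          \
    static struct queue_sysfs_entry _prefix##_entry = {             \
            .attr   = { .name = _name, .mode = 0444 },              \
            .show   = _prefix##_show,                               \
    };

    #define QUEUE_RW_ENTRY(_prefix, _name)                          \
    static struct queue_sysfs_entry _prefix##_entry = {             \
            .attr   = { .name = _name, .mode = 0644 },              \
            .show   = _prefix##_show,                               \
            .store  = _prefix##_store,                              \
    };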
174 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) in queue_discard_max_hw_show() argument
178 (unsigned long long)q->limits.max_hw_discard_sectors << 9); in queue_discard_max_hw_show()
181 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
184 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
187 static ssize_t queue_discard_max_store(struct request_queue *q, const char *page, size_t count) in queue_discard_max_store() argument
196 if (max_discard & (q->limits.discard_granularity - 1)) in queue_discard_max_store()
203 if (max_discard > q->limits.max_hw_discard_sectors) in queue_discard_max_store()
204 max_discard = q->limits.max_hw_discard_sectors; in queue_discard_max_store()
206 q->limits.max_discard_sectors = max_discard; in queue_discard_max_store()
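The granularity check above uses the usual power-of-two trick: x & (g - 1) equals x % g when g is a power of two, so a nonzero result means the requested discard limit is misaligned. A small userspace illustration (the granularity value is chosen for the example):

    #include <stdio.h>

    int main(void)
    {
            unsigned long granularity = 512;        /* assumption: power of two */
            unsigned long ok = 4096, bad = 4100;

            /* x & (g - 1) == x % g for power-of-two g */
            printf("%lu aligned to %lu: %s\n", ok, granularity,
                   (ok & (granularity - 1)) ? "no (-EINVAL)" : "yes");
            printf("%lu aligned to %lu: %s\n", bad, granularity,
                   (bad & (granularity - 1)) ? "no (-EINVAL)" : "yes");
            return 0;
    }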
210 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
215 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
220 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) in queue_write_zeroes_max_show() argument
223 (unsigned long long)q->limits.max_write_zeroes_sectors << 9); in queue_write_zeroes_max_show()
226 static ssize_t queue_zone_write_granularity_show(struct request_queue *q, char *page) in queue_zone_write_granularity_show() argument
229 return queue_var_show(queue_zone_write_granularity(q), page); in queue_zone_write_granularity_show()
232 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page) in queue_zone_append_max_show() argument
234 unsigned long long max_sectors = q->limits.max_zone_append_sectors; in queue_zone_append_max_show()
240 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
243 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
251 q->limits.max_dev_sectors >> 1); in queue_max_sectors_store()
256 spin_lock_irq(&q->queue_lock); in queue_max_sectors_store()
257 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
258 if (q->disk) in queue_max_sectors_store()
259 q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); in queue_max_sectors_store()
260 spin_unlock_irq(&q->queue_lock); in queue_max_sectors_store()
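From userspace these handlers appear as plain files. A minimal reader for the attribute this store handler backs (the device name sda and hence the path are assumptions; adjust for your system):

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/block/sda/queue/max_sectors_kb";
            FILE *f = fopen(path, "r");
            unsigned long kb;

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fscanf(f, "%lu", &kb) != 1) {
                    fclose(f);
                    fprintf(stderr, "unexpected format in %s\n", path);
                    return 1;
            }
            fclose(f);
            printf("max_sectors_kb = %lu\n", kb);
            /* Writing works the same way on a FILE opened "w"; the store
             * handler rejects values outside the [page size, hardware limit]
             * window with -EINVAL rather than clamping. */
            return 0;
    }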
265 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
267 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
272 static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page) in queue_virt_boundary_mask_show() argument
274 return queue_var_show(q->limits.virt_boundary_mask, page); in queue_virt_boundary_mask_show()
279 queue_##name##_show(struct request_queue *q, char *page) \
282 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
286 queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
297 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
299 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
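The fragments above come from the QUEUE_SYSFS_BIT_FNS() macro, which stamps out a show/store pair per queue flag; the neg parameter inverts the bit for attributes such as rotational. Reconstructed for context from the mainline source of this period (a sketch; verify against your tree):

    #define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                        \
    static ssize_t                                                      \
    queue_##name##_show(struct request_queue *q, char *page)            \
    {                                                                   \
            int bit;                                                    \
            bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);         \
            return queue_var_show(neg ? !bit : bit, page);              \
    }                                                                   \
    static ssize_t                                                      \
    queue_##name##_store(struct request_queue *q, const char *page,     \
                         size_t count)                                  \
    {                                                                   \
            unsigned long val;                                          \
            ssize_t ret;                                                \
            ret = queue_var_store(&val, page, count);                   \
            if (ret < 0)                                                \
                    return ret;                                         \
            if (neg)                                                    \
                    val = !val;                                         \
                                                                        \
            if (val)                                                    \
                    blk_queue_flag_set(QUEUE_FLAG_##flag, q);           \
            else                                                        \
                    blk_queue_flag_clear(QUEUE_FLAG_##flag, q);         \
            return ret;                                                 \
    }

In kernels of this era it is instantiated for nonrot/NONROT (negated), random/ADD_RANDOM, iostats/IO_STAT and stable_writes/STABLE_WRITES.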
309 static ssize_t queue_zoned_show(struct request_queue *q, char *page) in queue_zoned_show() argument
311 switch (blk_queue_zoned_model(q)) { in queue_zoned_show()
321 static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) in queue_nr_zones_show() argument
323 return queue_var_show(blk_queue_nr_zones(q), page); in queue_nr_zones_show()
326 static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page) in queue_max_open_zones_show() argument
328 return queue_var_show(queue_max_open_zones(q), page); in queue_max_open_zones_show()
331 static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page) in queue_max_active_zones_show() argument
333 return queue_var_show(queue_max_active_zones(q), page); in queue_max_active_zones_show()
336 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
338 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
339 blk_queue_noxmerges(q), page); in queue_nomerges_show()
342 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, size_t count) in queue_nomerges_store() argument
351 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
352 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
354 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
356 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
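Decoded, the two flags combine into the single value the show handler above reports (semantics per Documentation/block/queue-sysfs.rst):

    /* "nomerges" = (NOMERGES << 1) | NOXMERGES:
     *   0 - all merge heuristics enabled
     *   1 - only simple one-hit merges tried   (QUEUE_FLAG_NOXMERGES)
     *   2 - no merging attempted at all        (QUEUE_FLAG_NOMERGES)
     */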
361 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
363 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
364 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
370 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
381 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
382 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
384 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
385 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
387 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
388 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
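The three-way value handled above maps onto the completion-affinity flags as follows (per the block sysfs documentation):

    /* "rq_affinity":
     *   0 - no completion steering (both flags clear)
     *   1 - complete on a CPU in the submitter's group    (SAME_COMP)
     *   2 - force completion on the exact submitting CPU  (SAME_COMP + SAME_FORCE)
     */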
394 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) in queue_poll_delay_show() argument
398 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) in queue_poll_delay_show()
401 val = q->poll_nsec / 1000; in queue_poll_delay_show()
406 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, size_t count) in queue_poll_delay_store() argument
411 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_delay_store()
419 q->poll_nsec = BLK_MQ_POLL_CLASSIC; in queue_poll_delay_store()
421 q->poll_nsec = val * 1000; in queue_poll_delay_store()
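BLK_MQ_POLL_CLASSIC is the sentinel behind the -1 case. The accepted values and their meanings (per the queue sysfs documentation):

    /* "io_poll_delay":
     *   -1 - classic poll: busy-wait for completion (BLK_MQ_POLL_CLASSIC)
     *    0 - hybrid poll: kernel estimates the sleep before polling
     *   >0 - hybrid poll: sleep this many microseconds, then poll
     */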
428 static ssize_t queue_poll_show(struct request_queue *q, char *page) in queue_poll_show() argument
430 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
433 static ssize_t queue_poll_store(struct request_queue *q, const char *page, size_t count) in queue_poll_store() argument
436 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in queue_poll_store()
443 static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) in queue_io_timeout_show() argument
445 return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout)); in queue_io_timeout_show()
448 static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, size_t count) in queue_io_timeout_store() argument
458 blk_queue_rq_timeout(q, msecs_to_jiffies(val)); in queue_io_timeout_store()
463 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) in queue_wb_lat_show() argument
465 if (!wbt_rq_qos(q)) in queue_wb_lat_show()
468 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); in queue_wb_lat_show()
471 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, size_t count) in queue_wb_lat_store() argument
484 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
486 ret = wbt_init(q); in queue_wb_lat_store()
492 val = wbt_default_latency_nsec(q); in queue_wb_lat_store()
496 if (wbt_get_min_lat(q) == val) in queue_wb_lat_store()
504 blk_mq_freeze_queue(q); in queue_wb_lat_store()
505 blk_mq_quiesce_queue(q); in queue_wb_lat_store()
507 wbt_set_min_lat(q, val); in queue_wb_lat_store()
509 blk_mq_unquiesce_queue(q); in queue_wb_lat_store()
510 blk_mq_unfreeze_queue(q); in queue_wb_lat_store()
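Changing the writeback-throttling latency target must not race with in-flight requests, hence the freeze/quiesce bracket. The general shape of the pattern, annotated (a sketch; the real handler also clamps the value and short-circuits unchanged targets, as shown above):

    /* Drain in-flight requests and block new ones ... */
    blk_mq_freeze_queue(q);
    /* ... and wait out dispatch/completion paths still running. */
    blk_mq_quiesce_queue(q);

    wbt_set_min_lat(q, val);        /* safe: no I/O can observe a torn update */

    /* Undo in reverse order. */
    blk_mq_unquiesce_queue(q);
    blk_mq_unfreeze_queue(q);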
515 static ssize_t queue_wc_show(struct request_queue *q, char *page) in queue_wc_show() argument
517 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in queue_wc_show()
523 static ssize_t queue_wc_store(struct request_queue *q, const char *page, size_t count) in queue_wc_store() argument
538 blk_queue_flag_set(QUEUE_FLAG_WC, q); in queue_wc_store()
540 blk_queue_flag_clear(QUEUE_FLAG_WC, q); in queue_wc_store()
545 static ssize_t queue_fua_show(struct request_queue *q, char *page) in queue_fua_show() argument
547 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); in queue_fua_show()
550 static ssize_t queue_dax_show(struct request_queue *q, char *page) in queue_dax_show() argument
552 return queue_var_show(blk_queue_dax(q), page); in queue_dax_show()
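Taken together, these handlers populate /sys/block/<disk>/queue. A quick way to see the resulting attribute files from userspace (the disk name sda is an assumption):

    #include <dirent.h>
    #include <stdio.h>

    int main(void)
    {
            const char *dir = "/sys/block/sda/queue";
            DIR *d = opendir(dir);
            struct dirent *e;

            if (!d) {
                    perror(dir);
                    return 1;
            }
            while ((e = readdir(d)) != NULL)
                    printf("%s\n", e->d_name);      /* nr_requests, read_ahead_kb, ... */
            closedir(d);
            return 0;
    }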
676 struct request_queue *q = container_of(kobj, struct request_queue, kobj); in queue_attr_visible() local
680 (!q->mq_ops || !q->mq_ops->timeout)) in queue_attr_visible()
685 !blk_queue_is_zoned(q)) in queue_attr_visible()
703 struct request_queue *q = container_of(kobj, struct request_queue, kobj); in queue_attr_show() local
709 mutex_lock(&q->sysfs_lock); in queue_attr_show()
710 res = entry->show(q, page); in queue_attr_show()
711 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
720 struct request_queue *q; in queue_attr_store() local
726 q = container_of(kobj, struct request_queue, kobj); in queue_attr_store()
727 mutex_lock(&q->sysfs_lock); in queue_attr_store()
728 res = entry->store(q, page, length); in queue_attr_store()
729 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
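Both dispatchers recover their operands the same way: the queue from the kobject embedded in struct request_queue (via container_of(), as at line 726) and the handler from the attribute. The glue type, roughly as it appears in this file:

    struct queue_sysfs_entry {
            struct attribute attr;
            ssize_t (*show)(struct request_queue *q, char *page);
            ssize_t (*store)(struct request_queue *q, const char *page,
                             size_t count);
    };

container_of(attr, struct queue_sysfs_entry, attr) yields the entry, and q->sysfs_lock serializes every show/store against concurrent queue reconfiguration.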
735 struct request_queue *q = container_of(rcu_head, struct request_queue, rcu_head); in blk_free_queue_rcu() local
738 kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q); in blk_free_queue_rcu()
763 struct request_queue *q = container_of(kobj, struct request_queue, kobj); in blk_release_queue() local
768 percpu_ref_exit(&q->q_usage_counter); in blk_release_queue()
770 if (q->poll_stat) in blk_release_queue()
771 blk_stat_remove_callback(q, q->poll_cb); in blk_release_queue()
772 blk_stat_free_callback(q->poll_cb); in blk_release_queue()
774 blk_free_queue_stats(q->stats); in blk_release_queue()
775 kfree(q->poll_stat); in blk_release_queue()
777 blk_queue_free_zone_bitmaps(q); in blk_release_queue()
779 if (queue_is_mq(q)) in blk_release_queue()
780 blk_mq_release(q); in blk_release_queue()
782 bioset_exit(&q->bio_split); in blk_release_queue()
784 if (blk_queue_has_srcu(q)) in blk_release_queue()
785 cleanup_srcu_struct(q->srcu); in blk_release_queue()
787 ida_simple_remove(&blk_queue_ida, q->id); in blk_release_queue()
788 call_rcu(&q->rcu_head, blk_free_queue_rcu); in blk_release_queue()
809 struct request_queue *q = disk->queue; in blk_register_queue() local
815 mutex_lock(&q->sysfs_dir_lock); in blk_register_queue()
817 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); in blk_register_queue()
823 ret = sysfs_create_group(&q->kobj, &queue_attr_group); in blk_register_queue()
826 kobject_del(&q->kobj); in blk_register_queue()
831 if (queue_is_mq(q)) in blk_register_queue()
832 __blk_mq_register_dev(dev, q); in blk_register_queue()
833 mutex_lock(&q->sysfs_lock); in blk_register_queue()
835 mutex_lock(&q->debugfs_mutex); in blk_register_queue()
836 q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), in blk_register_queue()
838 if (queue_is_mq(q)) in blk_register_queue()
839 blk_mq_debugfs_register(q); in blk_register_queue()
840 mutex_unlock(&q->debugfs_mutex); in blk_register_queue()
846 if (q->elevator) { in blk_register_queue()
847 ret = elv_register_queue(q, false); in blk_register_queue()
852 ret = blk_crypto_sysfs_register(q); in blk_register_queue()
856 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); in blk_register_queue()
857 wbt_enable_default(q); in blk_register_queue()
858 blk_throtl_register_queue(q); in blk_register_queue()
861 kobject_uevent(&q->kobj, KOBJ_ADD); in blk_register_queue()
862 if (q->elevator) in blk_register_queue()
863 kobject_uevent(&q->elevator->kobj, KOBJ_ADD); in blk_register_queue()
864 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
867 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
878 if (!blk_queue_init_done(q)) { in blk_register_queue()
879 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
880 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
886 elv_unregister_queue(q); in blk_register_queue()
888 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
889 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
890 kobject_del(&q->kobj); in blk_register_queue()
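Drivers do not call blk_register_queue() directly; it runs as part of adding the gendisk. The typical call chain, sketched as a comment (function names from mainline of this period):

    /*
     * blk_mq_alloc_disk() / blk_alloc_disk()  - allocate gendisk + queue
     *   device_add_disk()                     - make the disk visible
     *     blk_register_queue()                - create /sys/block/<disk>/queue,
     *                                           register mq sysfs, debugfs,
     *                                           elevator, crypto; on failure
     *                                           unwind in reverse order
     */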
906 struct request_queue *q = disk->queue; in blk_unregister_queue() local
908 if (WARN_ON(!q)) in blk_unregister_queue()
912 if (!blk_queue_registered(q)) in blk_unregister_queue()
920 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
921 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); in blk_unregister_queue()
922 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
924 mutex_lock(&q->sysfs_dir_lock); in blk_unregister_queue()
929 if (queue_is_mq(q)) in blk_unregister_queue()
930 blk_mq_unregister_dev(disk_to_dev(disk), q); in blk_unregister_queue()
931 blk_crypto_sysfs_unregister(q); in blk_unregister_queue()
934 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
935 elv_unregister_queue(q); in blk_unregister_queue()
937 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
940 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_unregister_queue()
941 kobject_del(&q->kobj); in blk_unregister_queue()
942 mutex_unlock(&q->sysfs_dir_lock); in blk_unregister_queue()
944 mutex_lock(&q->debugfs_mutex); in blk_unregister_queue()
945 blk_trace_shutdown(q); in blk_unregister_queue()
946 debugfs_remove_recursive(q->debugfs_dir); in blk_unregister_queue()
947 q->debugfs_dir = NULL; in blk_unregister_queue()
948 q->sched_debugfs_dir = NULL; in blk_unregister_queue()
949 q->rqos_debugfs_dir = NULL; in blk_unregister_queue()
950 mutex_unlock(&q->debugfs_mutex); in blk_unregister_queue()