Lines matching refs:blkg
121 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, free_work); in blkg_free_workfn() local
123 struct request_queue *q = blkg->q; in blkg_free_workfn()
135 if (blkg->pd[i]) in blkg_free_workfn()
136 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); in blkg_free_workfn()
137 if (blkg->parent) in blkg_free_workfn()
138 blkg_put(blkg->parent); in blkg_free_workfn()
140 list_del_init(&blkg->q_node); in blkg_free_workfn()
145 free_percpu(blkg->iostat_cpu); in blkg_free_workfn()
146 percpu_ref_exit(&blkg->refcnt); in blkg_free_workfn()
147 kfree(blkg); in blkg_free_workfn()
156 static void blkg_free(struct blkcg_gq *blkg) in blkg_free() argument
158 if (!blkg) in blkg_free()
165 INIT_WORK(&blkg->free_work, blkg_free_workfn); in blkg_free()
166 schedule_work(&blkg->free_work); in blkg_free()
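
The fragments above come from the kernel's block-cgroup code (block/blk-cgroup.c). A minimal reconstruction of the free path, assembled from the matched lines, follows; the BLKCG_MAX_POLS loop bound and the queue-side locking the listing skips are assumptions. The point of the pattern: blkg_free() may be called from contexts that cannot sleep, so the real teardown is bounced to a work item.

static void blkg_free_workfn(struct work_struct *work)
{
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
                                             free_work);
        int i;

        /* free per-policy data before dropping the parent reference */
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
        if (blkg->parent)
                blkg_put(blkg->parent);

        /* the blkg stays on the queue's list until freed; unlink it now
         * (the lock protecting q_node is elided in the listing) */
        list_del_init(&blkg->q_node);

        free_percpu(blkg->iostat_cpu);
        percpu_ref_exit(&blkg->refcnt);
        kfree(blkg);
}

static void blkg_free(struct blkcg_gq *blkg)
{
        if (!blkg)
                return;
        /* defer the actual free to process context */
        INIT_WORK(&blkg->free_work, blkg_free_workfn);
        schedule_work(&blkg->free_work);
}
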
171 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); in __blkg_release() local
172 struct blkcg *blkcg = blkg->blkcg; in __blkg_release()
176 WARN_ON(!bio_list_empty(&blkg->async_bios)); in __blkg_release()
188 css_put(&blkg->blkcg->css); in __blkg_release()
189 blkg_free(blkg); in __blkg_release()
202 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); in blkg_release() local
204 call_rcu(&blkg->rcu_head, __blkg_release); in blkg_release()
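
Release is a three-stage chain: the percpu ref hits zero, blkg_release() defers through RCU, and __blkg_release() drops the css reference before handing off to blkg_free(). A sketch, with the stat-flush and unpin steps the listing elides left out:

static void __blkg_release(struct rcu_head *rcu)
{
        struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

        /* no punted bios may remain once the last reference is gone */
        WARN_ON(!bio_list_empty(&blkg->async_bios));

        /* release the blkcg css this blkg was pinning, then free */
        css_put(&blkg->blkcg->css);
        blkg_free(blkg);
}

static void blkg_release(struct percpu_ref *ref)
{
        struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

        /* lookups are RCU-protected, so the free must wait a grace period */
        call_rcu(&blkg->rcu_head, __blkg_release);
}
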
212 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, async_bio_work); in blkg_async_bio_workfn() local
220 spin_lock(&blkg->async_bio_lock); in blkg_async_bio_workfn()
221 bio_list_merge(&bios, &blkg->async_bios); in blkg_async_bio_workfn()
222 bio_list_init(&blkg->async_bios); in blkg_async_bio_workfn()
223 spin_unlock(&blkg->async_bio_lock); in blkg_async_bio_workfn()
244 struct blkcg_gq *blkg = bio->bi_blkg; in blkcg_punt_bio_submit() local
246 if (blkg->parent) { in blkcg_punt_bio_submit()
247 spin_lock(&blkg->async_bio_lock); in blkcg_punt_bio_submit()
248 bio_list_add(&blkg->async_bios, bio); in blkcg_punt_bio_submit()
249 spin_unlock(&blkg->async_bio_lock); in blkcg_punt_bio_submit()
250 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work); in blkcg_punt_bio_submit()
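
The punt pattern in the two functions above: submission paths that must not recurse add the bio to a per-blkg list under async_bio_lock and kick a dedicated workqueue, and the work function splices the whole list out under the same lock and submits from process context. A sketch; plugging and the exact submit-path details are assumptions:

static void blkg_async_bio_workfn(struct work_struct *work)
{
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
                                             async_bio_work);
        struct bio_list bios = BIO_EMPTY_LIST;
        struct bio *bio;

        /* splice pending bios out so the lock is only held briefly */
        spin_lock(&blkg->async_bio_lock);
        bio_list_merge(&bios, &blkg->async_bios);
        bio_list_init(&blkg->async_bios);
        spin_unlock(&blkg->async_bio_lock);

        while ((bio = bio_list_pop(&bios)))
                submit_bio(bio);
}

/* inside blkcg_punt_bio_submit(); the root blkg is never punted */
        if (blkg->parent) {
                spin_lock(&blkg->async_bio_lock);
                bio_list_add(&blkg->async_bios, bio);
                spin_unlock(&blkg->async_bio_lock);
                queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
        }
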
308 struct blkcg_gq *blkg; in blkg_alloc() local
312 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node); in blkg_alloc()
313 if (!blkg) in blkg_alloc()
315 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask)) in blkg_alloc()
317 blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask); in blkg_alloc()
318 if (!blkg->iostat_cpu) in blkg_alloc()
323 blkg->q = disk->queue; in blkg_alloc()
324 INIT_LIST_HEAD(&blkg->q_node); in blkg_alloc()
325 blkg->blkcg = blkcg; in blkg_alloc()
327 spin_lock_init(&blkg->async_bio_lock); in blkg_alloc()
328 bio_list_init(&blkg->async_bios); in blkg_alloc()
329 INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn); in blkg_alloc()
332 u64_stats_init(&blkg->iostat.sync); in blkg_alloc()
334 u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync); in blkg_alloc()
335 per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg; in blkg_alloc()
349 blkg->pd[i] = pd; in blkg_alloc()
350 pd->blkg = blkg; in blkg_alloc()
355 return blkg; in blkg_alloc()
359 if (blkg->pd[i]) in blkg_alloc()
360 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); in blkg_alloc()
363 free_percpu(blkg->iostat_cpu); in blkg_alloc()
365 percpu_ref_exit(&blkg->refcnt); in blkg_alloc()
367 kfree(blkg); in blkg_alloc()
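
blkg_alloc() is the classic acquire-then-unwind shape: each resource gets a matching error label, so a failure at any step releases exactly what was set up before it. A reconstruction from the matched lines; the label names, the policy-enabled check, and the pd->plid assignment are assumptions:

static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i, cpu;

        /* allocate on the queue's NUMA node */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
        if (!blkg)
                return NULL;
        if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
                goto err_free_blkg;
        blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
        if (!blkg->iostat_cpu)
                goto err_exit_refcnt;

        blkg->q = disk->queue;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        spin_lock_init(&blkg->async_bio_lock);
        bio_list_init(&blkg->async_bios);
        INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);

        u64_stats_init(&blkg->iostat.sync);
        for_each_possible_cpu(cpu) {
                u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
                per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!pol || !blkcg_policy_enabled(disk->queue, pol))
                        continue;
                pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
                if (!pd)
                        goto err_free_pds;
                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
        }
        return blkg;

err_free_pds:
        while (--i >= 0)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
        free_percpu(blkg->iostat_cpu);
err_exit_refcnt:
        percpu_ref_exit(&blkg->refcnt);
err_free_blkg:
        kfree(blkg);
        return NULL;
}
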
378 struct blkcg_gq *blkg; in blkg_create() local
403 blkg = new_blkg; in blkg_create()
407 blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue); in blkg_create()
408 if (WARN_ON_ONCE(!blkg->parent)) { in blkg_create()
412 blkg_get(blkg->parent); in blkg_create()
419 if (blkg->pd[i] && pol->pd_init_fn) in blkg_create()
420 pol->pd_init_fn(blkg->pd[i]); in blkg_create()
425 ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg); in blkg_create()
427 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); in blkg_create()
428 list_add(&blkg->q_node, &disk->queue->blkg_list); in blkg_create()
433 if (blkg->pd[i]) { in blkg_create()
435 pol->pd_online_fn(blkg->pd[i]); in blkg_create()
436 blkg->pd[i]->online = true; in blkg_create()
440 blkg->online = true; in blkg_create()
444 return blkg; in blkg_create()
447 blkg_put(blkg); in blkg_create()
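
The core of blkg_create(): the new blkg pins its parent (which must already exist, hence the WARN_ON_ONCE), is indexed by queue id in the per-blkcg radix tree, and is linked onto both the blkcg's and the queue's lists before the per-policy online callbacks run. A fragment sketch; the error path for a failed radix_tree_insert() and the label name are assumptions:

        /* link parent; the root blkcg has none */
        if (blkcg_parent(blkcg)) {
                blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
                if (WARN_ON_ONCE(!blkg->parent))
                        goto err_put_blkg;      /* hypothetical label */
                blkg_get(blkg->parent);
        }

        ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &disk->queue->blkg_list);
                /* per-policy pd_online_fn() calls follow, then: */
                blkg->online = true;
        }
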
475 struct blkcg_gq *blkg; in blkg_lookup_create() local
480 blkg = blkg_lookup(blkcg, q); in blkg_lookup_create()
481 if (blkg) in blkg_lookup_create()
482 return blkg; in blkg_lookup_create()
485 blkg = blkg_lookup(blkcg, q); in blkg_lookup_create()
486 if (blkg) { in blkg_lookup_create()
488 blkg != rcu_dereference(blkcg->blkg_hint)) in blkg_lookup_create()
489 rcu_assign_pointer(blkcg->blkg_hint, blkg); in blkg_lookup_create()
504 blkg = blkg_lookup(parent, q); in blkg_lookup_create()
505 if (blkg) { in blkg_lookup_create()
507 ret_blkg = blkg; in blkg_lookup_create()
514 blkg = blkg_create(pos, disk, NULL); in blkg_lookup_create()
515 if (IS_ERR(blkg)) { in blkg_lookup_create()
516 blkg = ret_blkg; in blkg_lookup_create()
525 return blkg; in blkg_lookup_create()
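
blkg_lookup_create() first tries a plain lookup (twice: lockless, then under the queue lock). If the target blkcg still has no blkg, it walks toward the root until it finds the closest ancestor that has one, then creates the missing blkgs top-down, one level per loop iteration, falling back to the closest existing blkg if creation fails. A sketch of the creation walk, with q assumed to be disk->queue:

        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);
                struct blkcg_gq *ret_blkg = q->root_blkg;

                /* find the closest ancestor that already has a blkg */
                while (parent) {
                        blkg = blkg_lookup(parent, q);
                        if (blkg) {
                                /* remember it in case creation fails */
                                ret_blkg = blkg;
                                break;
                        }
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                /* create one level below the closest ancestor */
                blkg = blkg_create(pos, disk, NULL);
                if (IS_ERR(blkg)) {
                        blkg = ret_blkg;        /* fall back to closest */
                        break;
                }
                if (pos == blkcg)               /* reached the target level */
                        break;
        }
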
528 static void blkg_destroy(struct blkcg_gq *blkg) in blkg_destroy() argument
530 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy()
533 lockdep_assert_held(&blkg->q->queue_lock); in blkg_destroy()
542 if (hlist_unhashed(&blkg->blkcg_node)) in blkg_destroy()
548 if (blkg->pd[i] && blkg->pd[i]->online) { in blkg_destroy()
549 blkg->pd[i]->online = false; in blkg_destroy()
551 pol->pd_offline_fn(blkg->pd[i]); in blkg_destroy()
555 blkg->online = false; in blkg_destroy()
557 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
558 hlist_del_init_rcu(&blkg->blkcg_node); in blkg_destroy()
565 if (rcu_access_pointer(blkcg->blkg_hint) == blkg) in blkg_destroy()
572 percpu_ref_kill(&blkg->refcnt); in blkg_destroy()
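
blkg_destroy() is the offline half of teardown: per-policy offline callbacks, unlinking from the radix tree and hash list, hint invalidation, and finally percpu_ref_kill(), after which the release chain sketched earlier runs once the last reference drops. A reconstruction; the blkcg->lock assertion and the pd_offline_fn NULL check are assumptions:

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        int i;

        lockdep_assert_held(&blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);      /* assumed */

        /* raced with another destroy? the node is already unhashed */
        if (hlist_unhashed(&blkg->blkcg_node))
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && blkg->pd[i]->online) {
                        blkg->pd[i]->online = false;
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[i]);
                }
        }
        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /* drop the stale lookup hint if it points at this blkg */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /* put the queue-side reference; release runs via RCU at zero */
        percpu_ref_kill(&blkg->refcnt);
}
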
578 struct blkcg_gq *blkg, *n; in blkg_destroy_all() local
584 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
585 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy_all()
587 if (hlist_unhashed(&blkg->blkcg_node)) in blkg_destroy_all()
591 blkg_destroy(blkg); in blkg_destroy_all()
626 struct blkcg_gq *blkg; in blkcg_reset_stats() local
637 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_reset_stats()
640 per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_reset_stats()
645 bis->blkg = blkg; in blkcg_reset_stats()
647 memset(&blkg->iostat, 0, sizeof(blkg->iostat)); in blkcg_reset_stats()
648 u64_stats_init(&blkg->iostat.sync); in blkcg_reset_stats()
653 if (blkg->pd[i] && pol->pd_reset_stats_fn) in blkcg_reset_stats()
654 pol->pd_reset_stats_fn(blkg->pd[i]); in blkcg_reset_stats()
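
Resetting stats clears both the per-cpu sets and the aggregated set, then reruns each policy's reset hook. Note the back-pointer restored after the memset; a sketch (the seqcount re-init on the per-cpu set is an assumption):

        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                for_each_possible_cpu(cpu) {
                        struct blkg_iostat_set *bis =
                                per_cpu_ptr(blkg->iostat_cpu, cpu);

                        memset(bis, 0, sizeof(*bis));
                        u64_stats_init(&bis->sync);
                        bis->blkg = blkg;       /* restore the back-pointer */
                }
                memset(&blkg->iostat, 0, sizeof(blkg->iostat));
                u64_stats_init(&blkg->iostat.sync);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg->pd[i]);
                }
        }
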
663 const char *blkg_dev_name(struct blkcg_gq *blkg) in blkg_dev_name() argument
665 if (!blkg->q->disk) in blkg_dev_name()
667 return bdi_dev_name(blkg->q->disk->bdi); in blkg_dev_name()
694 struct blkcg_gq *blkg; in blkcg_print_blkgs() local
698 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_blkgs()
699 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
700 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
701 total += prfill(sf, blkg->pd[pol->plid], data); in blkcg_print_blkgs()
702 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
721 const char *dname = blkg_dev_name(pd->blkg); in __blkg_prfill_u64()
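
blkcg_print_blkgs() walks the blkcg's blkgs under each queue lock and calls a caller-supplied prfill callback for blkgs whose queue has the policy enabled; __blkg_prfill_u64() prints one "<device> <value>" line via blkg_dev_name() and returns the value so a total can be shown. A hypothetical usage sketch; my_policy, struct my_pd, my_prfill_u64, and my_stat_show are invented names:

/* hypothetical: print one u64 field of our pd per device */
static u64 my_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                         int off)
{
        return __blkg_prfill_u64(sf, pd, *(u64 *)((void *)pd + off));
}

static int my_stat_show(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), my_prfill_u64,
                          &my_policy, offsetof(struct my_pd, some_count),
                          true);        /* true: append a "Total" line */
        return 0;
}
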
818 struct blkcg_gq *blkg; in blkg_conf_prep() local
843 blkg = blkg_lookup(blkcg, q); in blkg_conf_prep()
844 if (blkg) in blkg_conf_prep()
885 blkg = blkg_lookup(pos, q); in blkg_conf_prep()
886 if (blkg) { in blkg_conf_prep()
889 blkg = blkg_create(pos, disk, new_blkg); in blkg_conf_prep()
890 if (IS_ERR(blkg)) { in blkg_conf_prep()
891 ret = PTR_ERR(blkg); in blkg_conf_prep()
903 ctx->blkg = blkg; in blkg_conf_prep()
938 if (ctx->blkg) { in blkg_conf_exit()
940 ctx->blkg = NULL; in blkg_conf_exit()
982 static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur, in blkcg_iostat_update() argument
989 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); in blkcg_iostat_update()
992 blkg_iostat_add(&blkg->iostat.cur, &delta); in blkcg_iostat_update()
994 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); in blkcg_iostat_update()
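
blkcg_iostat_update() folds one delta into a blkg's aggregate under its u64_stats sync, and advances the caller's "last" snapshot so the same delta is never added twice. A reconstruction, assuming the blkg_iostat_set/sub/add helpers visible elsewhere in this listing:

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
                                struct blkg_iostat *last)
{
        struct blkg_iostat delta;
        unsigned long flags;

        /* delta = cur - last; add it to the aggregate, move last forward */
        flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
        blkg_iostat_set(&delta, cur);
        blkg_iostat_sub(&delta, last);
        blkg_iostat_add(&blkg->iostat.cur, &delta);
        blkg_iostat_add(last, &delta);
        u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
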
1022 struct blkcg_gq *blkg = bisc->blkg; in __blkcg_rstat_flush() local
1023 struct blkcg_gq *parent = blkg->parent; in __blkcg_rstat_flush()
1035 blkcg_iostat_update(blkg, &cur, &bisc->last); in __blkcg_rstat_flush()
1039 blkcg_iostat_update(parent, &blkg->iostat.cur, in __blkcg_rstat_flush()
1040 &blkg->iostat.last); in __blkcg_rstat_flush()
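
One flush step then becomes: take a consistent snapshot of the per-cpu set, fold its delta into the blkg, and fold the blkg's own delta into its parent, so totals percolate up one level per flush. A fragment sketch; the seqcount read loop and the online checks are assumptions:

        struct blkcg_gq *blkg = bisc->blkg;
        struct blkcg_gq *parent = blkg->parent;
        struct blkg_iostat cur;
        unsigned int seq;

        /* consistent snapshot of this cpu's counters */
        do {
                seq = u64_stats_fetch_begin(&bisc->sync);
                blkg_iostat_set(&cur, &bisc->cur);
        } while (u64_stats_fetch_retry(&bisc->sync, seq));

        blkcg_iostat_update(blkg, &cur, &bisc->last);

        /* propagate this blkg's delta to its parent */
        if (parent && parent->online)
                blkcg_iostat_update(parent, &blkg->iostat.cur,
                                    &blkg->iostat.last);
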
1074 struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg; in blkcg_fill_root_iostats() local
1099 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); in blkcg_fill_root_iostats()
1100 blkg_iostat_set(&blkg->iostat.cur, &tmp); in blkcg_fill_root_iostats()
1101 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); in blkcg_fill_root_iostats()
1105 static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s) in blkcg_print_one_stat() argument
1107 struct blkg_iostat_set *bis = &blkg->iostat; in blkcg_print_one_stat()
1113 if (!blkg->online) in blkcg_print_one_stat()
1116 dname = blkg_dev_name(blkg); in blkcg_print_one_stat()
1139 if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) { in blkcg_print_one_stat()
1141 atomic_read(&blkg->use_delay), in blkcg_print_one_stat()
1142 atomic64_read(&blkg->delay_nsec)); in blkcg_print_one_stat()
1148 if (!blkg->pd[i] || !pol->pd_stat_fn) in blkcg_print_one_stat()
1151 pol->pd_stat_fn(blkg->pd[i], s); in blkcg_print_one_stat()
1160 struct blkcg_gq *blkg; in blkcg_print_stat() local
1168 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_stat()
1169 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
1170 blkcg_print_one_stat(blkg, sf); in blkcg_print_stat()
1171 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
1239 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, struct blkcg_gq, blkcg_node); in blkcg_destroy_blkgs() local
1241 struct request_queue *q = blkg->q; in blkcg_destroy_blkgs()
1255 blkg_destroy(blkg); in blkcg_destroy_blkgs()
1415 struct blkcg_gq *new_blkg, *blkg; in blkcg_init_disk() local
1431 blkg = blkg_create(&blkcg_root, disk, new_blkg); in blkcg_init_disk()
1432 if (IS_ERR(blkg)) in blkcg_init_disk()
1434 q->root_blkg = blkg; in blkcg_init_disk()
1459 return PTR_ERR(blkg); in blkcg_init_disk()
1516 struct blkcg_gq *blkg, *pinned_blkg = NULL; in blkcg_activate_policy() local
1528 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1531 if (blkg->pd[pol->plid]) in blkcg_activate_policy()
1535 if (blkg == pinned_blkg) { in blkcg_activate_policy()
1539 pd = pol->pd_alloc_fn(disk, blkg->blkcg, in blkcg_activate_policy()
1550 blkg_get(blkg); in blkcg_activate_policy()
1551 pinned_blkg = blkg; in blkcg_activate_policy()
1557 pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg, in blkcg_activate_policy()
1565 spin_lock(&blkg->blkcg->lock); in blkcg_activate_policy()
1567 pd->blkg = blkg; in blkcg_activate_policy()
1569 blkg->pd[pol->plid] = pd; in blkcg_activate_policy()
1578 spin_unlock(&blkg->blkcg->lock); in blkcg_activate_policy()
1597 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1598 struct blkcg *blkcg = blkg->blkcg; in blkcg_activate_policy()
1602 pd = blkg->pd[pol->plid]; in blkcg_activate_policy()
1608 blkg->pd[pol->plid] = NULL; in blkcg_activate_policy()
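
blkcg_activate_policy() cannot sleep while walking the list under the queue lock, so it allocates pds with GFP_NOWAIT; on failure it pins the blkg, drops the lock, preallocates one pd with GFP_KERNEL, and restarts the walk (in reverse, so parents get pds before children). A sketch of that retry loop; the enomem unwind and the queue freeze/unfreeze bracketing are elided:

retry:
        spin_lock_irq(&q->queue_lock);

        /* reverse walk: parents were added first, initialize them first */
        list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;

                if (blkg->pd[pol->plid])
                        continue;       /* already has a pd for this policy */

                if (blkg == pinned_blkg) {
                        pd = pd_prealloc;       /* GFP_KERNEL prealloc */
                        pd_prealloc = NULL;
                } else {
                        pd = pol->pd_alloc_fn(disk, blkg->blkcg,
                                              GFP_NOWAIT | __GFP_NOWARN);
                }

                if (!pd) {
                        /* pin this blkg, drop the lock, prealloc, retry */
                        if (pinned_blkg)
                                blkg_put(pinned_blkg);
                        blkg_get(blkg);
                        pinned_blkg = blkg;

                        spin_unlock_irq(&q->queue_lock);
                        pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
                                                       GFP_KERNEL);
                        if (!pd_prealloc)
                                goto enomem;    /* unwind, elided here */
                        goto retry;
                }

                spin_lock(&blkg->blkcg->lock);
                pd->blkg = blkg;
                pd->plid = pol->plid;
                blkg->pd[pol->plid] = pd;
                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(&q->queue_lock);
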
1630 struct blkcg_gq *blkg; in blkcg_deactivate_policy() local
1643 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1644 struct blkcg *blkcg = blkg->blkcg; in blkcg_deactivate_policy()
1647 if (blkg->pd[pol->plid]) { in blkcg_deactivate_policy()
1648 if (blkg->pd[pol->plid]->online && pol->pd_offline_fn) in blkcg_deactivate_policy()
1649 pol->pd_offline_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1650 pol->pd_free_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1651 blkg->pd[pol->plid] = NULL; in blkcg_deactivate_policy()
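
Deactivation is the mirror image, done in place under each blkcg's lock: offline the pd if needed, free it, and clear the slot. A fragment sketch; the surrounding queue-lock and freeze handling is elided:

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
                spin_unlock(&blkcg->lock);
        }
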
1788 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) in blkcg_scale_delay() argument
1790 u64 old = atomic64_read(&blkg->delay_start); in blkcg_scale_delay()
1793 if (atomic_read(&blkg->use_delay) < 0) in blkcg_scale_delay()
1810 atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) { in blkcg_scale_delay()
1811 u64 cur = atomic64_read(&blkg->delay_nsec); in blkcg_scale_delay()
1812 u64 sub = min_t(u64, blkg->last_delay, now - old); in blkcg_scale_delay()
1813 int cur_use = atomic_read(&blkg->use_delay); in blkcg_scale_delay()
1819 if (cur_use < blkg->last_use) in blkcg_scale_delay()
1820 sub = max_t(u64, sub, blkg->last_delay >> 1); in blkcg_scale_delay()
1829 atomic64_set(&blkg->delay_nsec, 0); in blkcg_scale_delay()
1830 blkg->last_delay = 0; in blkcg_scale_delay()
1832 atomic64_sub(sub, &blkg->delay_nsec); in blkcg_scale_delay()
1833 blkg->last_delay = cur - sub; in blkcg_scale_delay()
1835 blkg->last_use = cur_use; in blkcg_scale_delay()
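
The decay logic in blkcg_scale_delay(): at most once per window (the delay_start cmpxchg ensures exactly one caller does the work), subtract min(last_delay, time since last decay) from the accumulated delay_nsec, and subtract at least half of last_delay if use_delay dropped, i.e. the group was unthrottled. A reconstruction; the one-second window constant is an assumption:

        u64 old = atomic64_read(&blkg->delay_start);

        /* negative use_delay is a "forced delay" sentinel; never decay it */
        if (atomic_read(&blkg->use_delay) < 0)
                return;

        if (time_before64(old + NSEC_PER_SEC, now) &&
            atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
                u64 cur = atomic64_read(&blkg->delay_nsec);
                u64 sub = min_t(u64, blkg->last_delay, now - old);
                int cur_use = atomic_read(&blkg->use_delay);

                /* unthrottled since last window: decay at least half */
                if (cur_use < blkg->last_use)
                        sub = max_t(u64, sub, blkg->last_delay >> 1);

                if (sub >= cur) {
                        atomic64_set(&blkg->delay_nsec, 0);
                        blkg->last_delay = 0;
                } else {
                        atomic64_sub(sub, &blkg->delay_nsec);
                        blkg->last_delay = cur - sub;
                }
                blkg->last_use = cur_use;
        }
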
1845 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) in blkcg_maybe_throttle_blkg() argument
1854 while (blkg->parent) { in blkcg_maybe_throttle_blkg()
1855 int use_delay = atomic_read(&blkg->use_delay); in blkcg_maybe_throttle_blkg()
1860 blkcg_scale_delay(blkg, now); in blkcg_maybe_throttle_blkg()
1861 this_delay = atomic64_read(&blkg->delay_nsec); in blkcg_maybe_throttle_blkg()
1867 blkg = blkg->parent; in blkcg_maybe_throttle_blkg()
1915 struct blkcg_gq *blkg; in blkcg_maybe_throttle_current() local
1928 blkg = blkg_lookup(blkcg, disk->queue); in blkcg_maybe_throttle_current()
1929 if (!blkg) in blkcg_maybe_throttle_current()
1931 if (!blkg_tryget(blkg)) in blkcg_maybe_throttle_current()
1935 blkcg_maybe_throttle_blkg(blkg, use_memdelay); in blkcg_maybe_throttle_current()
1936 blkg_put(blkg); in blkcg_maybe_throttle_current()
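
blkcg_maybe_throttle_blkg() walks from the bio's blkg up to the root, decaying each level's delay and keeping the largest pending value; blkcg_maybe_throttle_current() is the entry point that looks up and tryget-pins the blkg first. A fragment sketch of the walk; the clock call and the local names beyond the listing are assumptions:

        u64 now = ktime_get_ns();       /* assumed clock source */
        u64 delay_nsec = 0;
        bool clamp = false;

        while (blkg->parent) {
                int use_delay = atomic_read(&blkg->use_delay);

                if (use_delay) {
                        u64 this_delay;

                        blkcg_scale_delay(blkg, now);
                        this_delay = atomic64_read(&blkg->delay_nsec);
                        if (this_delay > delay_nsec) {
                                delay_nsec = this_delay;
                                /* negative use_delay: forced, unclamped */
                                clamp = use_delay > 0;
                        }
                }
                blkg = blkg->parent;
        }
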
1989 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) in blkcg_add_delay() argument
1991 if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) in blkcg_add_delay()
1993 blkcg_scale_delay(blkg, now); in blkcg_add_delay()
1994 atomic64_add(delta, &blkg->delay_nsec); in blkcg_add_delay()
2009 struct blkcg_gq *blkg, *ret_blkg = NULL; in blkg_tryget_closest() local
2012 blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk); in blkg_tryget_closest()
2013 while (blkg) { in blkg_tryget_closest()
2014 if (blkg_tryget(blkg)) { in blkg_tryget_closest()
2015 ret_blkg = blkg; in blkg_tryget_closest()
2018 blkg = blkg->parent; in blkg_tryget_closest()
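
blkg_tryget_closest() pairs lookup-or-create with a percpu-ref tryget: if the exact blkg is already dying (tryget fails), it settles for the nearest live ancestor. A sketch of the loop:

        blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
        while (blkg) {
                if (blkg_tryget(blkg)) {
                        ret_blkg = blkg;        /* live reference acquired */
                        break;
                }
                blkg = blkg->parent;            /* dying; try one level up */
        }
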