Lines matching refs:blkg: symbol cross-reference for blkg in block/blk-cgroup.c.
Each entry is <source line number> <code on that line> in <enclosing function>, with a trailing "local" or "argument" marker on the lines where blkg is declared.

87 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, free_work); in blkg_free_workfn() local
92 if (blkg->pd[i]) in blkg_free_workfn()
93 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); in blkg_free_workfn()
95 if (blkg->q) in blkg_free_workfn()
96 blk_put_queue(blkg->q); in blkg_free_workfn()
97 free_percpu(blkg->iostat_cpu); in blkg_free_workfn()
98 percpu_ref_exit(&blkg->refcnt); in blkg_free_workfn()
99 kfree(blkg); in blkg_free_workfn()
108 static void blkg_free(struct blkcg_gq *blkg) in blkg_free() argument
110 if (!blkg) in blkg_free()
117 INIT_WORK(&blkg->free_work, blkg_free_workfn); in blkg_free()
118 schedule_work(&blkg->free_work); in blkg_free()
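
blkg_free() does not tear the blkg down inline: because ->pd_free_fn() and the request queue's release handler may sleep, it only initialises free_work and schedules blkg_free_workfn(), which then drops the policy data, the queue reference, the per-cpu stats and the blkg itself from worker context. Below is a rough userspace sketch of the same defer-the-free-to-a-worker idea, using a pthread in place of a kernel workqueue; struct obj, deferred_free() and free_worker() are invented names, not kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int id;
	struct obj *next;		/* pending-free list linkage */
};

static struct obj *pending;		/* objects queued for freeing */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

/* analogue of blkg_free(): cheap, never blocks, only queues the object */
static void deferred_free(struct obj *o)
{
	pthread_mutex_lock(&lock);
	o->next = pending;
	pending = o;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

/* analogue of blkg_free_workfn(): the real, possibly blocking teardown */
static void *free_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&kick, &lock);
	struct obj *o = pending;
	pending = o->next;
	pthread_mutex_unlock(&lock);

	printf("freeing obj %d in worker context\n", o->id);
	free(o);
	return NULL;
}

int main(void)
{
	pthread_t worker;
	struct obj *o = calloc(1, sizeof(*o));

	o->id = 42;
	pthread_create(&worker, NULL, free_worker, NULL);
	deferred_free(o);		/* safe even where blocking is not allowed */
	pthread_join(worker, NULL);
	return 0;
}
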
123 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); in __blkg_release() local
125 WARN_ON(!bio_list_empty(&blkg->async_bios)); in __blkg_release()
128 css_put(&blkg->blkcg->css); in __blkg_release()
129 if (blkg->parent) in __blkg_release()
130 blkg_put(blkg->parent); in __blkg_release()
131 blkg_free(blkg); in __blkg_release()
144 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); in blkg_release() local
146 call_rcu(&blkg->rcu_head, __blkg_release); in blkg_release()
151 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, async_bio_work); in blkg_async_bio_workfn() local
159 spin_lock_bh(&blkg->async_bio_lock); in blkg_async_bio_workfn()
160 bio_list_merge(&bios, &blkg->async_bios); in blkg_async_bio_workfn()
161 bio_list_init(&blkg->async_bios); in blkg_async_bio_workfn()
162 spin_unlock_bh(&blkg->async_bio_lock); in blkg_async_bio_workfn()
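
blkg_async_bio_workfn() holds async_bio_lock only long enough to splice blkg->async_bios into a private list (bio_list_merge() followed by bio_list_init()), then submits the bios with the lock released. A simplified userspace sketch of that splice-then-process pattern follows; struct pending_list, add() and drain() are made-up stand-ins, and a plain mutex replaces the bottom-half-disabling spinlock.

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

struct pending_list {
	pthread_mutex_t lock;
	struct node *head;
};

static struct pending_list list = { PTHREAD_MUTEX_INITIALIZER, NULL };

/* add one node under the lock (what the submission side does) */
static void add(struct node *n)
{
	pthread_mutex_lock(&list.lock);
	n->next = list.head;
	list.head = n;
	pthread_mutex_unlock(&list.lock);
}

/* take the lock only to steal the whole list, then work on it unlocked */
static void drain(void)
{
	pthread_mutex_lock(&list.lock);
	struct node *all = list.head;	/* bio_list_merge() analogue */
	list.head = NULL;		/* bio_list_init() analogue */
	pthread_mutex_unlock(&list.lock);

	for (struct node *n = all; n; n = n->next)
		printf("submitting %p with the lock released\n", (void *)n);
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };

	add(&a);
	add(&b);
	drain();
	return 0;
}
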
213 struct blkcg_gq *blkg; in blkg_alloc() local
217 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
218 if (!blkg) in blkg_alloc()
221 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask)) in blkg_alloc()
224 blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask); in blkg_alloc()
225 if (!blkg->iostat_cpu) in blkg_alloc()
231 blkg->q = q; in blkg_alloc()
232 INIT_LIST_HEAD(&blkg->q_node); in blkg_alloc()
233 spin_lock_init(&blkg->async_bio_lock); in blkg_alloc()
234 bio_list_init(&blkg->async_bios); in blkg_alloc()
235 INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn); in blkg_alloc()
236 blkg->blkcg = blkcg; in blkg_alloc()
238 u64_stats_init(&blkg->iostat.sync); in blkg_alloc()
240 u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync); in blkg_alloc()
254 blkg->pd[i] = pd; in blkg_alloc()
255 pd->blkg = blkg; in blkg_alloc()
259 return blkg; in blkg_alloc()
262 blkg_free(blkg); in blkg_alloc()
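
blkg_alloc() builds the blkg in several steps (kzalloc_node(), percpu_ref_init(), alloc_percpu_gfp(), then the per-policy pd allocations) and, if any step fails, hands the partially constructed object to blkg_free(), which tolerates missing pieces. The sketch below shows the same construct-in-steps, unwind-through-one-free-path idea in plain C; struct thing, thing_alloc() and thing_free() are invented for illustration.

#include <stdlib.h>
#include <string.h>

struct thing {
	int *stats;		/* stands in for blkg->iostat_cpu */
	char *name;		/* stands in for the policy data */
};

static void thing_free(struct thing *t)
{
	if (!t)
		return;
	free(t->name);
	free(t->stats);
	free(t);
}

static struct thing *thing_alloc(const char *name, int ncpus)
{
	struct thing *t = calloc(1, sizeof(*t));
	if (!t)
		return NULL;

	t->stats = calloc(ncpus, sizeof(*t->stats));
	if (!t->stats)
		goto out_free;

	t->name = strdup(name);
	if (!t->name)
		goto out_free;

	return t;

out_free:
	/* partial construction is handled by the same free path */
	thing_free(t);
	return NULL;
}

int main(void)
{
	struct thing *t = thing_alloc("example", 4);

	thing_free(t);
	return 0;
}
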
269 struct blkcg_gq *blkg; in blkg_lookup_slowpath() local
277 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
278 if (blkg && blkg->q == q) { in blkg_lookup_slowpath()
281 rcu_assign_pointer(blkcg->blkg_hint, blkg); in blkg_lookup_slowpath()
283 return blkg; in blkg_lookup_slowpath()
298 struct blkcg_gq *blkg; in blkg_create() local
323 blkg = new_blkg; in blkg_create()
327 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); in blkg_create()
328 if (WARN_ON_ONCE(!blkg->parent)) { in blkg_create()
332 blkg_get(blkg->parent); in blkg_create()
339 if (blkg->pd[i] && pol->pd_init_fn) in blkg_create()
340 pol->pd_init_fn(blkg->pd[i]); in blkg_create()
345 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); in blkg_create()
347 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); in blkg_create()
348 list_add(&blkg->q_node, &q->blkg_list); in blkg_create()
353 if (blkg->pd[i] && pol->pd_online_fn) in blkg_create()
354 pol->pd_online_fn(blkg->pd[i]); in blkg_create()
357 blkg->online = true; in blkg_create()
361 return blkg; in blkg_create()
364 blkg_put(blkg); in blkg_create()
390 struct blkcg_gq *blkg; in blkg_lookup_create() local
395 blkg = blkg_lookup(blkcg, q); in blkg_lookup_create()
396 if (blkg) in blkg_lookup_create()
397 return blkg; in blkg_lookup_create()
400 blkg = __blkg_lookup(blkcg, q, true); in blkg_lookup_create()
401 if (blkg) in blkg_lookup_create()
415 blkg = __blkg_lookup(parent, q, false); in blkg_lookup_create()
416 if (blkg) { in blkg_lookup_create()
418 ret_blkg = blkg; in blkg_lookup_create()
425 blkg = blkg_create(pos, q, NULL); in blkg_lookup_create()
426 if (IS_ERR(blkg)) { in blkg_lookup_create()
427 blkg = ret_blkg; in blkg_lookup_create()
436 return blkg; in blkg_lookup_create()
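
blkg_lookup_create() retries the lookup under the queue lock and, if the blkg is still missing, walks towards the root to find the closest ancestor that already has one, then creates the missing blkgs one level at a time so that blkg_create() always finds blkg->parent in place. A minimal sketch of that parent-first creation order, assuming no failures; struct cg and ensure_node() are invented stand-ins (the real code loops and, on creation failure, falls back to the closest pre-existing ancestor).

#include <stdbool.h>
#include <stddef.h>

struct cg {
	struct cg *parent;
	bool has_node;		/* "a blkg exists for this cgroup" */
};

/* create nodes from the closest existing ancestor down to @cg */
static void ensure_node(struct cg *cg)
{
	if (!cg || cg->has_node)
		return;
	ensure_node(cg->parent);	/* parent first, as blkg_create() requires */
	cg->has_node = true;
}

int main(void)
{
	struct cg root = { NULL, true };
	struct cg mid  = { &root, false };
	struct cg leaf = { &mid, false };

	ensure_node(&leaf);		/* creates mid, then leaf */
	return (leaf.has_node && mid.has_node) ? 0 : 1;
}
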
439 static void blkg_destroy(struct blkcg_gq *blkg) in blkg_destroy() argument
441 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy()
444 lockdep_assert_held(&blkg->q->queue_lock); in blkg_destroy()
448 WARN_ON_ONCE(list_empty(&blkg->q_node)); in blkg_destroy()
449 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); in blkg_destroy()
454 if (blkg->pd[i] && pol->pd_offline_fn) in blkg_destroy()
455 pol->pd_offline_fn(blkg->pd[i]); in blkg_destroy()
458 blkg->online = false; in blkg_destroy()
460 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
461 list_del_init(&blkg->q_node); in blkg_destroy()
462 hlist_del_init_rcu(&blkg->blkcg_node); in blkg_destroy()
469 if (rcu_access_pointer(blkcg->blkg_hint) == blkg) in blkg_destroy()
476 percpu_ref_kill(&blkg->refcnt); in blkg_destroy()
487 struct blkcg_gq *blkg, *n; in blkg_destroy_all() local
492 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
493 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy_all()
496 blkg_destroy(blkg); in blkg_destroy_all()
519 struct blkcg_gq *blkg; in blkcg_reset_stats() local
530 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_reset_stats()
533 struct blkg_iostat_set *bis = per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_reset_stats()
536 memset(&blkg->iostat, 0, sizeof(blkg->iostat)); in blkcg_reset_stats()
541 if (blkg->pd[i] && pol->pd_reset_stats_fn) in blkcg_reset_stats()
542 pol->pd_reset_stats_fn(blkg->pd[i]); in blkcg_reset_stats()
551 const char *blkg_dev_name(struct blkcg_gq *blkg) in blkg_dev_name() argument
553 if (!blkg->q->disk || !blkg->q->disk->bdi->dev) in blkg_dev_name()
555 return bdi_dev_name(blkg->q->disk->bdi); in blkg_dev_name()
582 struct blkcg_gq *blkg; in blkcg_print_blkgs() local
586 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_blkgs()
587 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
588 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
589 total += prfill(sf, blkg->pd[pol->plid], data); in blkcg_print_blkgs()
590 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
609 const char *dname = blkg_dev_name(pd->blkg); in __blkg_prfill_u64()
688 struct blkcg_gq *blkg; in blkg_conf_prep() local
708 blkg = blkg_lookup_check(blkcg, pol, q); in blkg_conf_prep()
709 if (IS_ERR(blkg)) { in blkg_conf_prep()
710 ret = PTR_ERR(blkg); in blkg_conf_prep()
714 if (blkg) in blkg_conf_prep()
751 blkg = blkg_lookup_check(pos, pol, q); in blkg_conf_prep()
752 if (IS_ERR(blkg)) { in blkg_conf_prep()
753 ret = PTR_ERR(blkg); in blkg_conf_prep()
758 if (blkg) { in blkg_conf_prep()
761 blkg = blkg_create(pos, q, new_blkg); in blkg_conf_prep()
762 if (IS_ERR(blkg)) { in blkg_conf_prep()
763 ret = PTR_ERR(blkg); in blkg_conf_prep()
776 ctx->blkg = blkg; in blkg_conf_prep()
852 struct blkcg_gq *blkg; in blkcg_rstat_flush() local
860 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_rstat_flush()
861 struct blkcg_gq *parent = blkg->parent; in blkcg_rstat_flush()
862 struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_rstat_flush()
874 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); in blkcg_rstat_flush()
877 blkg_iostat_add(&blkg->iostat.cur, &delta); in blkcg_rstat_flush()
879 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); in blkcg_rstat_flush()
884 blkg_iostat_set(&delta, &blkg->iostat.cur); in blkcg_rstat_flush()
885 blkg_iostat_sub(&delta, &blkg->iostat.last); in blkcg_rstat_flush()
887 blkg_iostat_add(&blkg->iostat.last, &delta); in blkcg_rstat_flush()
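
blkcg_rstat_flush() never copies absolute counters up the hierarchy; it computes the delta since the last flush (cur minus last), adds that delta to the parent, and advances last so the same activity is not propagated twice. A single-counter sketch of that delta bookkeeping follows; struct stat_node and flush_one() are invented names, and the per-cpu and u64_stats machinery is omitted.

#include <stdio.h>

struct stat_node {
	unsigned long long cur;		/* what this level has accounted */
	unsigned long long last;	/* portion already pushed to the parent */
	struct stat_node *parent;
};

static void flush_one(struct stat_node *n)
{
	unsigned long long delta = n->cur - n->last;	/* new activity only */

	if (n->parent)
		n->parent->cur += delta;
	n->last += delta;		/* don't hand the same delta up twice */
}

int main(void)
{
	struct stat_node root = { 0, 0, NULL };
	struct stat_node child = { 0, 0, &root };

	child.cur = 100;
	flush_one(&child);		/* root.cur becomes 100 */
	child.cur = 150;
	flush_one(&child);		/* only the new 50 is propagated */

	printf("root.cur = %llu\n", root.cur);	/* prints 150 */
	return 0;
}
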
915 struct blkcg_gq *blkg = blk_queue_root_blkg(bdev_get_queue(bdev)); in blkcg_fill_root_iostats() local
941 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); in blkcg_fill_root_iostats()
942 blkg_iostat_set(&blkg->iostat.cur, &tmp); in blkcg_fill_root_iostats()
943 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); in blkcg_fill_root_iostats()
947 static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s) in blkcg_print_one_stat() argument
949 struct blkg_iostat_set *bis = &blkg->iostat; in blkcg_print_one_stat()
955 if (!blkg->online) in blkcg_print_one_stat()
958 dname = blkg_dev_name(blkg); in blkcg_print_one_stat()
981 if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) { in blkcg_print_one_stat()
983 atomic_read(&blkg->use_delay), in blkcg_print_one_stat()
984 atomic64_read(&blkg->delay_nsec)); in blkcg_print_one_stat()
990 if (!blkg->pd[i] || !pol->pd_stat_fn) in blkcg_print_one_stat()
993 pol->pd_stat_fn(blkg->pd[i], s); in blkcg_print_one_stat()
1002 struct blkcg_gq *blkg; in blkcg_print_stat() local
1010 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_stat()
1011 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
1012 blkcg_print_one_stat(blkg, sf); in blkcg_print_stat()
1013 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
1081 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, struct blkcg_gq, blkcg_node); in blkcg_destroy_blkgs() local
1083 struct request_queue *q = blkg->q; in blkcg_destroy_blkgs()
1097 blkg_destroy(blkg); in blkcg_destroy_blkgs()
1267 struct blkcg_gq *new_blkg, *blkg; in blkcg_init_queue() local
1282 blkg = blkg_create(&blkcg_root, q, new_blkg); in blkcg_init_queue()
1283 if (IS_ERR(blkg)) in blkcg_init_queue()
1285 q->root_blkg = blkg; in blkcg_init_queue()
1314 return PTR_ERR(blkg); in blkcg_init_queue()
1398 struct blkcg_gq *blkg, *pinned_blkg = NULL; in blkcg_activate_policy() local
1410 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1413 if (blkg->pd[pol->plid]) in blkcg_activate_policy()
1417 if (blkg == pinned_blkg) { in blkcg_activate_policy()
1422 pd = pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, blkg->blkcg); in blkcg_activate_policy()
1432 blkg_get(blkg); in blkcg_activate_policy()
1433 pinned_blkg = blkg; in blkcg_activate_policy()
1440 pd_prealloc = pd_alloc_fn(GFP_KERNEL, q, blkg->blkcg); in blkcg_activate_policy()
1447 blkg->pd[pol->plid] = pd; in blkcg_activate_policy()
1448 pd->blkg = blkg; in blkcg_activate_policy()
1454 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) in blkcg_activate_policy()
1455 pol->pd_init_fn(blkg->pd[pol->plid]); in blkcg_activate_policy()
1473 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1474 struct blkcg *blkcg = blkg->blkcg; in blkcg_activate_policy()
1477 if (blkg->pd[pol->plid]) { in blkcg_activate_policy()
1478 pol->pd_free_fn(blkg->pd[pol->plid]); in blkcg_activate_policy()
1479 blkg->pd[pol->plid] = NULL; in blkcg_activate_policy()
1500 struct blkcg_gq *blkg; in blkcg_deactivate_policy() local
1512 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1513 struct blkcg *blkcg = blkg->blkcg; in blkcg_deactivate_policy()
1516 if (blkg->pd[pol->plid]) { in blkcg_deactivate_policy()
1518 pol->pd_offline_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1519 pol->pd_free_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1520 blkg->pd[pol->plid] = NULL; in blkcg_deactivate_policy()
1654 struct blkcg_gq *blkg = bio->bi_blkg; in __blkcg_punt_bio_submit() local
1660 if (!blkg->parent) in __blkcg_punt_bio_submit()
1663 spin_lock_bh(&blkg->async_bio_lock); in __blkcg_punt_bio_submit()
1664 bio_list_add(&blkg->async_bios, bio); in __blkcg_punt_bio_submit()
1665 spin_unlock_bh(&blkg->async_bio_lock); in __blkcg_punt_bio_submit()
1667 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work); in __blkcg_punt_bio_submit()
1677 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) in blkcg_scale_delay() argument
1679 u64 old = atomic64_read(&blkg->delay_start); in blkcg_scale_delay()
1682 if (atomic_read(&blkg->use_delay) < 0) in blkcg_scale_delay()
1699 if (time_before64(old + NSEC_PER_SEC, now) && atomic64_cmpxchg(&blkg->delay_start, old, now) == old) { in blkcg_scale_delay()
1700 u64 cur = atomic64_read(&blkg->delay_nsec); in blkcg_scale_delay()
1701 u64 sub = min_t(u64, blkg->last_delay, now - old); in blkcg_scale_delay()
1702 int cur_use = atomic_read(&blkg->use_delay); in blkcg_scale_delay()
1708 if (cur_use < blkg->last_use) in blkcg_scale_delay()
1709 sub = max_t(u64, sub, blkg->last_delay >> 1); in blkcg_scale_delay()
1718 atomic64_set(&blkg->delay_nsec, 0); in blkcg_scale_delay()
1719 blkg->last_delay = 0; in blkcg_scale_delay()
1721 atomic64_sub(sub, &blkg->delay_nsec); in blkcg_scale_delay()
1722 blkg->last_delay = cur - sub; in blkcg_scale_delay()
1724 blkg->last_use = cur_use; in blkcg_scale_delay()
1734 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) in blkcg_maybe_throttle_blkg() argument
1743 while (blkg->parent) { in blkcg_maybe_throttle_blkg()
1744 int use_delay = atomic_read(&blkg->use_delay); in blkcg_maybe_throttle_blkg()
1749 blkcg_scale_delay(blkg, now); in blkcg_maybe_throttle_blkg()
1750 this_delay = atomic64_read(&blkg->delay_nsec); in blkcg_maybe_throttle_blkg()
1756 blkg = blkg->parent; in blkcg_maybe_throttle_blkg()
1804 struct blkcg_gq *blkg; in blkcg_maybe_throttle_current() local
1817 blkg = blkg_lookup(blkcg, q); in blkcg_maybe_throttle_current()
1818 if (!blkg) in blkcg_maybe_throttle_current()
1820 if (!blkg_tryget(blkg)) in blkcg_maybe_throttle_current()
1824 blkcg_maybe_throttle_blkg(blkg, use_memdelay); in blkcg_maybe_throttle_current()
1825 blkg_put(blkg); in blkcg_maybe_throttle_current()
1878 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) in blkcg_add_delay() argument
1880 if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) in blkcg_add_delay()
1882 blkcg_scale_delay(blkg, now); in blkcg_add_delay()
1883 atomic64_add(delta, &blkg->delay_nsec); in blkcg_add_delay()
1898 struct blkcg_gq *blkg, *ret_blkg = NULL; in blkg_tryget_closest() local
1901 blkg = blkg_lookup_create(css_to_blkcg(css), bdev_get_queue(bio->bi_bdev)); in blkg_tryget_closest()
1903 while (blkg) { in blkg_tryget_closest()
1904 if (blkg_tryget(blkg)) { in blkg_tryget_closest()
1905 ret_blkg = blkg; in blkg_tryget_closest()
1908 blkg = blkg->parent; in blkg_tryget_closest()
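
blkg_tryget_closest() climbs blkg->parent until a reference can actually be taken (blkg_tryget() fails once the percpu_ref has been killed), i.e. it associates the bio with the closest ancestor that is not already dying. Below is a toy version with a plain integer refcount in place of the percpu_ref; struct node, tryget() and tryget_closest() are invented for this sketch.

#include <stdio.h>

struct node {
	int refs;		/* 0 means "already dying", tryget must fail */
	struct node *parent;
};

static int tryget(struct node *n)
{
	if (n->refs == 0)
		return 0;
	n->refs++;
	return 1;
}

/* walk up the parent chain until a reference can be taken */
static struct node *tryget_closest(struct node *n)
{
	while (n) {
		if (tryget(n))
			return n;	/* closest live ancestor */
		n = n->parent;
	}
	return NULL;
}

int main(void)
{
	struct node root = { 1, NULL };
	struct node dying = { 0, &root };
	struct node *got = tryget_closest(&dying);

	printf("got %s\n", got == &root ? "root" : "none");
	return 0;
}
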