Lines matching refs:fs_info
27 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) in get_restripe_target() argument
29 struct btrfs_balance_control *bctl = fs_info->balance_ctl; in get_restripe_target()
56 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) in btrfs_reduce_alloc_profile() argument
58 u64 num_devices = fs_info->fs_devices->rw_devices; in btrfs_reduce_alloc_profile()
67 spin_lock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
68 target = get_restripe_target(fs_info, flags); in btrfs_reduce_alloc_profile()
70 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
73 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
98 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) in btrfs_get_alloc_profile() argument
105 seq = read_seqbegin(&fs_info->profiles_lock); in btrfs_get_alloc_profile()
108 flags |= fs_info->avail_data_alloc_bits; in btrfs_get_alloc_profile()
110 flags |= fs_info->avail_system_alloc_bits; in btrfs_get_alloc_profile()
112 flags |= fs_info->avail_metadata_alloc_bits; in btrfs_get_alloc_profile()
113 } while (read_seqretry(&fs_info->profiles_lock, seq)); in btrfs_get_alloc_profile()
115 return btrfs_reduce_alloc_profile(fs_info, flags); in btrfs_get_alloc_profile()
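For context, the btrfs_get_alloc_profile() fragments above follow the kernel's seqlock read-retry pattern on fs_info->profiles_lock: the reader merges in the avail_*_alloc_bits, and retries if a writer raced with it, before reducing the result. A minimal sketch of how the listed lines fit together, with the BTRFS_BLOCK_GROUP_* branch structure filled in as an assumption (it is not part of the listed fragments):

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
        unsigned int seq;
        u64 flags;

        do {
                /* Start from the caller's flags again on every retry. */
                flags = orig_flags;
                seq = read_seqbegin(&fs_info->profiles_lock);

                if (flags & BTRFS_BLOCK_GROUP_DATA)
                        flags |= fs_info->avail_data_alloc_bits;
                else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                        flags |= fs_info->avail_system_alloc_bits;
                else if (flags & BTRFS_BLOCK_GROUP_METADATA)
                        flags |= fs_info->avail_metadata_alloc_bits;
        } while (read_seqretry(&fs_info->profiles_lock, seq));

        return btrfs_reduce_alloc_profile(fs_info, flags);
}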
135 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info)) in btrfs_put_block_group()
144 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, in btrfs_put_block_group()
263 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_next_block_group() local
266 read_lock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
272 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
274 return btrfs_lookup_first_block_group(fs_info, next_bytenr); in btrfs_next_block_group()
283 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
302 struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, in btrfs_inc_nocow_writers() argument
308 bg = btrfs_lookup_block_group(fs_info, bytenr); in btrfs_inc_nocow_writers()
355 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, in btrfs_dec_block_group_reservations() argument
360 bg = btrfs_lookup_block_group(fs_info, start); in btrfs_dec_block_group_reservations()
466 struct btrfs_fs_info *fs_info = block_group->fs_info; in fragment_free_space() local
470 fs_info->nodesize : fs_info->sectorsize; in fragment_free_space()
492 struct btrfs_fs_info *info = block_group->fs_info; in add_new_free_space()
532 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_extent_tree_free() local
548 extent_root = btrfs_extent_root(fs_info, last); in load_extent_tree_free()
582 if (btrfs_fs_closing(fs_info) > 1) { in load_extent_tree_free()
595 rwsem_is_contended(&fs_info->commit_root_sem)) { in load_extent_tree_free()
597 up_read(&fs_info->commit_root_sem); in load_extent_tree_free()
601 down_read(&fs_info->commit_root_sem); in load_extent_tree_free()
637 fs_info->nodesize; in load_extent_tree_free()
662 struct btrfs_fs_info *fs_info; in caching_thread() local
668 fs_info = block_group->fs_info; in caching_thread()
671 down_read(&fs_info->commit_root_sem); in caching_thread()
673 if (btrfs_test_opt(fs_info, SPACE_CACHE)) { in caching_thread()
697 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && in caching_thread()
698 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) in caching_thread()
722 up_read(&fs_info->commit_root_sem); in caching_thread()
734 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_cache_block_group() local
739 if (btrfs_is_zoned(fs_info)) in btrfs_cache_block_group()
768 write_lock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
770 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); in btrfs_cache_block_group()
771 write_unlock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
775 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); in btrfs_cache_block_group()
785 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) in clear_avail_alloc_bits() argument
790 write_seqlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
792 fs_info->avail_data_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
794 fs_info->avail_metadata_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
796 fs_info->avail_system_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
797 write_sequnlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
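The write side of the same seqlock is visible in clear_avail_alloc_bits(): updates to the avail_*_alloc_bits happen under write_seqlock(), which forces concurrent readers in btrfs_get_alloc_profile() to retry. A sketch of the listed lines; the extra_flags computation is an assumption (that line is not in the listing), and set_avail_alloc_bits() further down mirrors this with |= under the same lock:

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        /* Assumed: only the extended (profile) bits are cleared. */
        u64 extra_flags = chunk_to_extended(flags) & BTRFS_EXTENDED_PROFILE_MASK;

        write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits &= ~extra_flags;
        write_sequnlock(&fs_info->profiles_lock);
}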
808 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) in clear_incompat_bg_bits() argument
816 struct list_head *head = &fs_info->space_info; in clear_incompat_bg_bits()
832 btrfs_clear_fs_incompat(fs_info, RAID56); in clear_incompat_bg_bits()
834 btrfs_clear_fs_incompat(fs_info, RAID1C34); in clear_incompat_bg_bits()
842 struct btrfs_fs_info *fs_info = trans->fs_info; in remove_block_group_item() local
847 root = btrfs_block_group_root(fs_info); in remove_block_group_item()
865 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_remove_block_group() local
878 block_group = btrfs_lookup_block_group(fs_info, group_start); in btrfs_remove_block_group()
888 btrfs_free_ref_tree_range(fs_info, block_group->start, in btrfs_remove_block_group()
895 cluster = &fs_info->data_alloc_cluster; in btrfs_remove_block_group()
904 cluster = &fs_info->meta_alloc_cluster; in btrfs_remove_block_group()
953 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
955 &fs_info->block_group_cache_tree); in btrfs_remove_block_group()
961 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
972 clear_avail_alloc_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
975 clear_incompat_bg_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
984 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
989 list_for_each_entry(ctl, &fs_info->caching_block_groups, list) { in btrfs_remove_block_group()
999 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1017 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { in btrfs_remove_block_group()
1096 em_tree = &fs_info->mapping_tree; in btrfs_remove_block_group()
1108 btrfs_delayed_refs_rsv_release(fs_info, 1); in btrfs_remove_block_group()
1114 struct btrfs_fs_info *fs_info, const u64 chunk_offset) in btrfs_start_trans_remove_block_group() argument
1116 struct btrfs_root *root = btrfs_block_group_root(fs_info); in btrfs_start_trans_remove_block_group()
1117 struct extent_map_tree *em_tree = &fs_info->mapping_tree; in btrfs_start_trans_remove_block_group()
1211 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, in inc_block_group_ro()
1218 if (btrfs_is_zoned(cache->fs_info)) { in inc_block_group_ro()
1230 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { in inc_block_group_ro()
1231 btrfs_info(cache->fs_info, in inc_block_group_ro()
1233 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); in inc_block_group_ro()
1241 struct btrfs_fs_info *fs_info = bg->fs_info; in clean_pinned_extents() local
1247 spin_lock(&fs_info->trans_lock); in clean_pinned_extents()
1248 if (trans->transaction->list.prev != &fs_info->trans_list) { in clean_pinned_extents()
1253 spin_unlock(&fs_info->trans_lock); in clean_pinned_extents()
1265 mutex_lock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1276 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1287 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) in btrfs_delete_unused_bgs() argument
1292 const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); in btrfs_delete_unused_bgs()
1295 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_delete_unused_bgs()
1298 if (btrfs_fs_closing(fs_info)) in btrfs_delete_unused_bgs()
1305 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) in btrfs_delete_unused_bgs()
1308 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1309 while (!list_empty(&fs_info->unused_bgs)) { in btrfs_delete_unused_bgs()
1312 block_group = list_first_entry(&fs_info->unused_bgs, in btrfs_delete_unused_bgs()
1323 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1325 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); in btrfs_delete_unused_bgs()
1335 if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && in btrfs_delete_unused_bgs()
1340 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1382 trans = btrfs_start_trans_remove_block_group(fs_info, in btrfs_delete_unused_bgs()
1406 spin_lock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1408 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1410 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1414 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1420 btrfs_space_info_update_bytes_pinned(fs_info, space_info, in btrfs_delete_unused_bgs()
1435 if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) in btrfs_delete_unused_bgs()
1442 trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || in btrfs_delete_unused_bgs()
1443 btrfs_is_zoned(fs_info); in btrfs_delete_unused_bgs()
1467 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1475 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1482 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1484 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1485 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1490 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1492 btrfs_discard_punt_unused_bgs_list(fs_info); in btrfs_delete_unused_bgs()
1497 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_unused() local
1499 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1503 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1505 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
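btrfs_mark_bg_unused() shows the pattern used for the per-fs_info list heads here: take the dedicated spinlock (unused_bgs_lock), link the block group's bg_list node onto fs_info->unused_bgs, drop the lock. A sketch; the list_empty() guard and the extra block-group reference are assumptions about how double-insertion is avoided and are not among the listed lines:

void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
        struct btrfs_fs_info *fs_info = bg->fs_info;

        spin_lock(&fs_info->unused_bgs_lock);
        if (list_empty(&bg->bg_list)) {
                /* Assumed: hold a reference while the bg sits on the list. */
                btrfs_get_block_group(bg);
                list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
        }
        spin_unlock(&fs_info->unused_bgs_lock);
}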
1523 static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) in btrfs_should_reclaim() argument
1525 if (btrfs_is_zoned(fs_info)) in btrfs_should_reclaim()
1526 return btrfs_zoned_should_reclaim(fs_info); in btrfs_should_reclaim()
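btrfs_should_reclaim() is a thin gate in front of the reclaim worker: on zoned filesystems the decision is delegated to btrfs_zoned_should_reclaim(). A sketch, with the non-zoned fallback (return true) filled in as an assumption:

static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
{
        if (btrfs_is_zoned(fs_info))
                return btrfs_zoned_should_reclaim(fs_info);

        /* Assumed default for non-zoned filesystems. */
        return true;
}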
1532 struct btrfs_fs_info *fs_info = in btrfs_reclaim_bgs_work() local
1537 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_reclaim_bgs_work()
1540 if (btrfs_fs_closing(fs_info)) in btrfs_reclaim_bgs_work()
1543 if (!btrfs_should_reclaim(fs_info)) in btrfs_reclaim_bgs_work()
1546 sb_start_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1548 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { in btrfs_reclaim_bgs_work()
1549 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1557 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { in btrfs_reclaim_bgs_work()
1558 btrfs_exclop_finish(fs_info); in btrfs_reclaim_bgs_work()
1559 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1563 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1569 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); in btrfs_reclaim_bgs_work()
1570 while (!list_empty(&fs_info->reclaim_bgs)) { in btrfs_reclaim_bgs_work()
1574 bg = list_first_entry(&fs_info->reclaim_bgs, in btrfs_reclaim_bgs_work()
1580 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1600 if (btrfs_fs_closing(fs_info)) { in btrfs_reclaim_bgs_work()
1617 btrfs_info(fs_info, in btrfs_reclaim_bgs_work()
1622 ret = btrfs_relocate_chunk(fs_info, bg->start); in btrfs_reclaim_bgs_work()
1625 btrfs_err(fs_info, "error relocating chunk %llu", in btrfs_reclaim_bgs_work()
1631 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1633 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1634 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_reclaim_bgs_work()
1635 btrfs_exclop_finish(fs_info); in btrfs_reclaim_bgs_work()
1636 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1639 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) in btrfs_reclaim_bgs() argument
1641 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
1642 if (!list_empty(&fs_info->reclaim_bgs)) in btrfs_reclaim_bgs()
1643 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); in btrfs_reclaim_bgs()
1644 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
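btrfs_reclaim_bgs() is almost fully covered by the fragments above: it kicks the reclaim worker only when fs_info->reclaim_bgs is non-empty, and it checks that under unused_bgs_lock so the test cannot race with the list manipulation in btrfs_mark_bg_to_reclaim() below. Assembled from the listed lines:

void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
{
        spin_lock(&fs_info->unused_bgs_lock);
        if (!list_empty(&fs_info->reclaim_bgs))
                queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
        spin_unlock(&fs_info->unused_bgs_lock);
}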
1649 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_to_reclaim() local
1651 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
1655 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); in btrfs_mark_bg_to_reclaim()
1657 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
1660 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, in read_bg_from_eb() argument
1674 em_tree = &fs_info->mapping_tree; in read_bg_from_eb()
1679 btrfs_err(fs_info, in read_bg_from_eb()
1686 btrfs_err(fs_info, in read_bg_from_eb()
1699 btrfs_err(fs_info, in read_bg_from_eb()
1711 static int find_first_block_group(struct btrfs_fs_info *fs_info, in find_first_block_group() argument
1715 struct btrfs_root *root = btrfs_block_group_root(fs_info); in find_first_block_group()
1722 return read_bg_from_eb(fs_info, &found_key, path); in find_first_block_group()
1728 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) in set_avail_alloc_bits() argument
1733 write_seqlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
1735 fs_info->avail_data_alloc_bits |= extra_flags; in set_avail_alloc_bits()
1737 fs_info->avail_metadata_alloc_bits |= extra_flags; in set_avail_alloc_bits()
1739 fs_info->avail_system_alloc_bits |= extra_flags; in set_avail_alloc_bits()
1740 write_sequnlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
1758 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, in btrfs_rmap_block() argument
1771 em = btrfs_get_chunk_map(fs_info, chunk_start, 1); in btrfs_rmap_block()
1841 struct btrfs_fs_info *fs_info = cache->fs_info; in exclude_super_stripes() local
1842 const bool zoned = btrfs_is_zoned(fs_info); in exclude_super_stripes()
1851 ret = btrfs_add_excluded_extent(fs_info, cache->start, in exclude_super_stripes()
1859 ret = btrfs_rmap_block(fs_info, cache->start, NULL, in exclude_super_stripes()
1866 btrfs_err(fs_info, in exclude_super_stripes()
1877 ret = btrfs_add_excluded_extent(fs_info, logical[nr], in exclude_super_stripes()
1891 struct btrfs_fs_info *fs_info, u64 start) in btrfs_create_block_group_cache() argument
1908 cache->fs_info = fs_info; in btrfs_create_block_group_cache()
1909 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); in btrfs_create_block_group_cache()
1937 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) in check_chunk_block_group_mappings() argument
1939 struct extent_map_tree *map_tree = &fs_info->mapping_tree; in check_chunk_block_group_mappings()
1957 bg = btrfs_lookup_block_group(fs_info, em->start); in check_chunk_block_group_mappings()
1959 btrfs_err(fs_info, in check_chunk_block_group_mappings()
1969 btrfs_err(fs_info, in check_chunk_block_group_mappings()
2104 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) in fill_dummy_bgs() argument
2106 struct extent_map_tree *em_tree = &fs_info->mapping_tree; in fill_dummy_bgs()
2117 bg = btrfs_create_block_group_cache(fs_info, em->start); in fill_dummy_bgs()
2129 ret = btrfs_add_block_group_cache(fs_info, bg); in fill_dummy_bgs()
2146 btrfs_add_bg_to_space_info(fs_info, bg); in fill_dummy_bgs()
2148 set_avail_alloc_bits(fs_info, bg->flags); in fill_dummy_bgs()
2151 btrfs_init_global_block_rsv(fs_info); in fill_dummy_bgs()
2276 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_block_group_item() local
2278 struct btrfs_root *root = btrfs_block_group_root(fs_info); in insert_block_group_item()
2298 struct btrfs_fs_info *fs_info = device->fs_info; in insert_dev_extent() local
2299 struct btrfs_root *root = fs_info->dev_root; in insert_dev_extent()
2342 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_dev_extents() local
2351 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); in insert_dev_extents()
2367 mutex_lock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
2377 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
2392 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_create_pending_block_groups() local
2412 mutex_lock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2414 mutex_unlock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2435 btrfs_delayed_refs_rsv_release(fs_info, 1); in btrfs_create_pending_block_groups()
2445 static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) in calculate_global_root_id() argument
2450 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) in calculate_global_root_id()
2454 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) in calculate_global_root_id()
2458 div64_u64_rem(offset, fs_info->nr_global_roots, &index); in calculate_global_root_id()
2466 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_make_block_group() local
2472 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); in btrfs_make_block_group()
2481 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); in btrfs_make_block_group()
2483 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) in btrfs_make_block_group()
2509 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); in btrfs_make_block_group()
2512 ret = btrfs_add_block_group_cache(fs_info, cache); in btrfs_make_block_group()
2523 trace_btrfs_add_block_group(fs_info, cache, 1); in btrfs_make_block_group()
2524 btrfs_add_bg_to_space_info(fs_info, cache); in btrfs_make_block_group()
2525 btrfs_update_global_block_rsv(fs_info); in btrfs_make_block_group()
2540 set_avail_alloc_bits(fs_info, type); in btrfs_make_block_group()
2556 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_inc_block_group_ro() local
2558 struct btrfs_root *root = btrfs_block_group_root(fs_info); in btrfs_inc_block_group_ro()
2569 if (sb_rdonly(fs_info->sb)) { in btrfs_inc_block_group_ro()
2570 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2572 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2588 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2592 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2595 ret = btrfs_wait_for_commit(fs_info, transid); in btrfs_inc_block_group_ro()
2607 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2627 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); in btrfs_inc_block_group_ro()
2635 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); in btrfs_inc_block_group_ro()
2644 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2645 mutex_lock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2647 mutex_unlock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2650 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2666 if (btrfs_is_zoned(cache->fs_info)) { in btrfs_dec_block_group_ro()
2688 struct btrfs_fs_info *fs_info = trans->fs_info; in update_block_group_item() local
2690 struct btrfs_root *root = btrfs_block_group_root(fs_info); in update_block_group_item()
2725 struct btrfs_fs_info *fs_info = block_group->fs_info; in cache_save_setup() local
2726 struct btrfs_root *root = fs_info->tree_root; in cache_save_setup()
2735 if (!btrfs_test_opt(fs_info, SPACE_CACHE)) in cache_save_setup()
2803 ret = btrfs_check_trunc_cache_free_space(fs_info, in cache_save_setup()
2804 &fs_info->global_block_rsv); in cache_save_setup()
2815 !btrfs_test_opt(fs_info, SPACE_CACHE)) { in cache_save_setup()
2848 cache_size *= fs_info->sectorsize; in cache_save_setup()
2888 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_setup_space_cache() local
2894 !btrfs_test_opt(fs_info, SPACE_CACHE)) in btrfs_setup_space_cache()
2926 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_start_dirty_block_groups() local
3045 btrfs_delayed_refs_rsv_release(fs_info, 1); in btrfs_start_dirty_block_groups()
3083 btrfs_cleanup_dirty_bgs(cur_trans, fs_info); in btrfs_start_dirty_block_groups()
3092 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_dirty_block_groups() local
3193 btrfs_delayed_refs_rsv_release(fs_info, 1); in btrfs_write_dirty_block_groups()
3242 struct btrfs_fs_info *info = trans->fs_info; in btrfs_update_block_group()
3375 trace_btrfs_space_reservation(cache->fs_info, "space_info", in btrfs_add_reserved_bytes()
3377 btrfs_space_info_update_bytes_may_use(cache->fs_info, in btrfs_add_reserved_bytes()
3387 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_add_reserved_bytes()
3422 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_free_reserved_bytes()
3437 static int should_alloc_chunk(struct btrfs_fs_info *fs_info, in should_alloc_chunk() argument
3451 thresh = btrfs_super_total_bytes(fs_info->super_copy); in should_alloc_chunk()
3465 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); in btrfs_force_chunk_alloc()
3529 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); in do_chunk_alloc()
3674 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_chunk_alloc() local
3714 space_info = btrfs_find_space_info(fs_info, flags); in btrfs_chunk_alloc()
3721 should_alloc = should_alloc_chunk(fs_info, space_info, force); in btrfs_chunk_alloc()
3743 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3744 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3755 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3770 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { in btrfs_chunk_alloc()
3771 fs_info->data_chunk_allocations++; in btrfs_chunk_alloc()
3772 if (!(fs_info->data_chunk_allocations % in btrfs_chunk_alloc()
3773 fs_info->metadata_ratio)) in btrfs_chunk_alloc()
3774 force_metadata_allocation(fs_info); in btrfs_chunk_alloc()
3808 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3813 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) in get_profile_num_devs() argument
3819 num_dev = fs_info->fs_devices->rw_devices; in get_profile_num_devs()
3828 struct btrfs_fs_info *fs_info = trans->fs_info; in reserve_chunk_space() local
3837 lockdep_assert_held(&fs_info->chunk_mutex); in reserve_chunk_space()
3839 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); in reserve_chunk_space()
3844 if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { in reserve_chunk_space()
3845 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", in reserve_chunk_space()
3847 btrfs_dump_space_info(fs_info, info, 0, 0); in reserve_chunk_space()
3851 u64 flags = btrfs_system_alloc_profile(fs_info); in reserve_chunk_space()
3868 ret = btrfs_zoned_activate_one_bg(fs_info, info, true); in reserve_chunk_space()
3887 ret = btrfs_block_rsv_add(fs_info, in reserve_chunk_space()
3888 &fs_info->chunk_block_rsv, in reserve_chunk_space()
3901 struct btrfs_fs_info *fs_info = trans->fs_info; in check_system_chunk() local
3902 const u64 num_devs = get_profile_num_devs(fs_info, type); in check_system_chunk()
3906 bytes = btrfs_calc_metadata_size(fs_info, num_devs) + in check_system_chunk()
3907 btrfs_calc_insert_metadata_size(fs_info, 1); in check_system_chunk()
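check_system_chunk() sizes the system-chunk reservation for a pending chunk allocation: one metadata update per profile device plus one item insertion, as the listed lines show. A sketch, assuming the computed size is handed on to reserve_chunk_space() (that call is not among the listed fragments):

void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        const u64 num_devs = get_profile_num_devs(fs_info, type);
        u64 bytes;

        /* num_devs device items to update and one chunk item to add/remove. */
        bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
                btrfs_calc_insert_metadata_size(fs_info, 1);

        reserve_chunk_space(trans, bytes, type);
}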
3931 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_reserve_chunk_metadata() local
3935 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); in btrfs_reserve_chunk_metadata()
3937 bytes = btrfs_calc_metadata_size(fs_info, 1); in btrfs_reserve_chunk_metadata()
3939 mutex_lock(&fs_info->chunk_mutex); in btrfs_reserve_chunk_metadata()
3941 mutex_unlock(&fs_info->chunk_mutex); in btrfs_reserve_chunk_metadata()
4093 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_unfreeze_block_group() local
4104 em_tree = &fs_info->mapping_tree; in btrfs_unfreeze_block_group()