/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

enum btrfs_block_group_size_class {
	/* Unset */
	BTRFS_BG_SZ_NONE,
	/* 0 < size <= 128K */
	BTRFS_BG_SZ_SMALL,
	/* 128K < size <= 8M */
	BTRFS_BG_SZ_MEDIUM,
	/* 8M < size < BG_LENGTH */
	BTRFS_BG_SZ_LARGE,
};

/*
 * This describes the state of the block_group for async discard. It is
 * needed because discard is a two-pass operation in which extent
 * discarding is prioritized over bitmap discarding.
 * BTRFS_DISCARD_RESET_CURSOR is set when we are resetting between lists
 * to prevent contention for discard state variables (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need
 * one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very
 * few chunks already allocated.  This is used as part of the clustering
 * code to help make sure we have a good pool of storage to cluster in,
 * without filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called
 * from find_free_extent(), which also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
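
/*
 * Example (illustrative sketch only, not part of this header's API
 * surface): a caller that wants a new metadata chunk only if the
 * allocator decides one is really needed would pass CHUNK_ALLOC_NO_FORCE:
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_metadata_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;
 *
 * Here 'trans' is assumed to be a running btrfs_trans_handle owned by
 * the caller.
 */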

/* Block group flags set at runtime */
enum btrfs_block_group_flags {
	BLOCK_GROUP_FLAG_IREF,
	BLOCK_GROUP_FLAG_REMOVED,
	BLOCK_GROUP_FLAG_TO_COPY,
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
	/* Does the block group need to be added to the free space tree? */
	BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
	/* Indicate that the block group is placed on a sequential zone */
	BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
	/*
	 * Indicate that the block group is in the list of new block groups
	 * of a transaction.
	 */
	BLOCK_GROUP_FLAG_NEW,
};

enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	/* Track progress of caching during allocation. */
	atomic_t progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M
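
/*
 * Example (illustrative sketch only): an allocator-side caller that
 * needs 'num_bytes' of free space can kick off caching without blocking
 * for completion and then wait until at least that much has been found
 * (waiters are woken as the caching thread makes progress in
 * CACHING_CTL_WAKE_UP increments):
 *
 *	ret = btrfs_cache_block_group(cache, false);
 *	if (ret < 0)
 *		return ret;
 *	btrfs_wait_block_group_cache_progress(cache, num_bytes);
 *
 * 'cache' and 'num_bytes' are assumed to be provided by the caller.
 */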

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * The last committed used bytes of this block group.  If the above
	 * @used is still the same as @commit_used, we don't need to update
	 * the block group item of this block group.
	 */
	u64 commit_used;
	/*
	 * If the free space extent count exceeds this number, convert the
	 * block group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert
	 * the block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for delayed data space allocation, because only data
	 * space allocation and the related metadata update can be done
	 * across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	unsigned long runtime_flags;

	unsigned int ro;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_cluster for this block group.  Today it
	 * will only have one entry, but that may change.
	 */
	struct list_head cluster_list;

	/*
	 * Used for several lists:
	 *
	 * 1) struct btrfs_fs_info::unused_bgs
	 * 2) struct btrfs_fs_info::reclaim_bgs
	 * 3) struct btrfs_transaction::deleted_bgs
	 * 4) struct btrfs_trans_handle::new_bgs
	 */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0.  This is to prevent them from
	 * being reused while some task is still using the block group after
	 * it was deleted - we want to make sure they can only be reused for
	 * new block groups after that task is done with the deleted block
	 * group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0).  Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that
	 * step.  This is to prevent races between block group relocation and
	 * nocow writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation.  This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
	enum btrfs_block_group_size_class size_class;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
{
	lockdep_assert_held(&bg->lock);

	return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
}
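
/*
 * Example (illustrative sketch only): btrfs_is_block_group_used() must be
 * called with bg->lock held, otherwise the lockdep assertion above fires:
 *
 *	spin_lock(&bg->lock);
 *	used = btrfs_is_block_group_used(bg);
 *	spin_unlock(&bg->lock);
 *
 * 'bg' and 'used' (a bool) are assumed to be declared by the caller.
 */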

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group);
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
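
/*
 * Example (illustrative sketch only): a task tearing down a block group
 * can wait for all outstanding allocation reservations (the 'reservations'
 * counter documented in struct btrfs_block_group) to be released:
 *
 *	btrfs_wait_block_group_reservations(bg);
 *
 * while a reservation holder drops its count by passing a logical address
 * inside the block group once the matching ordered extent exists:
 *
 *	btrfs_dec_block_group_reservations(fs_info, bg->start);
 *
 * 'bg' and 'fs_info' are assumed to be owned by the caller.
 */
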
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
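
/*
 * Example (illustrative sketch only): a nocow write first registers
 * itself against the block group containing 'bytenr' and unregisters
 * once its ordered extent exists (or on error):
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
 *	if (!bg)
 *		goto fallback_to_cow;	// assumption: COW the write instead
 *	... create the ordered extent ...
 *	btrfs_dec_nocow_writers(bg);
 *
 * The fallback label and policy are illustrative assumptions, not taken
 * from this header.
 */
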
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
int btrfs_add_new_free_space(struct btrfs_block_group *block_group,
			     u64 start, u64 end, u64 *total_added_ret);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc,
			     bool force_wrong_size_class);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
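
/*
 * Example (illustrative sketch only): a task that must keep a possibly
 * deleted block group's logical address and device extents from being
 * reused brackets its work with the 'frozen' counter documented in
 * struct btrfs_block_group:
 *
 *	btrfs_freeze_block_group(cache);
 *	... use the block group's range ...
 *	btrfs_unfreeze_block_group(cache);
 *
 * 'cache' is assumed to be a block group the caller holds a reference on.
 */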

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
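
/*
 * Example (illustrative sketch only): swapfile activation bumps the
 * swap_extents count once per extent and later releases however many it
 * took, in a single call:
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		goto out;	// assumption: the block group refused new
 *				// swap extents, abort activation
 *	bg_extent_count++;
 *	...
 *	btrfs_dec_block_group_swap_extents(bg, bg_extent_count);
 *
 * 'bg_extent_count', the label and the abort policy are illustrative
 * assumptions.
 */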

enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class);
bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg);
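
/*
 * Example (illustrative sketch only): an allocation path can classify a
 * pending allocation by size (per the thresholds documented for
 * enum btrfs_block_group_size_class) and try to bind the block group to
 * that class:
 *
 *	enum btrfs_block_group_size_class size_class;
 *
 *	size_class = btrfs_calc_block_group_size_class(num_bytes);
 *	if (btrfs_block_group_should_use_size_class(bg)) {
 *		ret = btrfs_use_block_group_size_class(bg, size_class, false);
 *		if (ret)
 *			return ret;	// assumption: caller tries another
 *					// block group
 *	}
 *
 * 'num_bytes', 'bg' and 'ret' are assumed to come from the caller.
 */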

#endif /* BTRFS_BLOCK_GROUP_H */