1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "misc.h"
4 #include "ctree.h"
5 #include "space-info.h"
6 #include "sysfs.h"
7 #include "volumes.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
10 #include "transaction.h"
11 #include "block-group.h"
12 #include "zoned.h"
13 #include "fs.h"
14 #include "accessors.h"
15 #include "extent-tree.h"
16
17 /*
18 * HOW DOES SPACE RESERVATION WORK
19 *
20 * If you want to know about delalloc specifically, there is a separate comment
21 * for that with the delalloc code. This comment is about how the whole system
22 * works generally.
23 *
24 * BASIC CONCEPTS
25 *
26 * 1) space_info. This is the ultimate arbiter of how much space we can use.
27 * There's a description of the bytes_ fields with the struct declaration,
28 * refer to that for specifics on each field. Suffice it to say that for
29 * reservations we care about total_bytes - SUM(space_info->bytes_) when
30 * determining if there is space to make an allocation. There is a space_info
31 * for METADATA, SYSTEM, and DATA areas.
32 *
33 * 2) block_rsv's. These are basically buckets for every different type of
34 * metadata reservation we have. You can see the comment in the block_rsv
35 * code on the rules for each type, but generally block_rsv->reserved is how
36 * much space is accounted for in space_info->bytes_may_use.
37 *
38 * 3) btrfs_calc*_size. These are the worst case calculations we use based
39 * on the number of items we will want to modify. We have one for changing
40 * items, and one for inserting new items. Generally we use these helpers to
41 * determine the size of the block reserves, and then use the actual bytes
42 * values to adjust the space_info counters.
43 *
44 * MAKING RESERVATIONS, THE NORMAL CASE
45 *
46 * We call into either btrfs_reserve_data_bytes() or
47 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
48 * num_bytes we want to reserve.
49 *
50 * ->reserve
51 * space_info->bytes_may_use += num_bytes
52 *
53 * ->extent allocation
54 * Call btrfs_add_reserved_bytes() which does
55 * space_info->bytes_may_use -= num_bytes
56 * space_info->bytes_reserved += extent_bytes
57 *
58 * ->insert reference
59 * Call btrfs_update_block_group() which does
60 * space_info->bytes_reserved -= extent_bytes
61 * space_info->bytes_used += extent_bytes
62 *
63 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
64 *
65 * Assume we are unable to simply make the reservation because we do not have
66 * enough space
67 *
68 * -> __reserve_bytes
69 * create a reserve_ticket with ->bytes set to our reservation, add it to
70 * the tail of space_info->tickets, kick async flush thread
71 *
72 * ->handle_reserve_ticket
73 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
74 * on the ticket.
75 *
76 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
77 * Flushes various things attempting to free up space.
78 *
79 * -> btrfs_try_granting_tickets()
80 * This is called by anything that either subtracts space from
81 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
82 * space_info->total_bytes. This loops through the ->priority_tickets and
83 * then the ->tickets list checking to see if the reservation can be
84 * completed. If it can the space is added to space_info->bytes_may_use and
85 * the ticket is woken up.
86 *
87 * -> ticket wakeup
88 * Check if ->bytes == 0, if so we got our reservation and we can carry
89 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we
90 * were interrupted.)
91 *
92 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
93 *
94 * Same as the above, except we add ourselves to the
95 * space_info->priority_tickets, and we do not use ticket->wait, we simply
96 * call flush_space() ourselves for the states that are safe for us to call
97 * without deadlocking and hope for the best.
98 *
99 * THE FLUSHING STATES
100 *
101 * Generally speaking we will have two cases for each state, a "nice" state
102 * and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
103 * reduce the locking overhead on the various trees, and even to keep from
104 * doing any work at all in the case of delayed refs. Each of these delayed
105 * things however hold reservations, and so letting them run allows us to
106 * reclaim space so we can make new reservations.
107 *
108 * FLUSH_DELAYED_ITEMS
109 * Every inode has a delayed item to update the inode. Take a simple write
110 * for example, we would update the inode item at write time to update the
111 * mtime, and then again at finish_ordered_io() time in order to update the
112 * isize or bytes. We keep these delayed items to coalesce these operations
113 * into a single operation done on demand. These are an easy way to reclaim
114 * metadata space.
115 *
116 * FLUSH_DELALLOC
117 * Look at the delalloc comment to get an idea of how much space is reserved
118 * for delayed allocation. We can reclaim some of this space simply by
119 * running delalloc, but usually we need to wait for ordered extents to
120 * reclaim the bulk of this space.
121 *
122 * FLUSH_DELAYED_REFS
123 * We have a block reserve for the outstanding delayed refs space, and every
124 * delayed ref operation holds a reservation. Running these is a quick way
125 * to reclaim space, but we want to hold this until the end because COW can
126 * churn a lot and we can avoid making some extent tree modifications if we
127 * are able to delay for as long as possible.
128 *
129 * ALLOC_CHUNK
130 * We will skip this the first time through space reservation because of
131 * overcommit: we don't want to allocate a lot of useless metadata space when
132 * our worst case reservations will likely never come true.
133 *
134 * RUN_DELAYED_IPUTS
135 * If we're freeing inodes we're likely freeing checksums, file extent
136 * items, and extent tree items. Loads of space could be freed up by these
137 * operations, however they won't be usable until the transaction commits.
138 *
139 * COMMIT_TRANS
140 * This will commit the transaction. Historically we had a lot of logic
141 * surrounding whether or not we'd commit the transaction, but this was born
142 * out of a pre-tickets era where we could end up committing the transaction
143 * thousands of times in a row without making progress. Now thanks to our
144 * ticketing system we know if we're not making progress and can error
145 * everybody out after a few commits rather than burning the disk hoping for
146 * a different answer.
147 *
148 * OVERCOMMIT
149 *
150 * Because we hold so many reservations for metadata we will allow you to
151 * reserve more space than is currently free in the currently allocated
152 * metadata space. This only happens with metadata; data does not allow
153 * overcommitting.
154 *
155 * You can see the current logic for when we allow overcommit in
156 * btrfs_can_overcommit(), but it only applies to unallocated space. If there
157 * is no unallocated space to be had, all reservations are kept within the
158 * free space in the allocated metadata chunks.
159 *
160 * Because of overcommitting, you generally want to use the
161 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
162 * thing with or without extra unallocated space.
163 */
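
/*
 * Illustrative walk-through of the normal case above, with made-up numbers
 * (purely a sketch): reserving num_bytes = 1MiB adds 1MiB to ->bytes_may_use;
 * when an extent of extent_bytes = 1MiB is later allocated,
 * btrfs_add_reserved_bytes() moves that amount from ->bytes_may_use to
 * ->bytes_reserved; and once the reference is inserted,
 * btrfs_update_block_group() moves it again from ->bytes_reserved to
 * ->bytes_used.
 */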
164
165 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
166 bool may_use_included)
167 {
168 ASSERT(s_info);
169 return s_info->bytes_used + s_info->bytes_reserved +
170 s_info->bytes_pinned + s_info->bytes_readonly +
171 s_info->bytes_zone_unusable +
172 (may_use_included ? s_info->bytes_may_use : 0);
173 }
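
/*
 * Example usage (as done by __btrfs_dump_space_info() below): the signed free
 * space of a space_info can be computed as
 *
 *	(s64)(info->total_bytes - btrfs_space_info_used(info, true));
 *
 * which may be negative when metadata is overcommitted.
 */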
174
175 /*
176 * after adding space to the filesystem, we need to clear the full flags
177 * on all the space infos.
178 */
179 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
180 {
181 struct list_head *head = &info->space_info;
182 struct btrfs_space_info *found;
183
184 list_for_each_entry(found, head, list)
185 found->full = 0;
186 }
187
188 /*
189 * Block groups with more than this value (percents) of unusable space will be
190 * scheduled for background reclaim.
191 */
192 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
193
194 /*
195 * Calculate chunk size depending on volume type (regular or zoned).
196 */
197 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
198 {
199 if (btrfs_is_zoned(fs_info))
200 return fs_info->zone_size;
201
202 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
203
204 if (flags & BTRFS_BLOCK_GROUP_DATA)
205 return BTRFS_MAX_DATA_CHUNK_SIZE;
206 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
207 return SZ_32M;
208
209 /* Handle BTRFS_BLOCK_GROUP_METADATA */
210 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
211 return SZ_1G;
212
213 return SZ_256M;
214 }
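
/*
 * Illustrative results of the above, assuming a regular (non-zoned)
 * filesystem: DATA chunks use BTRFS_MAX_DATA_CHUNK_SIZE, SYSTEM chunks are
 * 32MiB, and METADATA chunks are 1GiB once the writable device space exceeds
 * 50GiB (256MiB below that). Zoned filesystems always use the zone size.
 */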
215
216 /*
217 * Update default chunk size.
218 */
219 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
220 u64 chunk_size)
221 {
222 WRITE_ONCE(space_info->chunk_size, chunk_size);
223 }
224
225 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
226 {
227
228 struct btrfs_space_info *space_info;
229 int i;
230 int ret;
231
232 space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
233 if (!space_info)
234 return -ENOMEM;
235
236 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
237 INIT_LIST_HEAD(&space_info->block_groups[i]);
238 init_rwsem(&space_info->groups_sem);
239 spin_lock_init(&space_info->lock);
240 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
241 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
242 INIT_LIST_HEAD(&space_info->ro_bgs);
243 INIT_LIST_HEAD(&space_info->tickets);
244 INIT_LIST_HEAD(&space_info->priority_tickets);
245 space_info->clamp = 1;
246 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
247
248 if (btrfs_is_zoned(info))
249 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
250
251 ret = btrfs_sysfs_add_space_info_type(info, space_info);
252 if (ret)
253 return ret;
254
255 list_add(&space_info->list, &info->space_info);
256 if (flags & BTRFS_BLOCK_GROUP_DATA)
257 info->data_sinfo = space_info;
258
259 return ret;
260 }
261
262 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
263 {
264 struct btrfs_super_block *disk_super;
265 u64 features;
266 u64 flags;
267 int mixed = 0;
268 int ret;
269
270 disk_super = fs_info->super_copy;
271 if (!btrfs_super_root(disk_super))
272 return -EINVAL;
273
274 features = btrfs_super_incompat_flags(disk_super);
275 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
276 mixed = 1;
277
278 flags = BTRFS_BLOCK_GROUP_SYSTEM;
279 ret = create_space_info(fs_info, flags);
280 if (ret)
281 goto out;
282
283 if (mixed) {
284 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
285 ret = create_space_info(fs_info, flags);
286 } else {
287 flags = BTRFS_BLOCK_GROUP_METADATA;
288 ret = create_space_info(fs_info, flags);
289 if (ret)
290 goto out;
291
292 flags = BTRFS_BLOCK_GROUP_DATA;
293 ret = create_space_info(fs_info, flags);
294 }
295 out:
296 return ret;
297 }
298
299 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
300 struct btrfs_block_group *block_group)
301 {
302 struct btrfs_space_info *found;
303 int factor, index;
304
305 factor = btrfs_bg_type_to_factor(block_group->flags);
306
307 found = btrfs_find_space_info(info, block_group->flags);
308 ASSERT(found);
309 spin_lock(&found->lock);
310 found->total_bytes += block_group->length;
311 found->disk_total += block_group->length * factor;
312 found->bytes_used += block_group->used;
313 found->disk_used += block_group->used * factor;
314 found->bytes_readonly += block_group->bytes_super;
315 found->bytes_zone_unusable += block_group->zone_unusable;
316 if (block_group->length > 0)
317 found->full = 0;
318 btrfs_try_granting_tickets(info, found);
319 spin_unlock(&found->lock);
320
321 block_group->space_info = found;
322
323 index = btrfs_bg_flags_to_raid_index(block_group->flags);
324 down_write(&found->groups_sem);
325 list_add_tail(&block_group->list, &found->block_groups[index]);
326 up_write(&found->groups_sem);
327 }
328
329 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
330 u64 flags)
331 {
332 struct list_head *head = &info->space_info;
333 struct btrfs_space_info *found;
334
335 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
336
337 list_for_each_entry(found, head, list) {
338 if (found->flags & flags)
339 return found;
340 }
341 return NULL;
342 }
343
344 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
345 struct btrfs_space_info *space_info,
346 enum btrfs_reserve_flush_enum flush)
347 {
348 u64 profile;
349 u64 avail;
350 int factor;
351
352 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
353 profile = btrfs_system_alloc_profile(fs_info);
354 else
355 profile = btrfs_metadata_alloc_profile(fs_info);
356
357 avail = atomic64_read(&fs_info->free_chunk_space);
358
359 /*
360 * If we have dup, raid1 or raid10 then only half of the free
361 * space is actually usable. For raid56, the space info used
362 * doesn't include the parity drive, so we don't have to
363 * change the math
364 */
365 factor = btrfs_bg_type_to_factor(profile);
366 avail = div_u64(avail, factor);
367
368 /*
369 * If we aren't flushing all things, let us overcommit up to
370 * half of the space. If we can flush everything, don't let us overcommit
371 * too much, let it overcommit up to 1/8 of the space.
372 */
373 if (flush == BTRFS_RESERVE_FLUSH_ALL)
374 avail >>= 3;
375 else
376 avail >>= 1;
377 return avail;
378 }
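
/*
 * Illustrative numbers for the above: with 8GiB of unallocated space and a
 * RAID1 metadata profile (factor 2), avail starts at 4GiB; a
 * BTRFS_RESERVE_FLUSH_ALL caller may then overcommit by at most
 * 4GiB >> 3 = 512MiB, while any other flush mode may overcommit by at most
 * 4GiB >> 1 = 2GiB.
 */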
379
380 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
381 struct btrfs_space_info *space_info, u64 bytes,
382 enum btrfs_reserve_flush_enum flush)
383 {
384 u64 avail;
385 u64 used;
386
387 /* Don't overcommit for data, including mixed block groups */
388 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
389 return 0;
390
391 used = btrfs_space_info_used(space_info, true);
392 avail = calc_available_free_space(fs_info, space_info, flush);
393
394 if (used + bytes < space_info->total_bytes + avail)
395 return 1;
396 return 0;
397 }
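
/*
 * Worked example (numbers are illustrative only): with total_bytes = 8GiB,
 * used = 7GiB and calc_available_free_space() returning 512MiB, a 256MiB
 * metadata reservation may overcommit (7GiB + 256MiB < 8GiB + 512MiB), while
 * a 2GiB reservation may not (9GiB >= 8.5GiB).
 */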
398
399 static void remove_ticket(struct btrfs_space_info *space_info,
400 struct reserve_ticket *ticket)
401 {
402 if (!list_empty(&ticket->list)) {
403 list_del_init(&ticket->list);
404 ASSERT(space_info->reclaim_size >= ticket->bytes);
405 space_info->reclaim_size -= ticket->bytes;
406 }
407 }
408
409 /*
410 * This is for space we already have accounted in space_info->bytes_may_use, so
411 * basically when we're returning space from block_rsv's.
412 */
413 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
414 struct btrfs_space_info *space_info)
415 {
416 struct list_head *head;
417 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
418
419 lockdep_assert_held(&space_info->lock);
420
421 head = &space_info->priority_tickets;
422 again:
423 while (!list_empty(head)) {
424 struct reserve_ticket *ticket;
425 u64 used = btrfs_space_info_used(space_info, true);
426
427 ticket = list_first_entry(head, struct reserve_ticket, list);
428
429 /* Check and see if our ticket can be satisfied now. */
430 if ((used + ticket->bytes <= space_info->total_bytes) ||
431 btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
432 flush)) {
433 btrfs_space_info_update_bytes_may_use(fs_info,
434 space_info,
435 ticket->bytes);
436 remove_ticket(space_info, ticket);
437 ticket->bytes = 0;
438 space_info->tickets_id++;
439 wake_up(&ticket->wait);
440 } else {
441 break;
442 }
443 }
444
445 if (head == &space_info->priority_tickets) {
446 head = &space_info->tickets;
447 flush = BTRFS_RESERVE_FLUSH_ALL;
448 goto again;
449 }
450 }
451
452 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \
453 do { \
454 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
455 spin_lock(&__rsv->lock); \
456 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
457 __rsv->size, __rsv->reserved); \
458 spin_unlock(&__rsv->lock); \
459 } while (0)
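
/*
 * For example, DUMP_BLOCK_RSV(fs_info, global_block_rsv) takes the rsv's lock
 * and prints a line of the form
 * "global_block_rsv: size <size> reserved <reserved>".
 */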
460
461 static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
462 {
463 switch (space_info->flags) {
464 case BTRFS_BLOCK_GROUP_SYSTEM:
465 return "SYSTEM";
466 case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
467 return "DATA+METADATA";
468 case BTRFS_BLOCK_GROUP_DATA:
469 return "DATA";
470 case BTRFS_BLOCK_GROUP_METADATA:
471 return "METADATA";
472 default:
473 return "UNKNOWN";
474 }
475 }
476
477 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
478 {
479 DUMP_BLOCK_RSV(fs_info, global_block_rsv);
480 DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
481 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
482 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
483 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
484 }
485
486 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
487 struct btrfs_space_info *info)
488 {
489 const char *flag_str = space_info_flag_to_str(info);
490 lockdep_assert_held(&info->lock);
491
492 /* The free space could be negative in case of overcommit */
493 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
494 flag_str,
495 (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
496 info->full ? "" : "not ");
497 btrfs_info(fs_info,
498 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
499 info->total_bytes, info->bytes_used, info->bytes_pinned,
500 info->bytes_reserved, info->bytes_may_use,
501 info->bytes_readonly, info->bytes_zone_unusable);
502 }
503
504 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
505 struct btrfs_space_info *info, u64 bytes,
506 int dump_block_groups)
507 {
508 struct btrfs_block_group *cache;
509 u64 total_avail = 0;
510 int index = 0;
511
512 spin_lock(&info->lock);
513 __btrfs_dump_space_info(fs_info, info);
514 dump_global_block_rsv(fs_info);
515 spin_unlock(&info->lock);
516
517 if (!dump_block_groups)
518 return;
519
520 down_read(&info->groups_sem);
521 again:
522 list_for_each_entry(cache, &info->block_groups[index], list) {
523 u64 avail;
524
525 spin_lock(&cache->lock);
526 avail = cache->length - cache->used - cache->pinned -
527 cache->reserved - cache->delalloc_bytes -
528 cache->bytes_super - cache->zone_unusable;
529 btrfs_info(fs_info,
530 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
531 cache->start, cache->length, cache->used, cache->pinned,
532 cache->reserved, cache->delalloc_bytes,
533 cache->bytes_super, cache->zone_unusable,
534 avail, cache->ro ? "[readonly]" : "");
535 spin_unlock(&cache->lock);
536 btrfs_dump_free_space(cache, bytes);
537 total_avail += avail;
538 }
539 if (++index < BTRFS_NR_RAID_TYPES)
540 goto again;
541 up_read(&info->groups_sem);
542
543 btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
544 }
545
546 static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
547 u64 to_reclaim)
548 {
549 u64 bytes;
550 u64 nr;
551
552 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
553 nr = div64_u64(to_reclaim, bytes);
554 if (!nr)
555 nr = 1;
556 return nr;
557 }
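
/*
 * Rough example, assuming the usual worst case insert size of
 * 2 * BTRFS_MAX_LEVEL * nodesize (256KiB with a 16KiB nodesize): asking to
 * reclaim 1MiB maps to nr = 4 items, and anything smaller than one item's
 * worth is rounded up to a single item.
 */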
558
559 static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info,
560 u64 to_reclaim)
561 {
562 const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1);
563 u64 nr;
564
565 nr = div64_u64(to_reclaim, bytes);
566 if (!nr)
567 nr = 1;
568 return nr;
569 }
570
571 #define EXTENT_SIZE_PER_ITEM SZ_256K
572
573 /*
574 * shrink metadata reservation for delalloc
575 */
576 static void shrink_delalloc(struct btrfs_fs_info *fs_info,
577 struct btrfs_space_info *space_info,
578 u64 to_reclaim, bool wait_ordered,
579 bool for_preempt)
580 {
581 struct btrfs_trans_handle *trans;
582 u64 delalloc_bytes;
583 u64 ordered_bytes;
584 u64 items;
585 long time_left;
586 int loops;
587
588 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
589 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
590 if (delalloc_bytes == 0 && ordered_bytes == 0)
591 return;
592
593 /* Calculate the number of pages we need to flush for the space reservation */
594 if (to_reclaim == U64_MAX) {
595 items = U64_MAX;
596 } else {
597 /*
598 * to_reclaim is set to however much metadata we need to
599 * reclaim, but reclaiming that much data doesn't really track
600 * exactly. What we really want to do is reclaim full inode's
601 * worth of reservations, however that's not available to us
602 * here. We will take a fraction of the delalloc bytes for our
603 * flushing loops and hope for the best. Delalloc will expand
604 * the amount we write to cover an entire dirty extent, which
605 * will reclaim the metadata reservation for that range. If
606 * it's not enough subsequent flush stages will be more
607 * aggressive.
608 */
609 to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
610 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
611 }
612
613 trans = current->journal_info;
614
615 /*
616 * If we are doing more ordered than delalloc we need to just wait on
617 * ordered extents, otherwise we'll waste time trying to flush delalloc
618 * that likely won't give us the space back we need.
619 */
620 if (ordered_bytes > delalloc_bytes && !for_preempt)
621 wait_ordered = true;
622
623 loops = 0;
624 while ((delalloc_bytes || ordered_bytes) && loops < 3) {
625 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
626 long nr_pages = min_t(u64, temp, LONG_MAX);
627 int async_pages;
628
629 btrfs_start_delalloc_roots(fs_info, nr_pages, true);
630
631 /*
632 * We need to make sure any outstanding async pages are now
633 * processed before we continue. This is because things like
634 * sync_inode() try to be smart and skip writing if the inode is
635 * marked clean. We don't use filemap_fwrite for flushing
636 * because we want to control how many pages we write out at a
637 * time, thus this is the only safe way to make sure we've
638 * waited for outstanding compressed workers to have started
639 * their jobs and thus have ordered extents set up properly.
640 *
641 * This exists because we do not want to wait for each
642 * individual inode to finish its async work, we simply want to
643 * start the IO on everybody, and then come back here and wait
644 * for all of the async work to catch up. Once we're done with
645 * that we know we'll have ordered extents for everything and we
646 * can decide if we wait for that or not.
647 *
648 * If we choose to replace this in the future, make absolutely
649 * sure that the proper waiting is being done in the async case,
650 * as there have been bugs in that area before.
651 */
652 async_pages = atomic_read(&fs_info->async_delalloc_pages);
653 if (!async_pages)
654 goto skip_async;
655
656 /*
657 * We don't want to wait forever, if we wrote less pages in this
658 * loop than we have outstanding, only wait for that number of
659 * pages, otherwise we can wait for all async pages to finish
660 * before continuing.
661 */
662 if (async_pages > nr_pages)
663 async_pages -= nr_pages;
664 else
665 async_pages = 0;
666 wait_event(fs_info->async_submit_wait,
667 atomic_read(&fs_info->async_delalloc_pages) <=
668 async_pages);
669 skip_async:
670 loops++;
671 if (wait_ordered && !trans) {
672 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
673 } else {
674 time_left = schedule_timeout_killable(1);
675 if (time_left)
676 break;
677 }
678
679 /*
680 * If we are for preemption we just want a one-shot of delalloc
681 * flushing so we can stop flushing if we decide we don't need
682 * to anymore.
683 */
684 if (for_preempt)
685 break;
686
687 spin_lock(&space_info->lock);
688 if (list_empty(&space_info->tickets) &&
689 list_empty(&space_info->priority_tickets)) {
690 spin_unlock(&space_info->lock);
691 break;
692 }
693 spin_unlock(&space_info->lock);
694
695 delalloc_bytes = percpu_counter_sum_positive(
696 &fs_info->delalloc_bytes);
697 ordered_bytes = percpu_counter_sum_positive(
698 &fs_info->ordered_bytes);
699 }
700 }
701
702 /*
703 * Try to flush some data based on policy set by @state. This is only advisory
704 * and may fail for various reasons. The caller is supposed to examine the
705 * state of @space_info to detect the outcome.
706 */
707 static void flush_space(struct btrfs_fs_info *fs_info,
708 struct btrfs_space_info *space_info, u64 num_bytes,
709 enum btrfs_flush_state state, bool for_preempt)
710 {
711 struct btrfs_root *root = fs_info->tree_root;
712 struct btrfs_trans_handle *trans;
713 int nr;
714 int ret = 0;
715
716 switch (state) {
717 case FLUSH_DELAYED_ITEMS_NR:
718 case FLUSH_DELAYED_ITEMS:
719 if (state == FLUSH_DELAYED_ITEMS_NR)
720 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
721 else
722 nr = -1;
723
724 trans = btrfs_join_transaction_nostart(root);
725 if (IS_ERR(trans)) {
726 ret = PTR_ERR(trans);
727 if (ret == -ENOENT)
728 ret = 0;
729 break;
730 }
731 ret = btrfs_run_delayed_items_nr(trans, nr);
732 btrfs_end_transaction(trans);
733 break;
734 case FLUSH_DELALLOC:
735 case FLUSH_DELALLOC_WAIT:
736 case FLUSH_DELALLOC_FULL:
737 if (state == FLUSH_DELALLOC_FULL)
738 num_bytes = U64_MAX;
739 shrink_delalloc(fs_info, space_info, num_bytes,
740 state != FLUSH_DELALLOC, for_preempt);
741 break;
742 case FLUSH_DELAYED_REFS_NR:
743 case FLUSH_DELAYED_REFS:
744 trans = btrfs_join_transaction_nostart(root);
745 if (IS_ERR(trans)) {
746 ret = PTR_ERR(trans);
747 if (ret == -ENOENT)
748 ret = 0;
749 break;
750 }
751 if (state == FLUSH_DELAYED_REFS_NR)
752 nr = calc_delayed_refs_nr(fs_info, num_bytes);
753 else
754 nr = 0;
755 btrfs_run_delayed_refs(trans, nr);
756 btrfs_end_transaction(trans);
757 break;
758 case ALLOC_CHUNK:
759 case ALLOC_CHUNK_FORCE:
760 trans = btrfs_join_transaction(root);
761 if (IS_ERR(trans)) {
762 ret = PTR_ERR(trans);
763 break;
764 }
765 ret = btrfs_chunk_alloc(trans,
766 btrfs_get_alloc_profile(fs_info, space_info->flags),
767 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
768 CHUNK_ALLOC_FORCE);
769 btrfs_end_transaction(trans);
770
771 if (ret > 0 || ret == -ENOSPC)
772 ret = 0;
773 break;
774 case RUN_DELAYED_IPUTS:
775 /*
776 * If we have pending delayed iputs then we could free up a
777 * bunch of pinned space, so make sure we run the iputs before
778 * we do our pinned bytes check below.
779 */
780 btrfs_run_delayed_iputs(fs_info);
781 btrfs_wait_on_delayed_iputs(fs_info);
782 break;
783 case COMMIT_TRANS:
784 ASSERT(current->journal_info == NULL);
785 /*
786 * We don't want to start a new transaction, just attach to the
787 * current one or wait it fully commits in case its commit is
788 * happening at the moment. Note: we don't use a nostart join
789 * because that does not wait for a transaction to fully commit
790 * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
791 */
792 trans = btrfs_attach_transaction_barrier(root);
793 if (IS_ERR(trans)) {
794 ret = PTR_ERR(trans);
795 if (ret == -ENOENT)
796 ret = 0;
797 break;
798 }
799 ret = btrfs_commit_transaction(trans);
800 break;
801 default:
802 ret = -ENOSPC;
803 break;
804 }
805
806 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
807 ret, for_preempt);
808 return;
809 }
810
811 static inline u64
812 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
813 struct btrfs_space_info *space_info)
814 {
815 u64 used;
816 u64 avail;
817 u64 to_reclaim = space_info->reclaim_size;
818
819 lockdep_assert_held(&space_info->lock);
820
821 avail = calc_available_free_space(fs_info, space_info,
822 BTRFS_RESERVE_FLUSH_ALL);
823 used = btrfs_space_info_used(space_info, true);
824
825 /*
826 * We may be flushing because suddenly we have less space than we had
827 * before, and now we're well over-committed based on our current free
828 * space. If that's the case add in our overage so we make sure to put
829 * appropriate pressure on the flushing state machine.
830 */
831 if (space_info->total_bytes + avail < used)
832 to_reclaim += used - (space_info->total_bytes + avail);
833
834 return to_reclaim;
835 }
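
/*
 * Illustrative numbers: with reclaim_size = 1MiB of queued tickets,
 * used = 11GiB and total_bytes + avail = 10GiB, we are 1GiB over-committed,
 * so to_reclaim becomes 1MiB + 1GiB rather than just the ticketed 1MiB.
 */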
836
837 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
838 struct btrfs_space_info *space_info)
839 {
840 u64 global_rsv_size = fs_info->global_block_rsv.reserved;
841 u64 ordered, delalloc;
842 u64 thresh;
843 u64 used;
844
845 thresh = mult_perc(space_info->total_bytes, 90);
846
847 lockdep_assert_held(&space_info->lock);
848
849 /* If we're just plain full then async reclaim just slows us down. */
850 if ((space_info->bytes_used + space_info->bytes_reserved +
851 global_rsv_size) >= thresh)
852 return false;
853
854 used = space_info->bytes_may_use + space_info->bytes_pinned;
855
856 /* The total flushable belongs to the global rsv, don't flush. */
857 if (global_rsv_size >= used)
858 return false;
859
860 /*
861 * 128MiB is 1/4 of the maximum global rsv size. If we have less than
862 * that devoted to other reservations then there's no sense in flushing,
863 * we don't have a lot of things that need flushing.
864 */
865 if (used - global_rsv_size <= SZ_128M)
866 return false;
867
868 /*
869 * We have tickets queued, bail so we don't compete with the async
870 * flushers.
871 */
872 if (space_info->reclaim_size)
873 return false;
874
875 /*
876 * If we have over half of the free space occupied by reservations or
877 * pinned then we want to start flushing.
878 *
879 * We do not do the traditional thing here, which is to say
880 *
881 * if (used >= ((total_bytes + avail) / 2))
882 * return 1;
883 *
884 * because this doesn't quite work how we want. If we had more than 50%
885 * of the space_info used by bytes_used and we had 0 available we'd just
886 * constantly run the background flusher. Instead we want it to kick in
887 * if our reclaimable space exceeds our clamped free space.
888 *
889 * Our clamping range is 2^1 -> 2^8. Practically speaking that means
890 * the following:
891 *
892 * Amount of RAM Minimum threshold Maximum threshold
893 *
894 * 256GiB 1GiB 128GiB
895 * 128GiB 512MiB 64GiB
896 * 64GiB 256MiB 32GiB
897 * 32GiB 128MiB 16GiB
898 * 16GiB 64MiB 8GiB
899 *
900 * These are the range our thresholds will fall in, corresponding to how
901 * much delalloc we need for the background flusher to kick in.
902 */
903
904 thresh = calc_available_free_space(fs_info, space_info,
905 BTRFS_RESERVE_FLUSH_ALL);
906 used = space_info->bytes_used + space_info->bytes_reserved +
907 space_info->bytes_readonly + global_rsv_size;
908 if (used < space_info->total_bytes)
909 thresh += space_info->total_bytes - used;
910 thresh >>= space_info->clamp;
911
912 used = space_info->bytes_pinned;
913
914 /*
915 * If we have more ordered bytes than delalloc bytes then we're either
916 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
917 * around. Preemptive flushing is only useful in that it can free up
918 * space before tickets need to wait for things to finish. In the case
919 * of ordered extents, preemptively waiting on ordered extents gets us
920 * nothing, if our reservations are tied up in ordered extents we'll
921 * simply have to slow down writers by forcing them to wait on ordered
922 * extents.
923 *
924 * In the case that ordered is larger than delalloc, only include the
925 * block reserves that we would actually be able to directly reclaim
926 * from. In this case if we're heavy on metadata operations this will
927 * clearly be heavy enough to warrant preemptive flushing. In the case
928 * of heavy DIO or ordered reservations, preemptive flushing will just
929 * waste time and cause us to slow down.
930 *
931 * We want to make sure we truly are maxed out on ordered however, so
932 * cut ordered in half, and if it's still higher than delalloc then we
933 * can keep flushing. This is to avoid the case where we start
934 * flushing, and now delalloc == ordered and we stop preemptively
935 * flushing when we could still have several gigs of delalloc to flush.
936 */
937 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
938 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
939 if (ordered >= delalloc)
940 used += fs_info->delayed_refs_rsv.reserved +
941 fs_info->delayed_block_rsv.reserved;
942 else
943 used += space_info->bytes_may_use - global_rsv_size;
944
945 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
946 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
947 }
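
/*
 * Illustrative numbers for the threshold above: with
 * calc_available_free_space() returning 2GiB, another 2GiB of the space_info
 * still unused and clamp = 3, preemptive flushing only kicks in once the
 * reclaimable estimate computed above exceeds (2GiB + 2GiB) >> 3 = 512MiB.
 */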
948
949 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
950 struct btrfs_space_info *space_info,
951 struct reserve_ticket *ticket)
952 {
953 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
954 u64 min_bytes;
955
956 if (!ticket->steal)
957 return false;
958
959 if (global_rsv->space_info != space_info)
960 return false;
961
962 spin_lock(&global_rsv->lock);
963 min_bytes = mult_perc(global_rsv->size, 10);
964 if (global_rsv->reserved < min_bytes + ticket->bytes) {
965 spin_unlock(&global_rsv->lock);
966 return false;
967 }
968 global_rsv->reserved -= ticket->bytes;
969 remove_ticket(space_info, ticket);
970 ticket->bytes = 0;
971 wake_up(&ticket->wait);
972 space_info->tickets_id++;
973 if (global_rsv->reserved < global_rsv->size)
974 global_rsv->full = 0;
975 spin_unlock(&global_rsv->lock);
976
977 return true;
978 }
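
/*
 * Illustrative numbers: with a 512MiB global rsv, min_bytes is ~51MiB (10%).
 * A 300MiB ticket can be stolen while 400MiB is reserved (400MiB >= 351MiB),
 * leaving 100MiB behind, but not while only 320MiB is reserved.
 */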
979
980 /*
981 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
982 * @fs_info - fs_info for this fs
983 * @space_info - the space info we were flushing
984 *
985 * We call this when we've exhausted our flushing ability and haven't made
986 * progress in satisfying tickets. The reservation code handles tickets in
987 * order, so if there is a large ticket first and then smaller ones we could
988 * very well satisfy the smaller tickets. This will attempt to wake up any
989 * tickets in the list to catch this case.
990 *
991 * This function returns true if it was able to make progress by clearing out
992 * other tickets, or if it stumbles across a ticket that was smaller than the
993 * first ticket.
994 */
995 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
996 struct btrfs_space_info *space_info)
997 {
998 struct reserve_ticket *ticket;
999 u64 tickets_id = space_info->tickets_id;
1000 const bool aborted = BTRFS_FS_ERROR(fs_info);
1001
1002 trace_btrfs_fail_all_tickets(fs_info, space_info);
1003
1004 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1005 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1006 __btrfs_dump_space_info(fs_info, space_info);
1007 }
1008
1009 while (!list_empty(&space_info->tickets) &&
1010 tickets_id == space_info->tickets_id) {
1011 ticket = list_first_entry(&space_info->tickets,
1012 struct reserve_ticket, list);
1013
1014 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1015 return true;
1016
1017 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1018 btrfs_info(fs_info, "failing ticket with %llu bytes",
1019 ticket->bytes);
1020
1021 remove_ticket(space_info, ticket);
1022 if (aborted)
1023 ticket->error = -EIO;
1024 else
1025 ticket->error = -ENOSPC;
1026 wake_up(&ticket->wait);
1027
1028 /*
1029 * We're just throwing tickets away, so more flushing may not
1030 * trip over btrfs_try_granting_tickets, so we need to call it
1031 * here to see if we can make progress with the next ticket in
1032 * the list.
1033 */
1034 if (!aborted)
1035 btrfs_try_granting_tickets(fs_info, space_info);
1036 }
1037 return (tickets_id != space_info->tickets_id);
1038 }
1039
1040 /*
1041 * This is for normal flushers, we can wait all goddamned day if we want to. We
1042 * will loop and continuously try to flush as long as we are making progress.
1043 * We count progress as clearing off tickets each time we have to loop.
1044 */
1045 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1046 {
1047 struct btrfs_fs_info *fs_info;
1048 struct btrfs_space_info *space_info;
1049 u64 to_reclaim;
1050 enum btrfs_flush_state flush_state;
1051 int commit_cycles = 0;
1052 u64 last_tickets_id;
1053
1054 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1055 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1056
1057 spin_lock(&space_info->lock);
1058 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1059 if (!to_reclaim) {
1060 space_info->flush = 0;
1061 spin_unlock(&space_info->lock);
1062 return;
1063 }
1064 last_tickets_id = space_info->tickets_id;
1065 spin_unlock(&space_info->lock);
1066
1067 flush_state = FLUSH_DELAYED_ITEMS_NR;
1068 do {
1069 flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1070 spin_lock(&space_info->lock);
1071 if (list_empty(&space_info->tickets)) {
1072 space_info->flush = 0;
1073 spin_unlock(&space_info->lock);
1074 return;
1075 }
1076 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1077 space_info);
1078 if (last_tickets_id == space_info->tickets_id) {
1079 flush_state++;
1080 } else {
1081 last_tickets_id = space_info->tickets_id;
1082 flush_state = FLUSH_DELAYED_ITEMS_NR;
1083 if (commit_cycles)
1084 commit_cycles--;
1085 }
1086
1087 /*
1088 * We do not want to empty the system of delalloc unless we're
1089 * under heavy pressure, so allow one trip through the flushing
1090 * logic before we start doing a FLUSH_DELALLOC_FULL.
1091 */
1092 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1093 flush_state++;
1094
1095 /*
1096 * We don't want to force a chunk allocation until we've tried
1097 * pretty hard to reclaim space. Think of the case where we
1098 * freed up a bunch of space and so have a lot of pinned space
1099 * to reclaim. We would rather use that than possibly create an
1100 * underutilized metadata chunk. So if this is our first run
1101 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1102 * commit the transaction. If nothing has changed the next go
1103 * around then we can force a chunk allocation.
1104 */
1105 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1106 flush_state++;
1107
1108 if (flush_state > COMMIT_TRANS) {
1109 commit_cycles++;
1110 if (commit_cycles > 2) {
1111 if (maybe_fail_all_tickets(fs_info, space_info)) {
1112 flush_state = FLUSH_DELAYED_ITEMS_NR;
1113 commit_cycles--;
1114 } else {
1115 space_info->flush = 0;
1116 }
1117 } else {
1118 flush_state = FLUSH_DELAYED_ITEMS_NR;
1119 }
1120 }
1121 spin_unlock(&space_info->lock);
1122 } while (flush_state <= COMMIT_TRANS);
1123 }
1124
1125 /*
1126 * This handles pre-flushing of metadata space before we get to the point that
1127 * we need to start blocking threads on tickets. The logic here is different
1128 * from the other flush paths because it doesn't rely on tickets to tell us how
1129 * much we need to flush, instead it attempts to keep us below the 90% full
1130 * watermark of space by flushing whichever reservation pool is currently the
1131 * largest.
1132 */
1133 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1134 {
1135 struct btrfs_fs_info *fs_info;
1136 struct btrfs_space_info *space_info;
1137 struct btrfs_block_rsv *delayed_block_rsv;
1138 struct btrfs_block_rsv *delayed_refs_rsv;
1139 struct btrfs_block_rsv *global_rsv;
1140 struct btrfs_block_rsv *trans_rsv;
1141 int loops = 0;
1142
1143 fs_info = container_of(work, struct btrfs_fs_info,
1144 preempt_reclaim_work);
1145 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1146 delayed_block_rsv = &fs_info->delayed_block_rsv;
1147 delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1148 global_rsv = &fs_info->global_block_rsv;
1149 trans_rsv = &fs_info->trans_block_rsv;
1150
1151 spin_lock(&space_info->lock);
1152 while (need_preemptive_reclaim(fs_info, space_info)) {
1153 enum btrfs_flush_state flush;
1154 u64 delalloc_size = 0;
1155 u64 to_reclaim, block_rsv_size;
1156 u64 global_rsv_size = global_rsv->reserved;
1157
1158 loops++;
1159
1160 /*
1161 * We don't have a precise counter for the metadata being
1162 * reserved for delalloc, so we'll approximate it by subtracting
1163 * out the block rsv's space from the bytes_may_use. If that
1164 * amount is higher than the individual reserves, then we can
1165 * assume it's tied up in delalloc reservations.
1166 */
1167 block_rsv_size = global_rsv_size +
1168 delayed_block_rsv->reserved +
1169 delayed_refs_rsv->reserved +
1170 trans_rsv->reserved;
1171 if (block_rsv_size < space_info->bytes_may_use)
1172 delalloc_size = space_info->bytes_may_use - block_rsv_size;
1173
1174 /*
1175 * We don't want to include the global_rsv in our calculation,
1176 * because that's space we can't touch. Subtract it from the
1177 * block_rsv_size for the next checks.
1178 */
1179 block_rsv_size -= global_rsv_size;
1180
1181 /*
1182 * We really want to avoid flushing delalloc too much, as it
1183 * could result in poor allocation patterns, so only flush it if
1184 * it's larger than the rest of the pools combined.
1185 */
1186 if (delalloc_size > block_rsv_size) {
1187 to_reclaim = delalloc_size;
1188 flush = FLUSH_DELALLOC;
1189 } else if (space_info->bytes_pinned >
1190 (delayed_block_rsv->reserved +
1191 delayed_refs_rsv->reserved)) {
1192 to_reclaim = space_info->bytes_pinned;
1193 flush = COMMIT_TRANS;
1194 } else if (delayed_block_rsv->reserved >
1195 delayed_refs_rsv->reserved) {
1196 to_reclaim = delayed_block_rsv->reserved;
1197 flush = FLUSH_DELAYED_ITEMS_NR;
1198 } else {
1199 to_reclaim = delayed_refs_rsv->reserved;
1200 flush = FLUSH_DELAYED_REFS_NR;
1201 }
1202
1203 spin_unlock(&space_info->lock);
1204
1205 /*
1206 * We don't want to reclaim everything, just a portion, so scale
1207 * down the to_reclaim by 1/4. If it takes us down to 0,
1208 * reclaim one item's worth.
1209 */
1210 to_reclaim >>= 2;
1211 if (!to_reclaim)
1212 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1213 flush_space(fs_info, space_info, to_reclaim, flush, true);
1214 cond_resched();
1215 spin_lock(&space_info->lock);
1216 }
1217
1218 /* We only went through once, back off our clamping. */
1219 if (loops == 1 && !space_info->reclaim_size)
1220 space_info->clamp = max(1, space_info->clamp - 1);
1221 trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1222 spin_unlock(&space_info->lock);
1223 }
1224
1225 /*
1226 * FLUSH_DELALLOC_WAIT:
1227 * Space is freed from flushing delalloc in one of two ways.
1228 *
1229 * 1) compression is on and we allocate less space than we reserved
1230 * 2) we are overwriting existing space
1231 *
1232 * For #1 that extra space is reclaimed as soon as the delalloc pages are
1233 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1234 * length to ->bytes_reserved, and subtracts the reserved space from
1235 * ->bytes_may_use.
1236 *
1237 * For #2 this is trickier. Once the ordered extent runs we will drop the
1238 * extent in the range we are overwriting, which creates a delayed ref for
1239 * that freed extent. This however is not reclaimed until the transaction
1240 * commits, thus the next stages.
1241 *
1242 * RUN_DELAYED_IPUTS
1243 * If we are freeing inodes, we want to make sure all delayed iputs have
1244 * completed, because they could have been on an inode with i_nlink == 0, and
1245 * thus have been truncated and freed up space. But again this space is not
1246 * immediately re-usable, it comes in the form of a delayed ref, which must be
1247 * run and then the transaction must be committed.
1248 *
1249 * COMMIT_TRANS
1250 * This is where we reclaim all of the pinned space generated by running the
1251 * iputs
1252 *
1253 * ALLOC_CHUNK_FORCE
1254 * For data we start with alloc chunk force, however we could have been full
1255 * before, and then the transaction commit could have freed new block groups,
1256 * so if we now have space to allocate do the force chunk allocation.
1257 */
1258 static const enum btrfs_flush_state data_flush_states[] = {
1259 FLUSH_DELALLOC_FULL,
1260 RUN_DELAYED_IPUTS,
1261 COMMIT_TRANS,
1262 ALLOC_CHUNK_FORCE,
1263 };
1264
1265 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1266 {
1267 struct btrfs_fs_info *fs_info;
1268 struct btrfs_space_info *space_info;
1269 u64 last_tickets_id;
1270 enum btrfs_flush_state flush_state = 0;
1271
1272 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1273 space_info = fs_info->data_sinfo;
1274
1275 spin_lock(&space_info->lock);
1276 if (list_empty(&space_info->tickets)) {
1277 space_info->flush = 0;
1278 spin_unlock(&space_info->lock);
1279 return;
1280 }
1281 last_tickets_id = space_info->tickets_id;
1282 spin_unlock(&space_info->lock);
1283
1284 while (!space_info->full) {
1285 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1286 spin_lock(&space_info->lock);
1287 if (list_empty(&space_info->tickets)) {
1288 space_info->flush = 0;
1289 spin_unlock(&space_info->lock);
1290 return;
1291 }
1292
1293 /* Something happened, fail everything and bail. */
1294 if (BTRFS_FS_ERROR(fs_info))
1295 goto aborted_fs;
1296 last_tickets_id = space_info->tickets_id;
1297 spin_unlock(&space_info->lock);
1298 }
1299
1300 while (flush_state < ARRAY_SIZE(data_flush_states)) {
1301 flush_space(fs_info, space_info, U64_MAX,
1302 data_flush_states[flush_state], false);
1303 spin_lock(&space_info->lock);
1304 if (list_empty(&space_info->tickets)) {
1305 space_info->flush = 0;
1306 spin_unlock(&space_info->lock);
1307 return;
1308 }
1309
1310 if (last_tickets_id == space_info->tickets_id) {
1311 flush_state++;
1312 } else {
1313 last_tickets_id = space_info->tickets_id;
1314 flush_state = 0;
1315 }
1316
1317 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1318 if (space_info->full) {
1319 if (maybe_fail_all_tickets(fs_info, space_info))
1320 flush_state = 0;
1321 else
1322 space_info->flush = 0;
1323 } else {
1324 flush_state = 0;
1325 }
1326
1327 /* Something happened, fail everything and bail. */
1328 if (BTRFS_FS_ERROR(fs_info))
1329 goto aborted_fs;
1330
1331 }
1332 spin_unlock(&space_info->lock);
1333 }
1334 return;
1335
1336 aborted_fs:
1337 maybe_fail_all_tickets(fs_info, space_info);
1338 space_info->flush = 0;
1339 spin_unlock(&space_info->lock);
1340 }
1341
1342 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1343 {
1344 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1345 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1346 INIT_WORK(&fs_info->preempt_reclaim_work,
1347 btrfs_preempt_reclaim_metadata_space);
1348 }
1349
1350 static const enum btrfs_flush_state priority_flush_states[] = {
1351 FLUSH_DELAYED_ITEMS_NR,
1352 FLUSH_DELAYED_ITEMS,
1353 ALLOC_CHUNK,
1354 };
1355
1356 static const enum btrfs_flush_state evict_flush_states[] = {
1357 FLUSH_DELAYED_ITEMS_NR,
1358 FLUSH_DELAYED_ITEMS,
1359 FLUSH_DELAYED_REFS_NR,
1360 FLUSH_DELAYED_REFS,
1361 FLUSH_DELALLOC,
1362 FLUSH_DELALLOC_WAIT,
1363 FLUSH_DELALLOC_FULL,
1364 ALLOC_CHUNK,
1365 COMMIT_TRANS,
1366 };
1367
1368 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1369 struct btrfs_space_info *space_info,
1370 struct reserve_ticket *ticket,
1371 const enum btrfs_flush_state *states,
1372 int states_nr)
1373 {
1374 u64 to_reclaim;
1375 int flush_state = 0;
1376
1377 spin_lock(&space_info->lock);
1378 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1379 /*
1380 * This is the priority reclaim path, so to_reclaim could be >0 still
1381 * because we may have only satisfied the priority tickets and still
1382 * left non priority tickets on the list. We would then have
1383 * to_reclaim but ->bytes == 0.
1384 */
1385 if (ticket->bytes == 0) {
1386 spin_unlock(&space_info->lock);
1387 return;
1388 }
1389
1390 while (flush_state < states_nr) {
1391 spin_unlock(&space_info->lock);
1392 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1393 false);
1394 flush_state++;
1395 spin_lock(&space_info->lock);
1396 if (ticket->bytes == 0) {
1397 spin_unlock(&space_info->lock);
1398 return;
1399 }
1400 }
1401
1402 /*
1403 * Attempt to steal from the global rsv if we can, except if the fs was
1404 * turned into error mode due to a transaction abort when flushing space
1405 * above, in that case fail with the abort error instead of returning
1406 * success to the caller if we can steal from the global rsv - this is
1407 * just to have the caller fail immediately instead of later when trying to
1408 * modify the fs, making it easier to debug -ENOSPC problems.
1409 */
1410 if (BTRFS_FS_ERROR(fs_info)) {
1411 ticket->error = BTRFS_FS_ERROR(fs_info);
1412 remove_ticket(space_info, ticket);
1413 } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1414 ticket->error = -ENOSPC;
1415 remove_ticket(space_info, ticket);
1416 }
1417
1418 /*
1419 * We must run try_granting_tickets here because we could be a large
1420 * ticket in front of a smaller ticket that can now be satisfied with
1421 * the available space.
1422 */
1423 btrfs_try_granting_tickets(fs_info, space_info);
1424 spin_unlock(&space_info->lock);
1425 }
1426
1427 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1428 struct btrfs_space_info *space_info,
1429 struct reserve_ticket *ticket)
1430 {
1431 spin_lock(&space_info->lock);
1432
1433 /* We could have been granted before we got here. */
1434 if (ticket->bytes == 0) {
1435 spin_unlock(&space_info->lock);
1436 return;
1437 }
1438
1439 while (!space_info->full) {
1440 spin_unlock(&space_info->lock);
1441 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1442 spin_lock(&space_info->lock);
1443 if (ticket->bytes == 0) {
1444 spin_unlock(&space_info->lock);
1445 return;
1446 }
1447 }
1448
1449 ticket->error = -ENOSPC;
1450 remove_ticket(space_info, ticket);
1451 btrfs_try_granting_tickets(fs_info, space_info);
1452 spin_unlock(&space_info->lock);
1453 }
1454
1455 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1456 struct btrfs_space_info *space_info,
1457 struct reserve_ticket *ticket)
1458
1459 {
1460 DEFINE_WAIT(wait);
1461 int ret = 0;
1462
1463 spin_lock(&space_info->lock);
1464 while (ticket->bytes > 0 && ticket->error == 0) {
1465 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1466 if (ret) {
1467 /*
1468 * Delete us from the list. After we unlock the space
1469 * info, we don't want the async reclaim job to reserve
1470 * space for this ticket. If that would happen, then the
1471 * ticket's task would not know that space was reserved
1472 * despite getting an error, resulting in a space leak
1473 * (bytes_may_use counter of our space_info).
1474 */
1475 remove_ticket(space_info, ticket);
1476 ticket->error = -EINTR;
1477 break;
1478 }
1479 spin_unlock(&space_info->lock);
1480
1481 schedule();
1482
1483 finish_wait(&ticket->wait, &wait);
1484 spin_lock(&space_info->lock);
1485 }
1486 spin_unlock(&space_info->lock);
1487 }
1488
1489 /*
1490 * Do the appropriate flushing and waiting for a ticket.
1491 *
1492 * @fs_info: the filesystem
1493 * @space_info: space info for the reservation
1494 * @ticket: ticket for the reservation
1495 * @start_ns: timestamp when the reservation started
1496 * @orig_bytes: amount of bytes originally reserved
1497 * @flush: how much we can flush
1498 *
1499 * This does the work of figuring out how to flush for the ticket, waiting for
1500 * the reservation, and returning the appropriate error if there is one.
1501 */
1502 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1503 struct btrfs_space_info *space_info,
1504 struct reserve_ticket *ticket,
1505 u64 start_ns, u64 orig_bytes,
1506 enum btrfs_reserve_flush_enum flush)
1507 {
1508 int ret;
1509
1510 switch (flush) {
1511 case BTRFS_RESERVE_FLUSH_DATA:
1512 case BTRFS_RESERVE_FLUSH_ALL:
1513 case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1514 wait_reserve_ticket(fs_info, space_info, ticket);
1515 break;
1516 case BTRFS_RESERVE_FLUSH_LIMIT:
1517 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1518 priority_flush_states,
1519 ARRAY_SIZE(priority_flush_states));
1520 break;
1521 case BTRFS_RESERVE_FLUSH_EVICT:
1522 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1523 evict_flush_states,
1524 ARRAY_SIZE(evict_flush_states));
1525 break;
1526 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1527 priority_reclaim_data_space(fs_info, space_info, ticket);
1528 break;
1529 default:
1530 ASSERT(0);
1531 break;
1532 }
1533
1534 ret = ticket->error;
1535 ASSERT(list_empty(&ticket->list));
1536 /*
1537 * Check that we can't have an error set if the reservation succeeded,
1538 * as that would confuse tasks and lead them to error out without
1539 * releasing reserved space (if an error happens the expectation is that
1540 * space wasn't reserved at all).
1541 */
1542 ASSERT(!(ticket->bytes == 0 && ticket->error));
1543 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1544 start_ns, flush, ticket->error);
1545 return ret;
1546 }
1547
1548 /*
1549 * This returns true if this flush state will go through the ordinary flushing
1550 * code.
1551 */
1552 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1553 {
1554 return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1555 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1556 }
1557
1558 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1559 struct btrfs_space_info *space_info)
1560 {
1561 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1562 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1563
1564 /*
1565 * If we're heavy on ordered operations then clamping won't help us. We
1566 * need to clamp specifically to keep up with dirtying buffered
1567 * writers, because there's not a 1:1 correlation of writing delalloc
1568 * and freeing space, like there is with flushing delayed refs or
1569 * delayed nodes. If we're already more ordered than delalloc then
1570 * we're keeping up, otherwise we aren't and should probably clamp.
1571 */
1572 if (ordered < delalloc)
1573 space_info->clamp = min(space_info->clamp + 1, 8);
1574 }
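
/*
 * Illustrative progression: on a buffered-write heavy workload where
 * delalloc_bytes keeps exceeding ordered_bytes, every time __reserve_bytes()
 * is forced to add a normal reserve ticket the clamp is bumped by one, up to
 * a maximum of 8, making the preemptive background flushing driven by
 * need_preemptive_reclaim() progressively more aggressive.
 */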
1575
1576 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1577 {
1578 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1579 flush == BTRFS_RESERVE_FLUSH_EVICT);
1580 }
1581
1582 /*
1583 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket; they just want to
1584 * fail as quickly as possible.
1585 */
1586 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
1587 {
1588 return (flush != BTRFS_RESERVE_NO_FLUSH &&
1589 flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
1590 }
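
/*
 * Rough summary of how the flush modes map onto the paths in this file; the
 * switch in handle_reserve_ticket() and __reserve_bytes() below are the
 * authoritative reference:
 *
 *   NO_FLUSH                no ticket, return -ENOSPC immediately
 *   FLUSH_EMERGENCY         no ticket, but may dip into the remaining real
 *                           space (ignoring bytes_may_use) before failing
 *   FLUSH_LIMIT             priority ticket, flushes priority_flush_states
 *                           itself
 *   FLUSH_EVICT             priority ticket, flushes evict_flush_states
 *                           itself, sets ticket->steal
 *   FLUSH_FREE_SPACE_INODE  priority ticket, priority_reclaim_data_space()
 *   FLUSH_ALL/_ALL_STEAL    normal ticket, waits for the async metadata
 *                           reclaim worker (_ALL_STEAL sets ticket->steal)
 *   FLUSH_DATA              normal ticket, waits for the async data reclaim
 *                           worker
 */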
1591
1592 /*
1593 * Try to reserve bytes from the given space_info.
1594 *
1595 * @fs_info: the filesystem
1596 * @space_info: space info we want to allocate from
1597 * @orig_bytes: number of bytes we want
1598 * @flush: whether or not we can flush to make our reservation
1599 *
1600 * This will reserve orig_bytes number of bytes from the given space_info. If
1601 * there is not enough space it will make an attempt to flush out space to
1602 * make room. It will do this by flushing delalloc if possible or committing
1603 * the transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempt to
1604 * regain reservations will be made and this will fail if there is not enough
1605 * space already.
1606 */
1607 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1608 struct btrfs_space_info *space_info, u64 orig_bytes,
1609 enum btrfs_reserve_flush_enum flush)
1610 {
1611 struct work_struct *async_work;
1612 struct reserve_ticket ticket;
1613 u64 start_ns = 0;
1614 u64 used;
1615 int ret = -ENOSPC;
1616 bool pending_tickets;
1617
1618 ASSERT(orig_bytes);
1619 /*
1620 * If we have a transaction handle (current->journal_info != NULL), then
1621 * the flush method must not be BTRFS_RESERVE_FLUSH_ALL* or
1622 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
1623 * flushing methods can trigger transaction commits.
1624 */
1625 if (current->journal_info) {
1626 /* One assert per line for easier debugging. */
1627 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
1628 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
1629 ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
1630 }
1631
1632 if (flush == BTRFS_RESERVE_FLUSH_DATA)
1633 async_work = &fs_info->async_data_reclaim_work;
1634 else
1635 async_work = &fs_info->async_reclaim_work;
1636
1637 spin_lock(&space_info->lock);
1638 used = btrfs_space_info_used(space_info, true);
1639
1640 /*
1641 * We don't want NO_FLUSH allocations to jump ahead of everybody; they can
1642 * generally handle ENOSPC in a different way, so treat them the same as
1643 * normal flushers when deciding whether pending tickets block them.
1644 */
1645 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1646 pending_tickets = !list_empty(&space_info->tickets) ||
1647 !list_empty(&space_info->priority_tickets);
1648 else
1649 pending_tickets = !list_empty(&space_info->priority_tickets);
1650
1651 /*
1652 * Carry on if we have enough space (short-circuit), otherwise call
1653 * btrfs_can_overcommit() to check whether we are allowed to overcommit.
1654 */
1655 if (!pending_tickets &&
1656 ((used + orig_bytes <= space_info->total_bytes) ||
1657 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1658 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1659 orig_bytes);
1660 ret = 0;
1661 }
1662
1663 /*
1664 * Things are dire, we need to make a reservation so we don't abort. We
1665 * will let this reservation go through as long as we have actual space
1666 * left to allocate for the block.
1667 */
1668 if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
1669 used = btrfs_space_info_used(space_info, false);
1670 if (used + orig_bytes <= space_info->total_bytes) {
1671 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1672 orig_bytes);
1673 ret = 0;
1674 }
1675 }
1676
1677 /*
1678 * If we couldn't make a reservation then setup our reservation ticket
1679 * and kick the async worker if it's not already running.
1680 *
1681 * If we are a priority flusher then we just need to add our ticket to
1682 * the list and we will do our own flushing further down.
1683 */
1684 if (ret && can_ticket(flush)) {
1685 ticket.bytes = orig_bytes;
1686 ticket.error = 0;
1687 space_info->reclaim_size += ticket.bytes;
1688 init_waitqueue_head(&ticket.wait);
1689 ticket.steal = can_steal(flush);
1690 if (trace_btrfs_reserve_ticket_enabled())
1691 start_ns = ktime_get_ns();
1692
1693 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1694 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1695 flush == BTRFS_RESERVE_FLUSH_DATA) {
1696 list_add_tail(&ticket.list, &space_info->tickets);
1697 if (!space_info->flush) {
1698 /*
1699 * We were forced to add a reserve ticket, so
1700 * our preemptive flushing is unable to keep
1701 * up. Clamp down on the threshold for the
1702 * preemptive flushing in order to keep up with
1703 * the workload.
1704 */
1705 maybe_clamp_preempt(fs_info, space_info);
1706
1707 space_info->flush = 1;
1708 trace_btrfs_trigger_flush(fs_info,
1709 space_info->flags,
1710 orig_bytes, flush,
1711 "enospc");
1712 queue_work(system_unbound_wq, async_work);
1713 }
1714 } else {
1715 list_add_tail(&ticket.list,
1716 &space_info->priority_tickets);
1717 }
1718 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1719 /*
1720 * We will do the space reservation dance during log replay,
1721 * which means we won't have fs_info->fs_root set, so don't do
1722 * the async reclaim as we will panic.
1723 */
1724 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1725 !work_busy(&fs_info->preempt_reclaim_work) &&
1726 need_preemptive_reclaim(fs_info, space_info)) {
1727 trace_btrfs_trigger_flush(fs_info, space_info->flags,
1728 orig_bytes, flush, "preempt");
1729 queue_work(system_unbound_wq,
1730 &fs_info->preempt_reclaim_work);
1731 }
1732 }
1733 spin_unlock(&space_info->lock);
1734 if (!ret || !can_ticket(flush))
1735 return ret;
1736
1737 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1738 orig_bytes, flush);
1739 }
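
/*
 * Worked example for the fast path in __reserve_bytes(), with illustrative
 * numbers: assume no pending tickets, space_info->total_bytes = 10GiB and
 * btrfs_space_info_used(space_info, true) = 9GiB.  A 512MiB request takes the
 * short-circuit and just bumps bytes_may_use.  A 2GiB request succeeds
 * immediately only if btrfs_can_overcommit() allows it; otherwise it gets a
 * ticket, or fails right away for NO_FLUSH and FLUSH_EMERGENCY (the latter
 * after retrying the check with bytes_may_use excluded).
 */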
1740
1741 /*
1742 * Try to reserve metadata bytes from the block_rsv's space.
1743 *
1744 * @fs_info: the filesystem
1745 * @block_rsv: block_rsv we're allocating for
1746 * @orig_bytes: number of bytes we want
1747 * @flush: whether or not we can flush to make our reservation
1748 *
1749 * This will reserve orig_bytes number of bytes from the space info associated
1750 * with the block_rsv. If there is not enough space it will make an attempt to
1751 * flush out space to make room. It will do this by flushing delalloc if
1752 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
1753 * then no attempts to regain reservations will be made and this will fail if
1754 * there is not enough space already.
1755 */
1756 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1757 struct btrfs_block_rsv *block_rsv,
1758 u64 orig_bytes,
1759 enum btrfs_reserve_flush_enum flush)
1760 {
1761 int ret;
1762
1763 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1764 if (ret == -ENOSPC) {
1765 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1766 block_rsv->space_info->flags,
1767 orig_bytes, 1);
1768
1769 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1770 btrfs_dump_space_info(fs_info, block_rsv->space_info,
1771 orig_bytes, 0);
1772 }
1773 return ret;
1774 }
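
/*
 * Sketch of a typical caller, roughly what the block_rsv helpers do when
 * refilling a reserve (illustrative only, see block-rsv.c for the real thing):
 *
 *	ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
 *	if (!ret)
 *		btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
 *
 * On failure nothing was charged to space_info->bytes_may_use, so the caller
 * must not release anything it did not reserve.
 */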
1775
1776 /*
1777 * Try to reserve data bytes for an allocation.
1778 *
1779 * @fs_info: the filesystem
1780 * @bytes: number of bytes we need
1781 * @flush: how we are allowed to flush
1782 *
1783 * This will reserve bytes from the data space info. If there is not enough
1784 * space then we will attempt to flush space as specified by flush.
1785 */
1786 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1787 enum btrfs_reserve_flush_enum flush)
1788 {
1789 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1790 int ret;
1791
1792 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1793 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1794 flush == BTRFS_RESERVE_NO_FLUSH);
1795 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1796
1797 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1798 if (ret == -ENOSPC) {
1799 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1800 data_sinfo->flags, bytes, 1);
1801 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1802 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1803 }
1804 return ret;
1805 }
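
/*
 * Sketch of a typical caller, roughly what the delalloc path does when it
 * needs data space for an ordinary inode (illustrative only):
 *
 *	u64 len = ALIGN(bytes, fs_info->sectorsize);
 *
 *	ret = btrfs_reserve_data_bytes(fs_info, len, BTRFS_RESERVE_FLUSH_DATA);
 *
 * The free space inode passes BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE instead,
 * which takes the priority_reclaim_data_space() path in
 * handle_reserve_ticket().
 */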
1806
1807 /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1808 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1809 {
1810 struct btrfs_space_info *space_info;
1811
1812 btrfs_info(fs_info, "dumping space info:");
1813 list_for_each_entry(space_info, &fs_info->space_info, list) {
1814 spin_lock(&space_info->lock);
1815 __btrfs_dump_space_info(fs_info, space_info);
1816 spin_unlock(&space_info->lock);
1817 }
1818 dump_global_block_rsv(fs_info);
1819 }
1820
1821 /*
1822 * Account the unused space of all the readonly block groups in the
1823 * space_info, taking mirrors into account.
1824 */
1825 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
1826 {
1827 struct btrfs_block_group *block_group;
1828 u64 free_bytes = 0;
1829 int factor;
1830
1831 /* It's df, we don't care if it's racy */
1832 if (list_empty(&sinfo->ro_bgs))
1833 return 0;
1834
1835 spin_lock(&sinfo->lock);
1836 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
1837 spin_lock(&block_group->lock);
1838
1839 if (!block_group->ro) {
1840 spin_unlock(&block_group->lock);
1841 continue;
1842 }
1843
1844 factor = btrfs_bg_type_to_factor(block_group->flags);
1845 free_bytes += (block_group->length -
1846 block_group->used) * factor;
1847
1848 spin_unlock(&block_group->lock);
1849 }
1850 spin_unlock(&sinfo->lock);
1851
1852 return free_bytes;
1853 }
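
/*
 * Worked example: a read-only RAID1 block group with length 1GiB and 768MiB
 * used has a factor of 2 (each logical byte occupies two raw bytes), so it
 * contributes (1GiB - 768MiB) * 2 = 512MiB here; SINGLE and RAID0 block
 * groups have a factor of 1.
 */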
1854