// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"
#include "subpage.h"
#include "inode-item.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_64K
#define FORCE_EXTENT_THRESHOLD	SZ_1M

struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, bool update_stat);
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc);
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info);
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes, bool update_stats);

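/*
 * Empty the in-memory cache: walk the offset-indexed rbtree and free every
 * extent entry and bitmap. Expects ctl->tree_lock to be held; the lock is
 * briefly dropped between iterations via cond_resched_lock().
 */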
static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info, true);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	unsigned nofs_flag;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	/*
	 * We are often under a trans handle at this point, so we need to make
	 * sure NOFS is set to keep us from deadlocking.
	 */
	nofs_flag = memalloc_nofs_save();
	inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
	btrfs_release_path(path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(inode))
		return inode;

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

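/*
 * Grab the cached free space inode for this block group if we have one,
 * otherwise look it up on disk. Old style inodes (missing NODATASUM and
 * NODATACOW) are converted and the on-disk cache invalidated, and the result
 * is cached on the block group for later lookups.
 */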
struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
				      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->start);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
		block_group->inode = igrab(inode);
	spin_unlock(&block_group->lock);

	return inode;
}

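/*
 * Create the inode item and the free space header item backing a new cache
 * file. The inode is a plain 0600 regular file marked NOCOMPRESS, PREALLOC,
 * NODATASUM and NODATACOW, since the crcs are stored inline in the cache
 * pages rather than in the csum tree.
 */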
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	/* We inline CRCs for the free disk space cache */
	const u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC |
			  BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			      sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_trans_handle *trans,
			    struct btrfs_block_group *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
					 ino, block_group->start);
}

/*
 * inode is an optional sink: if it is NULL, btrfs_remove_free_space_inode
 * handles lookup, otherwise it takes ownership and iputs the inode.
 * Don't reuse an inode pointer after passing it into this function.
 */
int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
				  struct inode *inode,
				  struct btrfs_block_group *block_group)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!inode)
		inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) != -ENOENT)
			ret = PTR_ERR(inode);
		goto out;
	}
	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret) {
		btrfs_add_delayed_iput(inode);
		goto out;
	}
	clear_nlink(inode);
	/* One for the block groups ref */
	spin_lock(&block_group->lock);
	if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
	} else {
		spin_unlock(&block_group->lock);
	}
	/* One for the lookup ref */
	btrfs_add_delayed_iput(inode);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;
	ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path,
				-1, 1);
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto out;
	}
	ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

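/*
 * Check whether the given block reserve has enough space to truncate the
 * free space cache inode: one insertion worth of slack plus one metadata
 * item for the inode update.
 */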
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		       btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group *block_group,
				    struct inode *vfs_inode)
{
	struct btrfs_truncate_control control = {
		.inode = BTRFS_I(vfs_inode),
		.new_size = 0,
		.ino = btrfs_ino(BTRFS_I(vfs_inode)),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;
	struct extent_state *cached_state = NULL;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(vfs_inode, 0);

	lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);

	/*
	 * We skip the throttling logic for free space cache inodes, so we don't
	 * need to check for -EAGAIN.
	 */
	ret = btrfs_truncate_inode_items(trans, root, &control);

	inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
	btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

	unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

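/*
 * Kick off readahead for the whole cache file so the sequential reads that
 * follow are likely to hit the page cache.
 */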
static void readahead_cache(struct inode *inode)
{
	struct file_ra_state ra;
	unsigned long last_index;

	file_ra_state_init(&ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index);
}

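/*
 * Set up the I/O context for a cache file: allocate the page pointer array
 * and, for writes, make sure one crc32c per page plus the 64bit generation
 * still fit into the first page.
 */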
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* Make sure we can fit our crcs and generation into the first page */
	if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			btrfs_page_clear_checked(io_ctl->fs_info,
					io_ctl->pages[i],
					page_offset(io_ctl->pages[i]),
					PAGE_SIZE);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
{
	struct page *page;
	struct inode *inode = io_ctl->inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		int ret;

		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			io_ctl_drop_pages(io_ctl);
			return ret;
		}

		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_read_folio(NULL, page_folio(page));
			lock_page(page);
			if (page->mapping != inode->i_mapping) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "free space cache page truncated");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++)
		clear_page_dirty_for_io(io_ctl->pages[i]);

	return 0;
}

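/*
 * The first page of the cache file begins with one crc32c slot per page,
 * followed by the 64bit generation of the transaction that wrote the cache.
 * The next two helpers write and verify that generation.
 */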
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas. If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
	io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);

	put_unaligned_le64(generation, io_ctl->cur);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	u64 cache_gen;

	/*
	 * Skip the crc area. If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
	io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);

	cache_gen = get_unaligned_le64(io_ctl->cur);
	if (cache_gen != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			     "space cache generation (%llu) does not match inode (%llu)",
			     cache_gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

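/*
 * Checksum the page at 'index' with crc32c and store the result in the crc
 * array at the front of the first page. For page 0 the crc array itself is
 * excluded from the checksummed range.
 */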
static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			     "csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	put_unaligned_le64(offset, &entry->offset);
	put_unaligned_le64(bytes, &entry->bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	copy_page(io_ctl->cur, bitmap);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = get_unaligned_le64(&e->offset);
	entry->bytes = get_unaligned_le64(&e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	copy_page(entry->bitmap, io_ctl->cur);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

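/*
 * Recompute ctl->extents_thresh, the number of extent entries allowed before
 * new free space is stored in bitmaps instead, aiming to keep the cache at
 * roughly MAX_CACHE_BYTES_PER_GIG of memory per 1GiB of block group size.
 */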
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group *block_group = ctl->block_group;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->length;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	if (ctl->total_bitmaps > max_bitmaps)
		btrfs_err(block_group->fs_info,
"invalid free space control: bg start=%llu len=%llu total_bitmaps=%u unit=%u max_bitmaps=%llu bytes_per_bg=%llu",
			  block_group->start, block_group->length,
			  ctl->total_bitmaps, ctl->unit, max_bitmaps,
			  bytes_per_bg);
	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * We are trying to keep the total amount of memory used per 1GiB of
	 * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation
	 * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
	 * bitmaps, we may end up using more memory than this.
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	bitmap_bytes = ctl->total_bitmaps * ctl->unit;

	/*
	 * Keep the extent entry threshold at no more than half of max_bytes:
	 * whatever is left over after the bitmaps, capped at max_bytes / 2.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

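/*
 * Load a cache file into 'ctl'. Returns 1 if the cache was loaded, 0 if it
 * should be ignored (missing, empty or stale generation) and a negative
 * value on failure. If the file turns out to be corrupt, the partially
 * built tree is torn down again before returning.
 */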
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, true);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e) {
			ret = -ENOMEM;
			goto free_cache;
		}

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			ret = -1;
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kmem_cache_zalloc(
					btrfs_free_space_bitmap_cachep, GFP_NOFS);
			if (!e->bitmap) {
				ret = -ENOMEM;
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			recalculate_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * The bitmap pages are stored after all the entries, in the same
	 * order their entries were added to the cache, so read them back in
	 * list order.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);

	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache(ctl);
	spin_unlock(&ctl->tree_lock);
	goto out;
}

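/*
 * Move every entry from a temporary free space ctl into the block group's
 * real one via btrfs_add_free_space(), so the normal insertion and discard
 * accounting logic applies. Bitmaps are drained one free range at a time
 * and then freed.
 */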
static int copy_free_space_cache(struct btrfs_block_group *block_group,
				 struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int ret = 0;

	while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info, true);
			ret = btrfs_add_free_space(block_group, info->offset,
						   info->bytes);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			u64 offset = info->offset;
			u64 bytes = ctl->unit;

			while (search_bitmap(ctl, info, &offset, &bytes,
					     false) == 0) {
				ret = btrfs_add_free_space(block_group, offset,
							   bytes);
				if (ret)
					break;
				bitmap_clear_bits(ctl, info, offset, bytes, true);
				offset = info->offset;
				bytes = ctl->unit;
			}
			free_bitmap(ctl, info);
		}
		cond_resched();
	}
	return ret;
}

static struct lock_class_key btrfs_free_space_inode_key;

int load_free_space_cache(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space_ctl tmp_ctl = {};
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = block_group->used;

	/*
	 * Because we could potentially discard our loaded free space, we want
	 * to load everything into a temporary structure first, and then if it's
	 * valid copy it all into the actual free space ctl.
	 */
	btrfs_init_free_space_ctl(block_group, &tmp_ctl);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * We must pass a path with search_commit_root set to btrfs_iget in
	 * order to avoid a deadlock when allocating extents for the tree root.
	 *
	 * When we are COWing an extent buffer from the tree root, when looking
	 * for a free extent, at extent-tree.c:find_free_extent(), we can find a
	 * block group without its free space cache loaded. When we find one
	 * we must load its space cache which requires reading its free space
	 * cache's inode item from the root tree. If this inode item is located
	 * in the same leaf that we started COWing before, then we end up in
	 * deadlock on the extent buffer (trying to read lock it when we
	 * previously write locked it).
	 *
	 * It's safe to read the inode item using the commit root because
	 * block groups, once loaded, stay in memory forever (until they are
	 * removed) as well as their space caches once loaded. New block groups
	 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
	 * we will never try to read their inode item while the fs is mounted.
	 */
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	/*
	 * Reinitialize the class of struct inode's mapping->invalidate_lock for
	 * free space inodes to prevent false positives related to locks for normal
	 * inodes.
	 */
	lockdep_set_class(&(&inode->i_data)->invalidate_lock,
			  &btrfs_free_space_inode_key);

	ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
				      path, block_group->start);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	matched = (tmp_ctl.free_space == (block_group->length - used -
					  block_group->bytes_super));

	if (matched) {
		ret = copy_free_space_cache(block_group, &tmp_ctl);
		/*
		 * ret == 1 means we successfully loaded the free space cache,
		 * so we need to re-set it here.
		 */
		if (ret == 0)
			ret = 1;
	} else {
		/*
		 * We need to call the _locked variant so we don't try to update
		 * the discard counters.
		 */
		spin_lock(&tmp_ctl.tree_lock);
		__btrfs_remove_free_space_cache(&tmp_ctl);
		spin_unlock(&tmp_ctl.tree_lock);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->start);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->start);
	}

	spin_lock(&ctl->tree_lock);
	btrfs_discard_update_discardable(block_group);
	spin_unlock(&ctl->tree_lock);
	iput(inode);
	return ret;
}

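/*
 * Write all extent entries into the cache file: the entries in the rbtree,
 * the ones in the block group's allocation cluster, and any ranges currently
 * being trimmed. Bitmap entries are only counted and collected on
 * 'bitmap_list' here; their pages are written later.
 */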
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			       struct btrfs_free_space_ctl *ctl,
			       struct btrfs_block_group *block_group,
			       int *entries, int *bitmaps,
			       struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

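/*
 * Update the free space header item with the new entry and bitmap counts and
 * the current transaction id, which marks the cache file as valid. On
 * failure the delalloc bits on the cache pages are cleared so the stale
 * pages are not written out.
 */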
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1, EXTENT_DELALLOC,
					 NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int write_pinned_extent_entries(
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = &trans->transaction->pinned_extents;

	start = block_group->start;

	while (start < block_group->start + block_group->length) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->start + block_group->length)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->start + block_group->length,
				 extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, NULL);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
		      cached_state);
}

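/*
 * Second half of the cache writeout: wait for the I/O started by
 * __btrfs_write_out_cache() below, then update the cache item so the file is
 * seen as valid on the next mount. The block group moves to BTRFS_DC_WRITTEN
 * (or BTRFS_DC_ERROR on failure) and the inode reference is dropped.
 */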
static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group)
			btrfs_debug(root->fs_info,
				    "failed to write free space cache for block group %llu error %d",
				    block_group->start, ret);
	}
	btrfs_update_inode(trans, root, BTRFS_I(inode));

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO. Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->start);
}

/**
 * Write out cached info to an inode
 *
 * @root:        root the inode belongs to
 * @inode:       freespace inode we are writing out
 * @ctl:         free space cache we are going to write out
 * @block_group: block_group for this cache if it belongs to a block_group
 * @io_ctl:      holds context for the io
 * @trans:       the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount. This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, false);
	if (ret)
		goto out_unlock;

	lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
		    &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it. No locking needed
	 */
	ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
				io_ctl->num_pages, 0, i_size_read(inode),
				&cached_state, false);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);
	io_ctl_free(io_ctl);

	unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
		      &cached_state);

	/*
	 * at this point the pages are under IO and we're happy,
	 * The caller is responsible for waiting on them and updating
	 * the cache and the inode
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (must_iput)
		iput(inode);
	return ret;
}

int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
			  struct btrfs_block_group *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				      block_group, &block_group->io_ctl, trans);
	if (ret) {
		btrfs_debug(fs_info,
			    "failed to write free space cache for block group %llu error %d",
			    block_group->start, ret);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

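/*
 * Helpers for converting between byte offsets and bit positions within a
 * bitmap entry. 'unit' is the number of bytes covered by one bit, and
 * offset_to_bitmap() rounds an offset down to the start offset of the
 * bitmap that covers it.
 */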
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

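/*
 * Insert a free space entry into the offset-indexed rbtree. An extent entry
 * and a bitmap entry may share an offset; in that case the extent entry
 * sorts before the bitmap entry. Two entries of the same kind at the same
 * offset are a bug and fail with -EEXIST.
 */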
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset. If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap. So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically. If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * This is a little subtle. We *only* have ->max_extent_size set if we actually
 * searched through the bitmap and figured out the largest ->max_extent_size,
 * otherwise it's 0. In the case that it's 0 we don't want to tell the
 * allocator the wrong thing, we want to use the actual real max_extent_size
 * we've found already if it's larger, or we want to use ->bytes.
 *
 * This matters because find_free_space() will skip entries whose ->bytes is
 * less than the required bytes. So if we didn't search down this bitmap, we
 * may pick some previous entry that has a smaller ->max_extent_size than we
 * have. For example, assume we have two entries, one that has
 * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
 * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
 * call into find_free_space(), and return with max_extent_size == 4K, because
 * that first bitmap entry had ->max_extent_size set, but the second one did
 * not. If instead we returned 8K we'd come in searching for 8K, and find the
 * 8K contiguous range.
 *
 * Consider the other case, we have 2 8K chunks in that second entry and still
 * don't have ->max_extent_size set. We'll return 16K, and the next time the
 * allocator comes in it'll fully search our second bitmap, and this time it'll
 * get an uptodate value of 8K as the maximum chunk size. Then we'll get the
 * right allocation the next loop through.
 */
static inline u64 get_max_extent_size(const struct btrfs_free_space *entry)
{
	if (entry->bitmap && entry->max_extent_size)
		return entry->max_extent_size;
	return entry->bytes;
}

/*
 * We want the largest entry to be leftmost, so this is inverted from what you'd
 * normally expect.
 */
static bool entry_less(struct rb_node *node, const struct rb_node *parent)
{
	const struct btrfs_free_space *entry, *exist;

	entry = rb_entry(node, struct btrfs_free_space, bytes_index);
	exist = rb_entry(parent, struct btrfs_free_space, bytes_index);
	return get_max_extent_size(exist) < get_max_extent_size(entry);
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry = NULL, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;

		entry = NULL;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}
	}
	return entry;
}

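/*
 * Unlink an entry from both indexes (the offset rbtree and the bytes-sorted
 * rbtree) and update the free space and discard counters; link_free_space()
 * below is the inverse. Both expect ctl->tree_lock to be held.
 */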
unlink_free_space(struct btrfs_free_space_ctl * ctl,struct btrfs_free_space * info,bool update_stat)1803 static inline void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1804 struct btrfs_free_space *info,
1805 bool update_stat)
1806 {
1807 rb_erase(&info->offset_index, &ctl->free_space_offset);
1808 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1809 ctl->free_extents--;
1810
1811 if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1812 ctl->discardable_extents[BTRFS_STAT_CURR]--;
1813 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
1814 }
1815
1816 if (update_stat)
1817 ctl->free_space -= info->bytes;
1818 }
1819
1820 static int link_free_space(struct btrfs_free_space_ctl *ctl,
1821 struct btrfs_free_space *info)
1822 {
1823 int ret = 0;
1824
1825 ASSERT(info->bytes || info->bitmap);
1826 ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
1827 &info->offset_index, (info->bitmap != NULL));
1828 if (ret)
1829 return ret;
1830
1831 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1832
1833 if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1834 ctl->discardable_extents[BTRFS_STAT_CURR]++;
1835 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
1836 }
1837
1838 ctl->free_space += info->bytes;
1839 ctl->free_extents++;
1840 return ret;
1841 }
1842
1843 static void relink_bitmap_entry(struct btrfs_free_space_ctl *ctl,
1844 struct btrfs_free_space *info)
1845 {
1846 ASSERT(info->bitmap);
1847
1848 /*
1849 * If our entry is empty it's because we're on a cluster and we don't
1850 * want to re-link it into our ctl bytes index.
1851 */
1852 if (RB_EMPTY_NODE(&info->bytes_index))
1853 return;
1854
1855 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1856 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1857 }
1858
1859 static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1860 struct btrfs_free_space *info,
1861 u64 offset, u64 bytes, bool update_stat)
1862 {
1863 unsigned long start, count, end;
1864 int extent_delta = -1;
1865
1866 start = offset_to_bit(info->offset, ctl->unit, offset);
1867 count = bytes_to_bits(bytes, ctl->unit);
1868 end = start + count;
1869 ASSERT(end <= BITS_PER_BITMAP);
1870
1871 bitmap_clear(info->bitmap, start, count);
1872
1873 info->bytes -= bytes;
1874 if (info->max_extent_size > ctl->unit)
1875 info->max_extent_size = 0;
1876
1877 relink_bitmap_entry(ctl, info);
1878
1879 if (start && test_bit(start - 1, info->bitmap))
1880 extent_delta++;
1881
1882 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1883 extent_delta++;
1884
1885 info->bitmap_extents += extent_delta;
1886 if (!btrfs_free_space_trimmed(info)) {
1887 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1888 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
1889 }
1890
1891 if (update_stat)
1892 ctl->free_space -= bytes;
1893 }
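
/*
 * Worked example for the extent_delta accounting above: clearing bits in
 * the middle of a run, e.g. 111111 -> 110011, leaves a set bit on both
 * sides, so one old extent becomes two and extent_delta rises from -1 to
 * +1. Clearing at the edge of a run (111100 -> 110000) has one set
 * neighbor, for a delta of 0: an extent shrank but none were created or
 * destroyed. Clearing a whole isolated run keeps the initial -1.
 */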
1894
1895 static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1896 struct btrfs_free_space *info, u64 offset,
1897 u64 bytes)
1898 {
1899 unsigned long start, count, end;
1900 int extent_delta = 1;
1901
1902 start = offset_to_bit(info->offset, ctl->unit, offset);
1903 count = bytes_to_bits(bytes, ctl->unit);
1904 end = start + count;
1905 ASSERT(end <= BITS_PER_BITMAP);
1906
1907 bitmap_set(info->bitmap, start, count);
1908
1909 /*
1910 * We set some bytes, so we have no idea what the max extent size is
1911 * anymore.
1912 */
1913 info->max_extent_size = 0;
1914 info->bytes += bytes;
1915 ctl->free_space += bytes;
1916
1917 relink_bitmap_entry(ctl, info);
1918
1919 if (start && test_bit(start - 1, info->bitmap))
1920 extent_delta--;
1921
1922 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1923 extent_delta--;
1924
1925 info->bitmap_extents += extent_delta;
1926 if (!btrfs_free_space_trimmed(info)) {
1927 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1928 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes;
1929 }
1930 }
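
/*
 * The mirror image of bitmap_clear_bits(): setting bits that exactly
 * bridge two runs, e.g. 110011 -> 111111, sees a set bit on both sides,
 * so extent_delta drops from +1 to -1 and two extents collapse into one.
 * Extending a single run gives a delta of 0, and setting bits with clear
 * neighbors creates a brand new extent, keeping the initial +1.
 */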
1931
1932 /*
1933 * If we cannot find a suitable extent, we will use *bytes to record
1934 * the size of the max extent found.
1935 */
1936 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1937 struct btrfs_free_space *bitmap_info, u64 *offset,
1938 u64 *bytes, bool for_alloc)
1939 {
1940 unsigned long found_bits = 0;
1941 unsigned long max_bits = 0;
1942 unsigned long bits, i;
1943 unsigned long next_zero;
1944 unsigned long extent_bits;
1945
1946 /*
1947 * Skip searching the bitmap if we don't have a contiguous section that
1948 * is large enough for this allocation.
1949 */
1950 if (for_alloc &&
1951 bitmap_info->max_extent_size &&
1952 bitmap_info->max_extent_size < *bytes) {
1953 *bytes = bitmap_info->max_extent_size;
1954 return -1;
1955 }
1956
1957 i = offset_to_bit(bitmap_info->offset, ctl->unit,
1958 max_t(u64, *offset, bitmap_info->offset));
1959 bits = bytes_to_bits(*bytes, ctl->unit);
1960
1961 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1962 if (for_alloc && bits == 1) {
1963 found_bits = 1;
1964 break;
1965 }
1966 next_zero = find_next_zero_bit(bitmap_info->bitmap,
1967 BITS_PER_BITMAP, i);
1968 extent_bits = next_zero - i;
1969 if (extent_bits >= bits) {
1970 found_bits = extent_bits;
1971 break;
1972 } else if (extent_bits > max_bits) {
1973 max_bits = extent_bits;
1974 }
1975 i = next_zero;
1976 }
1977
1978 if (found_bits) {
1979 *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1980 *bytes = (u64)(found_bits) * ctl->unit;
1981 return 0;
1982 }
1983
1984 *bytes = (u64)(max_bits) * ctl->unit;
1985 bitmap_info->max_extent_size = *bytes;
1986 relink_bitmap_entry(ctl, bitmap_info);
1987 return -1;
1988 }
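
/*
 * Illustrative numbers: with ctl->unit == 4K and a request of
 * *bytes == 64K (16 bits), a bitmap whose largest contiguous run is only
 * 8 set bits fails the search. *bytes comes back as 32K, that value is
 * cached in max_extent_size so the check at the top can reject this
 * bitmap outright next time, and relink_bitmap_entry() re-sorts the
 * entry in the bytes index to match.
 */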
1989
1990 /* Cache the size of the max extent in bytes */
1991 static struct btrfs_free_space *
1992 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1993 unsigned long align, u64 *max_extent_size, bool use_bytes_index)
1994 {
1995 struct btrfs_free_space *entry;
1996 struct rb_node *node;
1997 u64 tmp;
1998 u64 align_off;
1999 int ret;
2000
2001 if (!ctl->free_space_offset.rb_node)
2002 goto out;
2003 again:
2004 if (use_bytes_index) {
2005 node = rb_first_cached(&ctl->free_space_bytes);
2006 } else {
2007 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset),
2008 0, 1);
2009 if (!entry)
2010 goto out;
2011 node = &entry->offset_index;
2012 }
2013
2014 for (; node; node = rb_next(node)) {
2015 if (use_bytes_index)
2016 entry = rb_entry(node, struct btrfs_free_space,
2017 bytes_index);
2018 else
2019 entry = rb_entry(node, struct btrfs_free_space,
2020 offset_index);
2021
2022 /*
2023 * If we are using the bytes index then all subsequent entries
2024 * in this tree are going to be < bytes, so simply set the max
2025 * extent size and exit the loop.
2026 *
2027 * If we're using the offset index then we need to keep going
2028 * through the rest of the tree.
2029 */
2030 if (entry->bytes < *bytes) {
2031 *max_extent_size = max(get_max_extent_size(entry),
2032 *max_extent_size);
2033 if (use_bytes_index)
2034 break;
2035 continue;
2036 }
2037
2038 /* make sure the space returned is big enough
2039 * to match our requested alignment
2040 */
2041 if (*bytes >= align) {
2042 tmp = entry->offset - ctl->start + align - 1;
2043 tmp = div64_u64(tmp, align);
2044 tmp = tmp * align + ctl->start;
2045 align_off = tmp - entry->offset;
2046 } else {
2047 align_off = 0;
2048 tmp = entry->offset;
2049 }
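
/*
 * Example of the round-up above: with ctl->start == 1M,
 * entry->offset == 1M + 8K and align == 64K, tmp becomes
 * ((8K + 64K - 1) / 64K) * 64K + 1M == 1M + 64K, so align_off == 56K:
 * the first 56K of the entry are skipped to reach the next aligned
 * address relative to the block group start.
 */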
2050
2051 /*
2052 * We don't break here if we're using the bytes index because we
2053 * may have another entry that has the correct alignment that is
2054 * the right size, so we don't want to miss that possibility.
2055 * At worst this adds another loop through the logic, but if we
2056 * broke here we could prematurely ENOSPC.
2057 */
2058 if (entry->bytes < *bytes + align_off) {
2059 *max_extent_size = max(get_max_extent_size(entry),
2060 *max_extent_size);
2061 continue;
2062 }
2063
2064 if (entry->bitmap) {
2065 struct rb_node *old_next = rb_next(node);
2066 u64 size = *bytes;
2067
2068 ret = search_bitmap(ctl, entry, &tmp, &size, true);
2069 if (!ret) {
2070 *offset = tmp;
2071 *bytes = size;
2072 return entry;
2073 } else {
2074 *max_extent_size =
2075 max(get_max_extent_size(entry),
2076 *max_extent_size);
2077 }
2078
2079 /*
2080 * The bitmap may have gotten re-arranged in the space
2081 * index here because the max_extent_size may have been
2082 * updated. Start from the beginning again if this
2083 * happened.
2084 */
2085 if (use_bytes_index && old_next != rb_next(node))
2086 goto again;
2087 continue;
2088 }
2089
2090 *offset = tmp;
2091 *bytes = entry->bytes - align_off;
2092 return entry;
2093 }
2094 out:
2095 return NULL;
2096 }
2097
2098 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
2099 struct btrfs_free_space *info, u64 offset)
2100 {
2101 info->offset = offset_to_bitmap(ctl, offset);
2102 info->bytes = 0;
2103 info->bitmap_extents = 0;
2104 INIT_LIST_HEAD(&info->list);
2105 link_free_space(ctl, info);
2106 ctl->total_bitmaps++;
2107 recalculate_thresholds(ctl);
2108 }
2109
2110 static void free_bitmap(struct btrfs_free_space_ctl *ctl,
2111 struct btrfs_free_space *bitmap_info)
2112 {
2113 /*
2114 * Normally when this is called, the bitmap is completely empty. However,
2115 * if we are blowing up the free space cache for one reason or another
2116 * via __btrfs_remove_free_space_cache(), then it may not be empty and
2117 * we may leave stats on the table.
2118 */
2119 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) {
2120 ctl->discardable_extents[BTRFS_STAT_CURR] -=
2121 bitmap_info->bitmap_extents;
2122 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes;
2124 }
2125 unlink_free_space(ctl, bitmap_info, true);
2126 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
2127 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
2128 ctl->total_bitmaps--;
2129 recalculate_thresholds(ctl);
2130 }
2131
2132 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
2133 struct btrfs_free_space *bitmap_info,
2134 u64 *offset, u64 *bytes)
2135 {
2136 u64 end;
2137 u64 search_start, search_bytes;
2138 int ret;
2139
2140 again:
2141 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
2142
2143 /*
2144 * We need to search for bits in this bitmap. We could only cover some
2145 * of the extent in this bitmap thanks to how we add space, so we need
2146 * to search for as much of it as we can and clear that amount, and then
2147 * go searching for the next bit.
2148 */
2149 search_start = *offset;
2150 search_bytes = ctl->unit;
2151 search_bytes = min(search_bytes, end - search_start + 1);
2152 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
2153 false);
2154 if (ret < 0 || search_start != *offset)
2155 return -EINVAL;
2156
2157 /* We may have found more bits than what we need */
2158 search_bytes = min(search_bytes, *bytes);
2159
2160 /* Cannot clear past the end of the bitmap */
2161 search_bytes = min(search_bytes, end - search_start + 1);
2162
2163 bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes, true);
2164 *offset += search_bytes;
2165 *bytes -= search_bytes;
2166
2167 if (*bytes) {
2168 struct rb_node *next = rb_next(&bitmap_info->offset_index);
2169 if (!bitmap_info->bytes)
2170 free_bitmap(ctl, bitmap_info);
2171
2172 /*
2173 * no entry after this bitmap, but we still have bytes to
2174 * remove, so something has gone wrong.
2175 */
2176 if (!next)
2177 return -EINVAL;
2178
2179 bitmap_info = rb_entry(next, struct btrfs_free_space,
2180 offset_index);
2181
2182 /*
2183 * if the next entry isn't a bitmap we need to return to let the
2184 * extent stuff do its work.
2185 */
2186 if (!bitmap_info->bitmap)
2187 return -EAGAIN;
2188
2189 /*
2190 * Ok the next item is a bitmap, but it may not actually hold
2191 * the information for the rest of this free space stuff, so
2192 * look for it, and if we don't find it return so we can try
2193 * everything over again.
2194 */
2195 search_start = *offset;
2196 search_bytes = ctl->unit;
2197 ret = search_bitmap(ctl, bitmap_info, &search_start,
2198 &search_bytes, false);
2199 if (ret < 0 || search_start != *offset)
2200 return -EAGAIN;
2201
2202 goto again;
2203 } else if (!bitmap_info->bytes)
2204 free_bitmap(ctl, bitmap_info);
2205
2206 return 0;
2207 }
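
/*
 * Illustrative walk-through: removing a range that starts in one bitmap
 * and spills into the next, e.g. 120K starting 8K before the end of a
 * bitmap window, clears the 8K tail here, advances *offset past it, and
 * loops via "goto again" into the neighboring bitmap entry. If the
 * neighbor turns out to be an extent entry instead, -EAGAIN sends the
 * caller back to the extent handling in btrfs_remove_free_space().
 */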
2208
2209 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
2210 struct btrfs_free_space *info, u64 offset,
2211 u64 bytes, enum btrfs_trim_state trim_state)
2212 {
2213 u64 bytes_to_set = 0;
2214 u64 end;
2215
2216 /*
2217 * This is a tradeoff to make bitmap trim state minimal. We mark the
2218 * whole bitmap untrimmed if at any point we add untrimmed regions.
2219 */
2220 if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) {
2221 if (btrfs_free_space_trimmed(info)) {
2222 ctl->discardable_extents[BTRFS_STAT_CURR] +=
2223 info->bitmap_extents;
2224 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
2225 }
2226 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2227 }
2228
2229 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
2230
2231 bytes_to_set = min(end - offset, bytes);
2232
2233 bitmap_set_bits(ctl, info, offset, bytes_to_set);
2234
2235 return bytes_to_set;
2237 }
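
/*
 * Illustrative clamp (assuming 4K pages and 4K units, so a bitmap window
 * spans 128M): adding 1M of space starting 256K before the end of the
 * window only sets the last 64 bits, i.e. bytes_to_set == 256K. The
 * caller in insert_into_bitmap() subtracts that and loops to place the
 * remaining 768K into the next bitmap.
 */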
2238
2239 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
2240 struct btrfs_free_space *info)
2241 {
2242 struct btrfs_block_group *block_group = ctl->block_group;
2243 struct btrfs_fs_info *fs_info = block_group->fs_info;
2244 bool forced = false;
2245
2246 #ifdef CONFIG_BTRFS_DEBUG
2247 if (btrfs_should_fragment_free_space(block_group))
2248 forced = true;
2249 #endif
2250
2251 /* This is a way to reclaim large regions from the bitmaps. */
2252 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD)
2253 return false;
2254
2255 /*
2256 * If we are below the extents threshold then we can add this as an
2257 * extent, and don't have to deal with the bitmap
2258 */
2259 if (!forced && ctl->free_extents < ctl->extents_thresh) {
2260 /*
2261 * If this block group has some small extents we don't want to
2262 * use up all of our free slots in the cache with them, we want
2263 * to reserve them for larger extents. However, if we have plenty
2264 * of cache left then go ahead and add them, no sense in adding
2265 * the overhead of a bitmap if we don't have to.
2266 */
2267 if (info->bytes <= fs_info->sectorsize * 8) {
2268 if (ctl->free_extents * 3 <= ctl->extents_thresh)
2269 return false;
2270 } else {
2271 return false;
2272 }
2273 }
2274
2275 /*
2276 * The original block groups from mkfs can be really small, like 8
2277 * megabytes, so don't bother with a bitmap for those entries. However
2278 * some block groups can be smaller than what a bitmap would cover but
2279 * are still large enough that they could overflow the 32k memory limit,
2280 * so those block groups are still allowed to have a bitmap
2281 * entry.
2282 */
2283 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
2284 return false;
2285
2286 return true;
2287 }
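
/*
 * Illustrative thresholds (assuming a 4K sectorsize): "small" here means
 * extents of at most 32K. Such extents only start going into bitmaps
 * once more than a third of extents_thresh slots are occupied, larger
 * extents stay extents while we are under the threshold, and anything of
 * FORCE_EXTENT_THRESHOLD (1M) or more is always kept as an extent entry.
 */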
2288
2289 static const struct btrfs_free_space_op free_space_op = {
2290 .use_bitmap = use_bitmap,
2291 };
2292
2293 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
2294 struct btrfs_free_space *info)
2295 {
2296 struct btrfs_free_space *bitmap_info;
2297 struct btrfs_block_group *block_group = NULL;
2298 int added = 0;
2299 u64 bytes, offset, bytes_added;
2300 enum btrfs_trim_state trim_state;
2301 int ret;
2302
2303 bytes = info->bytes;
2304 offset = info->offset;
2305 trim_state = info->trim_state;
2306
2307 if (!ctl->op->use_bitmap(ctl, info))
2308 return 0;
2309
2310 if (ctl->op == &free_space_op)
2311 block_group = ctl->block_group;
2312 again:
2313 /*
2314 * Since we link bitmaps right into the cluster we need to see if we
2315 * have a cluster here, and if so and it has our bitmap we need to add
2316 * the free space to that bitmap.
2317 */
2318 if (block_group && !list_empty(&block_group->cluster_list)) {
2319 struct btrfs_free_cluster *cluster;
2320 struct rb_node *node;
2321 struct btrfs_free_space *entry;
2322
2323 cluster = list_entry(block_group->cluster_list.next,
2324 struct btrfs_free_cluster,
2325 block_group_list);
2326 spin_lock(&cluster->lock);
2327 node = rb_first(&cluster->root);
2328 if (!node) {
2329 spin_unlock(&cluster->lock);
2330 goto no_cluster_bitmap;
2331 }
2332
2333 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2334 if (!entry->bitmap) {
2335 spin_unlock(&cluster->lock);
2336 goto no_cluster_bitmap;
2337 }
2338
2339 if (entry->offset == offset_to_bitmap(ctl, offset)) {
2340 bytes_added = add_bytes_to_bitmap(ctl, entry, offset,
2341 bytes, trim_state);
2342 bytes -= bytes_added;
2343 offset += bytes_added;
2344 }
2345 spin_unlock(&cluster->lock);
2346 if (!bytes) {
2347 ret = 1;
2348 goto out;
2349 }
2350 }
2351
2352 no_cluster_bitmap:
2353 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2354 1, 0);
2355 if (!bitmap_info) {
2356 ASSERT(added == 0);
2357 goto new_bitmap;
2358 }
2359
2360 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
2361 trim_state);
2362 bytes -= bytes_added;
2363 offset += bytes_added;
2364 added = 0;
2365
2366 if (!bytes) {
2367 ret = 1;
2368 goto out;
2369 } else
2370 goto again;
2371
2372 new_bitmap:
2373 if (info && info->bitmap) {
2374 add_new_bitmap(ctl, info, offset);
2375 added = 1;
2376 info = NULL;
2377 goto again;
2378 } else {
2379 spin_unlock(&ctl->tree_lock);
2380
2381 /* no pre-allocated info, allocate a new one */
2382 if (!info) {
2383 info = kmem_cache_zalloc(btrfs_free_space_cachep,
2384 GFP_NOFS);
2385 if (!info) {
2386 spin_lock(&ctl->tree_lock);
2387 ret = -ENOMEM;
2388 goto out;
2389 }
2390 }
2391
2392 /* allocate the bitmap */
2393 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
2394 GFP_NOFS);
2395 info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
2396 spin_lock(&ctl->tree_lock);
2397 if (!info->bitmap) {
2398 ret = -ENOMEM;
2399 goto out;
2400 }
2401 goto again;
2402 }
2403
2404 out:
2405 if (info) {
2406 if (info->bitmap)
2407 kmem_cache_free(btrfs_free_space_bitmap_cachep,
2408 info->bitmap);
2409 kmem_cache_free(btrfs_free_space_cachep, info);
2410 }
2411
2412 return ret;
2413 }
2414
2415 /*
2416 * Free space merging rules:
2417 * 1) Merge trimmed areas together
2418 * 2) Let untrimmed areas coalesce with trimmed areas
2419 * 3) Always pull neighboring regions from bitmaps
2420 *
2421 * The above rules are for when we merge free space based on btrfs_trim_state.
2422 * Rules 2 and 3 are subtle because they are suboptimal, but are done for the
2423 * same reason: to promote larger extent regions which makes life easier for
2424 * find_free_extent(). Rule 2 enables coalescing based on the common path
2425 * of returning free space from btrfs_finish_extent_commit(). Without it,
2426 * trimming free space would prevent aggregating a newly trimmed region
2427 * with adjacent untrimmed regions in the rb_tree. Rule 3 is purely to
2428 * obtain larger extents and provide find_free_extent() with the largest
2429 * extents possible, hoping for the reuse path.
2430 */
2431 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2432 struct btrfs_free_space *info, bool update_stat)
2433 {
2434 struct btrfs_free_space *left_info = NULL;
2435 struct btrfs_free_space *right_info;
2436 bool merged = false;
2437 u64 offset = info->offset;
2438 u64 bytes = info->bytes;
2439 const bool is_trimmed = btrfs_free_space_trimmed(info);
2440
2441 /*
2442 * First we want to see if there is free space adjacent to the range we
2443 * are adding. If there is, remove that struct and add a new one to
2444 * cover the entire range.
2445 */
2446 right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
2447 if (right_info && rb_prev(&right_info->offset_index))
2448 left_info = rb_entry(rb_prev(&right_info->offset_index),
2449 struct btrfs_free_space, offset_index);
2450 else if (!right_info)
2451 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2452
2453 /* See try_merge_free_space() comment. */
2454 if (right_info && !right_info->bitmap &&
2455 (!is_trimmed || btrfs_free_space_trimmed(right_info))) {
2456 unlink_free_space(ctl, right_info, update_stat);
2457 info->bytes += right_info->bytes;
2458 kmem_cache_free(btrfs_free_space_cachep, right_info);
2459 merged = true;
2460 }
2461
2462 /* See try_merge_free_space() comment. */
2463 if (left_info && !left_info->bitmap &&
2464 left_info->offset + left_info->bytes == offset &&
2465 (!is_trimmed || btrfs_free_space_trimmed(left_info))) {
2466 unlink_free_space(ctl, left_info, update_stat);
2467 info->offset = left_info->offset;
2468 info->bytes += left_info->bytes;
2469 kmem_cache_free(btrfs_free_space_cachep, left_info);
2470 merged = true;
2471 }
2472
2473 return merged;
2474 }
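
/*
 * Illustrative merge under the rules above: inserting an untrimmed
 * extent [4K, 8K) next to a trimmed extent [0, 4K) coalesces them into a
 * single untrimmed [0, 8K) entry, since is_trimmed is false and the
 * "(!is_trimmed || btrfs_free_space_trimmed(...))" check passes. The
 * reverse, a trimmed extent meeting an untrimmed neighbor, is refused so
 * a trimmed entry never silently absorbs untrimmed space.
 */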
2475
2476 static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
2477 struct btrfs_free_space *info,
2478 bool update_stat)
2479 {
2480 struct btrfs_free_space *bitmap;
2481 unsigned long i;
2482 unsigned long j;
2483 const u64 end = info->offset + info->bytes;
2484 const u64 bitmap_offset = offset_to_bitmap(ctl, end);
2485 u64 bytes;
2486
2487 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2488 if (!bitmap)
2489 return false;
2490
2491 i = offset_to_bit(bitmap->offset, ctl->unit, end);
2492 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2493 if (j == i)
2494 return false;
2495 bytes = (j - i) * ctl->unit;
2496 info->bytes += bytes;
2497
2498 /* See try_merge_free_space() comment. */
2499 if (!btrfs_free_space_trimmed(bitmap))
2500 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2501
2502 bitmap_clear_bits(ctl, bitmap, end, bytes, update_stat);
2503
2504 if (!bitmap->bytes)
2505 free_bitmap(ctl, bitmap);
2506
2507 return true;
2508 }
2509
2510 static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
2511 struct btrfs_free_space *info,
2512 bool update_stat)
2513 {
2514 struct btrfs_free_space *bitmap;
2515 u64 bitmap_offset;
2516 unsigned long i;
2517 unsigned long j;
2518 unsigned long prev_j;
2519 u64 bytes;
2520
2521 bitmap_offset = offset_to_bitmap(ctl, info->offset);
2522 /* If we're on a boundary, try the previous logical bitmap. */
2523 if (bitmap_offset == info->offset) {
2524 if (info->offset == 0)
2525 return false;
2526 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
2527 }
2528
2529 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2530 if (!bitmap)
2531 return false;
2532
2533 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2534 j = 0;
2535 prev_j = (unsigned long)-1;
2536 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2537 if (j > i)
2538 break;
2539 prev_j = j;
2540 }
2541 if (prev_j == i)
2542 return false;
2543
2544 if (prev_j == (unsigned long)-1)
2545 bytes = (i + 1) * ctl->unit;
2546 else
2547 bytes = (i - prev_j) * ctl->unit;
2548
2549 info->offset -= bytes;
2550 info->bytes += bytes;
2551
2552 /* See try_merge_free_space() comment. */
2553 if (!btrfs_free_space_trimmed(bitmap))
2554 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2555
2556 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat);
2557
2558 if (!bitmap->bytes)
2559 free_bitmap(ctl, bitmap);
2560
2561 return true;
2562 }
2563
2564 /*
2565 * We prefer always to allocate from extent entries, both for clustered and
2566 * non-clustered allocation requests. So when attempting to add a new extent
2567 * entry, try to see if there's adjacent free space in bitmap entries, and if
2568 * there is, migrate that space from the bitmaps to the extent.
2569 * Like this we get better chances of satisfying space allocation requests
2570 * because we attempt to satisfy them based on a single cache entry, and never
2571 * on 2 or more entries - even if the entries represent a contiguous free space
2572 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2573 * ends).
2574 */
2575 static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2576 struct btrfs_free_space *info,
2577 bool update_stat)
2578 {
2579 /*
2580 * Only work with disconnected entries, as we can change their offset,
2581 * and they must be extent entries.
2582 */
2583 ASSERT(!info->bitmap);
2584 ASSERT(RB_EMPTY_NODE(&info->offset_index));
2585
2586 if (ctl->total_bitmaps > 0) {
2587 bool stole_end;
2588 bool stole_front = false;
2589
2590 stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2591 if (ctl->total_bitmaps > 0)
2592 stole_front = steal_from_bitmap_to_front(ctl, info,
2593 update_stat);
2594
2595 if (stole_end || stole_front)
2596 try_merge_free_space(ctl, info, update_stat);
2597 }
2598 }
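
/*
 * Illustrative steal (4K units): a new extent entry [1M, 1M + 64K) whose
 * neighboring bitmap has 16 set bits ending exactly at 1M and another 16
 * starting at 1M + 64K grows to [1M - 64K, 1M + 128K) as both runs are
 * cleared out of the bitmap. If the donor bitmap was untrimmed, the
 * grown extent is marked untrimmed as well, per the
 * try_merge_free_space() rules.
 */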
2599
2600 int __btrfs_add_free_space(struct btrfs_block_group *block_group,
2601 u64 offset, u64 bytes,
2602 enum btrfs_trim_state trim_state)
2603 {
2604 struct btrfs_fs_info *fs_info = block_group->fs_info;
2605 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2606 struct btrfs_free_space *info;
2607 int ret = 0;
2608 u64 filter_bytes = bytes;
2609
2610 ASSERT(!btrfs_is_zoned(fs_info));
2611
2612 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2613 if (!info)
2614 return -ENOMEM;
2615
2616 info->offset = offset;
2617 info->bytes = bytes;
2618 info->trim_state = trim_state;
2619 RB_CLEAR_NODE(&info->offset_index);
2620 RB_CLEAR_NODE(&info->bytes_index);
2621
2622 spin_lock(&ctl->tree_lock);
2623
2624 if (try_merge_free_space(ctl, info, true))
2625 goto link;
2626
2627 /*
2628 * If there was no extent directly to the left or right of this new
2629 * extent then we know we're going to have to allocate a new extent, so
2630 * before we do that see if we need to drop this into a bitmap
2631 */
2632 ret = insert_into_bitmap(ctl, info);
2633 if (ret < 0) {
2634 goto out;
2635 } else if (ret) {
2636 ret = 0;
2637 goto out;
2638 }
2639 link:
2640 /*
2641 * Only steal free space from adjacent bitmaps if we're sure we're not
2642 * going to add the new free space to existing bitmap entries - because
2643 * that would mean unnecessary work that would be reverted. Therefore
2644 * attempt to steal space from bitmaps if we're adding an extent entry.
2645 */
2646 steal_from_bitmap(ctl, info, true);
2647
2648 filter_bytes = max(filter_bytes, info->bytes);
2649
2650 ret = link_free_space(ctl, info);
2651 if (ret)
2652 kmem_cache_free(btrfs_free_space_cachep, info);
2653 out:
2654 btrfs_discard_update_discardable(block_group);
2655 spin_unlock(&ctl->tree_lock);
2656
2657 if (ret) {
2658 btrfs_crit(fs_info, "unable to add free space: %d", ret);
2659 ASSERT(ret != -EEXIST);
2660 }
2661
2662 if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
2663 btrfs_discard_check_filter(block_group, filter_bytes);
2664 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
2665 }
2666
2667 return ret;
2668 }
2669
2670 static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
2671 u64 bytenr, u64 size, bool used)
2672 {
2673 struct btrfs_space_info *sinfo = block_group->space_info;
2674 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2675 u64 offset = bytenr - block_group->start;
2676 u64 to_free, to_unusable;
2677 int bg_reclaim_threshold = 0;
2678 bool initial = (size == block_group->length);
2679 u64 reclaimable_unusable;
2680
2681 WARN_ON(!initial && offset + size > block_group->zone_capacity);
2682
2683 if (!initial)
2684 bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
2685
2686 spin_lock(&ctl->tree_lock);
2687 if (!used)
2688 to_free = size;
2689 else if (initial)
2690 to_free = block_group->zone_capacity;
2691 else if (offset >= block_group->alloc_offset)
2692 to_free = size;
2693 else if (offset + size <= block_group->alloc_offset)
2694 to_free = 0;
2695 else
2696 to_free = offset + size - block_group->alloc_offset;
2697 to_unusable = size - to_free;
2698
2699 ctl->free_space += to_free;
2700 /*
2701 * If the block group is read-only, we should account freed space into
2702 * bytes_readonly.
2703 */
2704 if (!block_group->ro)
2705 block_group->zone_unusable += to_unusable;
2706 spin_unlock(&ctl->tree_lock);
2707 if (!used) {
2708 spin_lock(&block_group->lock);
2709 block_group->alloc_offset -= size;
2710 spin_unlock(&block_group->lock);
2711 }
2712
2713 reclaimable_unusable = block_group->zone_unusable -
2714 (block_group->length - block_group->zone_capacity);
2715 /* The whole region is now unusable. Mark it as unused and reclaim. */
2716 if (block_group->zone_unusable == block_group->length) {
2717 btrfs_mark_bg_unused(block_group);
2718 } else if (bg_reclaim_threshold &&
2719 reclaimable_unusable >=
2720 div_factor_fine(block_group->zone_capacity,
2721 bg_reclaim_threshold)) {
2722 btrfs_mark_bg_to_reclaim(block_group);
2723 }
2724
2725 return 0;
2726 }
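
/*
 * Illustrative split of a freed region: with the write pointer
 * (alloc_offset) at 96M, freeing a used 8M region at offset 92M gives
 * to_free == 92M + 8M - 96M == 4M for the part at or beyond the write
 * pointer, and to_unusable == 4M for the part below it, which can only
 * be reclaimed by resetting the whole zone.
 */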
2727
2728 int btrfs_add_free_space(struct btrfs_block_group *block_group,
2729 u64 bytenr, u64 size)
2730 {
2731 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2732
2733 if (btrfs_is_zoned(block_group->fs_info))
2734 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2735 true);
2736
2737 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
2738 trim_state = BTRFS_TRIM_STATE_TRIMMED;
2739
2740 return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2741 }
2742
2743 int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
2744 u64 bytenr, u64 size)
2745 {
2746 if (btrfs_is_zoned(block_group->fs_info))
2747 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2748 false);
2749
2750 return btrfs_add_free_space(block_group, bytenr, size);
2751 }
2752
2753 /*
2754 * This is a subtle distinction because when adding free space back in general,
2755 * we want it to be added as untrimmed for async discard. But in the case
2756 * where we add it on loading of a block group, we want to consider it trimmed.
2757 */
2758 int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
2759 u64 bytenr, u64 size)
2760 {
2761 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2762
2763 if (btrfs_is_zoned(block_group->fs_info))
2764 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2765 true);
2766
2767 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
2768 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
2769 trim_state = BTRFS_TRIM_STATE_TRIMMED;
2770
2771 return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2772 }
2773
2774 int btrfs_remove_free_space(struct btrfs_block_group *block_group,
2775 u64 offset, u64 bytes)
2776 {
2777 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2778 struct btrfs_free_space *info;
2779 int ret;
2780 bool re_search = false;
2781
2782 if (btrfs_is_zoned(block_group->fs_info)) {
2783 /*
2784 * This can happen with conventional zones when replaying log.
2785 * Since the allocation info of tree-log nodes is not recorded
2786 * to the extent-tree, calculate_alloc_pointer() failed to
2787 * advance the allocation pointer after last allocated tree log
2788 * node blocks.
2789 *
2790 * This function is called from
2791 * btrfs_pin_extent_for_log_replay() when replaying the log.
2792 * Advance the pointer not to overwrite the tree-log nodes.
2793 */
2794 if (block_group->start + block_group->alloc_offset <
2795 offset + bytes) {
2796 block_group->alloc_offset =
2797 offset + bytes - block_group->start;
2798 }
2799 return 0;
2800 }
2801
2802 spin_lock(&ctl->tree_lock);
2803
2804 again:
2805 ret = 0;
2806 if (!bytes)
2807 goto out_lock;
2808
2809 info = tree_search_offset(ctl, offset, 0, 0);
2810 if (!info) {
2811 /*
2812 * oops didn't find an extent that matched the space we wanted
2813 * to remove, look for a bitmap instead
2814 */
2815 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2816 1, 0);
2817 if (!info) {
2818 /*
2819 * If we found a partial bit of our free space in a
2820 * bitmap but then couldn't find the other part this may
2821 * be a problem, so WARN about it.
2822 */
2823 WARN_ON(re_search);
2824 goto out_lock;
2825 }
2826 }
2827
2828 re_search = false;
2829 if (!info->bitmap) {
2830 unlink_free_space(ctl, info, true);
2831 if (offset == info->offset) {
2832 u64 to_free = min(bytes, info->bytes);
2833
2834 info->bytes -= to_free;
2835 info->offset += to_free;
2836 if (info->bytes) {
2837 ret = link_free_space(ctl, info);
2838 WARN_ON(ret);
2839 } else {
2840 kmem_cache_free(btrfs_free_space_cachep, info);
2841 }
2842
2843 offset += to_free;
2844 bytes -= to_free;
2845 goto again;
2846 } else {
2847 u64 old_end = info->bytes + info->offset;
2848
2849 info->bytes = offset - info->offset;
2850 ret = link_free_space(ctl, info);
2851 WARN_ON(ret);
2852 if (ret)
2853 goto out_lock;
2854
2855 /* Not enough bytes in this entry to satisfy us */
2856 if (old_end < offset + bytes) {
2857 bytes -= old_end - offset;
2858 offset = old_end;
2859 goto again;
2860 } else if (old_end == offset + bytes) {
2861 /* all done */
2862 goto out_lock;
2863 }
2864 spin_unlock(&ctl->tree_lock);
2865
2866 ret = __btrfs_add_free_space(block_group,
2867 offset + bytes,
2868 old_end - (offset + bytes),
2869 info->trim_state);
2870 WARN_ON(ret);
2871 goto out;
2872 }
2873 }
2874
2875 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2876 if (ret == -EAGAIN) {
2877 re_search = true;
2878 goto again;
2879 }
2880 out_lock:
2881 btrfs_discard_update_discardable(block_group);
2882 spin_unlock(&ctl->tree_lock);
2883 out:
2884 return ret;
2885 }
2886
2887 void btrfs_dump_free_space(struct btrfs_block_group *block_group,
2888 u64 bytes)
2889 {
2890 struct btrfs_fs_info *fs_info = block_group->fs_info;
2891 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2892 struct btrfs_free_space *info;
2893 struct rb_node *n;
2894 int count = 0;
2895
2896 /*
2897 * Zoned btrfs does not use the free space tree or clusters. Just print
2898 * out the free space after the allocation offset.
2899 */
2900 if (btrfs_is_zoned(fs_info)) {
2901 btrfs_info(fs_info, "free space %llu active %d",
2902 block_group->zone_capacity - block_group->alloc_offset,
2903 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2904 &block_group->runtime_flags));
2905 return;
2906 }
2907
2908 spin_lock(&ctl->tree_lock);
2909 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2910 info = rb_entry(n, struct btrfs_free_space, offset_index);
2911 if (info->bytes >= bytes && !block_group->ro)
2912 count++;
2913 btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2914 info->offset, info->bytes,
2915 (info->bitmap) ? "yes" : "no");
2916 }
2917 spin_unlock(&ctl->tree_lock);
2918 btrfs_info(fs_info, "block group has cluster?: %s",
2919 list_empty(&block_group->cluster_list) ? "no" : "yes");
2920 btrfs_info(fs_info,
2921 "%d blocks of free space at or bigger than %llu bytes", count, bytes);
2922 }
2923
2924 void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
2925 struct btrfs_free_space_ctl *ctl)
2926 {
2927 struct btrfs_fs_info *fs_info = block_group->fs_info;
2928
2929 spin_lock_init(&ctl->tree_lock);
2930 ctl->unit = fs_info->sectorsize;
2931 ctl->start = block_group->start;
2932 ctl->block_group = block_group;
2933 ctl->op = &free_space_op;
2934 ctl->free_space_bytes = RB_ROOT_CACHED;
2935 INIT_LIST_HEAD(&ctl->trimming_ranges);
2936 mutex_init(&ctl->cache_writeout_mutex);
2937
2938 /*
2939 * we only want to have 32k of ram per block group for keeping
2940 * track of free space, and if we pass 1/2 of that we want to
2941 * start converting things over to using bitmaps
2942 */
2943 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
2944 }
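
/*
 * Rough numbers for the threshold above: SZ_32K / 2 leaves 16K for
 * extent entries; with struct btrfs_free_space weighing in on the order
 * of a hundred bytes on 64-bit builds (the exact size depends on the
 * config), that is a budget of a few hundred extent entries before
 * use_bitmap() starts steering small extents into bitmaps.
 */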
2945
2946 /*
2947 * for a given cluster, put all of its extents back into the free
2948 * space cache. If the block group passed doesn't match the block group
2949 * pointed to by the cluster, someone else raced in and freed the
2950 * cluster already. In that case, we just return without changing anything
2951 */
2952 static void __btrfs_return_cluster_to_free_space(
2953 struct btrfs_block_group *block_group,
2954 struct btrfs_free_cluster *cluster)
2955 {
2956 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2957 struct btrfs_free_space *entry;
2958 struct rb_node *node;
2959
2960 spin_lock(&cluster->lock);
2961 if (cluster->block_group != block_group) {
2962 spin_unlock(&cluster->lock);
2963 return;
2964 }
2965
2966 cluster->block_group = NULL;
2967 cluster->window_start = 0;
2968 list_del_init(&cluster->block_group_list);
2969
2970 node = rb_first(&cluster->root);
2971 while (node) {
2972 bool bitmap;
2973
2974 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2975 node = rb_next(&entry->offset_index);
2976 rb_erase(&entry->offset_index, &cluster->root);
2977 RB_CLEAR_NODE(&entry->offset_index);
2978
2979 bitmap = (entry->bitmap != NULL);
2980 if (!bitmap) {
2981 /* Merging treats extents as if they were new */
2982 if (!btrfs_free_space_trimmed(entry)) {
2983 ctl->discardable_extents[BTRFS_STAT_CURR]--;
2984 ctl->discardable_bytes[BTRFS_STAT_CURR] -=
2985 entry->bytes;
2986 }
2987
2988 try_merge_free_space(ctl, entry, false);
2989 steal_from_bitmap(ctl, entry, false);
2990
2991 /* As we insert directly, update these statistics */
2992 if (!btrfs_free_space_trimmed(entry)) {
2993 ctl->discardable_extents[BTRFS_STAT_CURR]++;
2994 ctl->discardable_bytes[BTRFS_STAT_CURR] +=
2995 entry->bytes;
2996 }
2997 }
2998 tree_insert_offset(&ctl->free_space_offset,
2999 entry->offset, &entry->offset_index, bitmap);
3000 rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes,
3001 entry_less);
3002 }
3003 cluster->root = RB_ROOT;
3004 spin_unlock(&cluster->lock);
3005 btrfs_put_block_group(block_group);
3006 }
3007
3008 void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
3009 {
3010 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3011 struct btrfs_free_cluster *cluster;
3012 struct list_head *head;
3013
3014 spin_lock(&ctl->tree_lock);
3015 while ((head = block_group->cluster_list.next) !=
3016 &block_group->cluster_list) {
3017 cluster = list_entry(head, struct btrfs_free_cluster,
3018 block_group_list);
3019
3020 WARN_ON(cluster->block_group != block_group);
3021 __btrfs_return_cluster_to_free_space(block_group, cluster);
3022
3023 cond_resched_lock(&ctl->tree_lock);
3024 }
3025 __btrfs_remove_free_space_cache(ctl);
3026 btrfs_discard_update_discardable(block_group);
3027 spin_unlock(&ctl->tree_lock);
3029 }
3030
3031 /**
3032 * btrfs_is_free_space_trimmed - see if everything is trimmed
3033 * @block_group: block_group of interest
3034 *
3035 * Walk @block_group's free space rb_tree to determine if everything is trimmed.
3036 */
3037 bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
3038 {
3039 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3040 struct btrfs_free_space *info;
3041 struct rb_node *node;
3042 bool ret = true;
3043
3044 spin_lock(&ctl->tree_lock);
3045 node = rb_first(&ctl->free_space_offset);
3046
3047 while (node) {
3048 info = rb_entry(node, struct btrfs_free_space, offset_index);
3049
3050 if (!btrfs_free_space_trimmed(info)) {
3051 ret = false;
3052 break;
3053 }
3054
3055 node = rb_next(node);
3056 }
3057
3058 spin_unlock(&ctl->tree_lock);
3059 return ret;
3060 }
3061
3062 u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
3063 u64 offset, u64 bytes, u64 empty_size,
3064 u64 *max_extent_size)
3065 {
3066 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3067 struct btrfs_discard_ctl *discard_ctl =
3068 &block_group->fs_info->discard_ctl;
3069 struct btrfs_free_space *entry = NULL;
3070 u64 bytes_search = bytes + empty_size;
3071 u64 ret = 0;
3072 u64 align_gap = 0;
3073 u64 align_gap_len = 0;
3074 enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3075 bool use_bytes_index = (offset == block_group->start);
3076
3077 ASSERT(!btrfs_is_zoned(block_group->fs_info));
3078
3079 spin_lock(&ctl->tree_lock);
3080 entry = find_free_space(ctl, &offset, &bytes_search,
3081 block_group->full_stripe_len, max_extent_size,
3082 use_bytes_index);
3083 if (!entry)
3084 goto out;
3085
3086 ret = offset;
3087 if (entry->bitmap) {
3088 bitmap_clear_bits(ctl, entry, offset, bytes, true);
3089
3090 if (!btrfs_free_space_trimmed(entry))
3091 atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3092
3093 if (!entry->bytes)
3094 free_bitmap(ctl, entry);
3095 } else {
3096 unlink_free_space(ctl, entry, true);
3097 align_gap_len = offset - entry->offset;
3098 align_gap = entry->offset;
3099 align_gap_trim_state = entry->trim_state;
3100
3101 if (!btrfs_free_space_trimmed(entry))
3102 atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3103
3104 entry->offset = offset + bytes;
3105 WARN_ON(entry->bytes < bytes + align_gap_len);
3106
3107 entry->bytes -= bytes + align_gap_len;
3108 if (!entry->bytes)
3109 kmem_cache_free(btrfs_free_space_cachep, entry);
3110 else
3111 link_free_space(ctl, entry);
3112 }
3113 out:
3114 btrfs_discard_update_discardable(block_group);
3115 spin_unlock(&ctl->tree_lock);
3116
3117 if (align_gap_len)
3118 __btrfs_add_free_space(block_group, align_gap, align_gap_len,
3119 align_gap_trim_state);
3120 return ret;
3121 }
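
/*
 * Illustrative alignment gap: if find_free_space() returns an extent
 * entry at 1M + 8K but the stripe-aligned result offset is 1M + 64K, the
 * leading 56K becomes align_gap and is handed back to
 * __btrfs_add_free_space() once the lock is dropped, preserving its
 * original trim state instead of leaking the space.
 */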
3122
3123 /*
3124 * given a cluster, put all of its extents back into the free space
3125 * cache. If a block group is passed, this function will only free
3126 * a cluster that belongs to the passed block group.
3127 *
3128 * Otherwise, it'll get a reference on the block group pointed to by the
3129 * cluster and remove the cluster from it.
3130 */
3131 void btrfs_return_cluster_to_free_space(
3132 struct btrfs_block_group *block_group,
3133 struct btrfs_free_cluster *cluster)
3134 {
3135 struct btrfs_free_space_ctl *ctl;
3136
3137 /* first, get a safe pointer to the block group */
3138 spin_lock(&cluster->lock);
3139 if (!block_group) {
3140 block_group = cluster->block_group;
3141 if (!block_group) {
3142 spin_unlock(&cluster->lock);
3143 return;
3144 }
3145 } else if (cluster->block_group != block_group) {
3146 /* someone else has already freed it don't redo their work */
3147 spin_unlock(&cluster->lock);
3148 return;
3149 }
3150 btrfs_get_block_group(block_group);
3151 spin_unlock(&cluster->lock);
3152
3153 ctl = block_group->free_space_ctl;
3154
3155 /* now return any extents the cluster had on it */
3156 spin_lock(&ctl->tree_lock);
3157 __btrfs_return_cluster_to_free_space(block_group, cluster);
3158 spin_unlock(&ctl->tree_lock);
3159
3160 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group);
3161
3162 /* finally drop our ref */
3163 btrfs_put_block_group(block_group);
3164 }
3165
3166 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
3167 struct btrfs_free_cluster *cluster,
3168 struct btrfs_free_space *entry,
3169 u64 bytes, u64 min_start,
3170 u64 *max_extent_size)
3171 {
3172 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3173 int err;
3174 u64 search_start = min_start;
3175 u64 search_bytes = bytes;
3176 u64 ret = 0;
3180
3181 err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
3182 if (err) {
3183 *max_extent_size = max(get_max_extent_size(entry),
3184 *max_extent_size);
3185 return 0;
3186 }
3187
3188 ret = search_start;
3189 bitmap_clear_bits(ctl, entry, ret, bytes, false);
3190
3191 return ret;
3192 }
3193
3194 /*
3195 * given a cluster, try to allocate 'bytes' from it, returns 0
3196 * if it couldn't find anything suitably large, or a logical disk offset
3197 * if things worked out
3198 */
3199 u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
3200 struct btrfs_free_cluster *cluster, u64 bytes,
3201 u64 min_start, u64 *max_extent_size)
3202 {
3203 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3204 struct btrfs_discard_ctl *discard_ctl =
3205 &block_group->fs_info->discard_ctl;
3206 struct btrfs_free_space *entry = NULL;
3207 struct rb_node *node;
3208 u64 ret = 0;
3209
3210 ASSERT(!btrfs_is_zoned(block_group->fs_info));
3211
3212 spin_lock(&cluster->lock);
3213 if (bytes > cluster->max_size)
3214 goto out;
3215
3216 if (cluster->block_group != block_group)
3217 goto out;
3218
3219 node = rb_first(&cluster->root);
3220 if (!node)
3221 goto out;
3222
3223 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3224 while (1) {
3225 if (entry->bytes < bytes)
3226 *max_extent_size = max(get_max_extent_size(entry),
3227 *max_extent_size);
3228
3229 if (entry->bytes < bytes ||
3230 (!entry->bitmap && entry->offset < min_start)) {
3231 node = rb_next(&entry->offset_index);
3232 if (!node)
3233 break;
3234 entry = rb_entry(node, struct btrfs_free_space,
3235 offset_index);
3236 continue;
3237 }
3238
3239 if (entry->bitmap) {
3240 ret = btrfs_alloc_from_bitmap(block_group,
3241 cluster, entry, bytes,
3242 cluster->window_start,
3243 max_extent_size);
3244 if (ret == 0) {
3245 node = rb_next(&entry->offset_index);
3246 if (!node)
3247 break;
3248 entry = rb_entry(node, struct btrfs_free_space,
3249 offset_index);
3250 continue;
3251 }
3252 cluster->window_start += bytes;
3253 } else {
3254 ret = entry->offset;
3255
3256 entry->offset += bytes;
3257 entry->bytes -= bytes;
3258 }
3259
3260 break;
3261 }
3262 out:
3263 spin_unlock(&cluster->lock);
3264
3265 if (!ret)
3266 return 0;
3267
3268 spin_lock(&ctl->tree_lock);
3269
3270 if (!btrfs_free_space_trimmed(entry))
3271 atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3272
3273 ctl->free_space -= bytes;
3274 if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
3275 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
3276
3277 spin_lock(&cluster->lock);
3278 if (entry->bytes == 0) {
3279 rb_erase(&entry->offset_index, &cluster->root);
3280 ctl->free_extents--;
3281 if (entry->bitmap) {
3282 kmem_cache_free(btrfs_free_space_bitmap_cachep,
3283 entry->bitmap);
3284 ctl->total_bitmaps--;
3285 recalculate_thresholds(ctl);
3286 } else if (!btrfs_free_space_trimmed(entry)) {
3287 ctl->discardable_extents[BTRFS_STAT_CURR]--;
3288 }
3289 kmem_cache_free(btrfs_free_space_cachep, entry);
3290 }
3291
3292 spin_unlock(&cluster->lock);
3293 spin_unlock(&ctl->tree_lock);
3294
3295 return ret;
3296 }
3297
3298 static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
3299 struct btrfs_free_space *entry,
3300 struct btrfs_free_cluster *cluster,
3301 u64 offset, u64 bytes,
3302 u64 cont1_bytes, u64 min_bytes)
3303 {
3304 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3305 unsigned long next_zero;
3306 unsigned long i;
3307 unsigned long want_bits;
3308 unsigned long min_bits;
3309 unsigned long found_bits;
3310 unsigned long max_bits = 0;
3311 unsigned long start = 0;
3312 unsigned long total_found = 0;
3313 int ret;
3314
3315 i = offset_to_bit(entry->offset, ctl->unit,
3316 max_t(u64, offset, entry->offset));
3317 want_bits = bytes_to_bits(bytes, ctl->unit);
3318 min_bits = bytes_to_bits(min_bytes, ctl->unit);
3319
3320 /*
3321 * Don't bother looking for a cluster in this bitmap if it's heavily
3322 * fragmented.
3323 */
3324 if (entry->max_extent_size &&
3325 entry->max_extent_size < cont1_bytes)
3326 return -ENOSPC;
3327 again:
3328 found_bits = 0;
3329 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
3330 next_zero = find_next_zero_bit(entry->bitmap,
3331 BITS_PER_BITMAP, i);
3332 if (next_zero - i >= min_bits) {
3333 found_bits = next_zero - i;
3334 if (found_bits > max_bits)
3335 max_bits = found_bits;
3336 break;
3337 }
3338 if (next_zero - i > max_bits)
3339 max_bits = next_zero - i;
3340 i = next_zero;
3341 }
3342
3343 if (!found_bits) {
3344 entry->max_extent_size = (u64)max_bits * ctl->unit;
3345 return -ENOSPC;
3346 }
3347
3348 if (!total_found) {
3349 start = i;
3350 cluster->max_size = 0;
3351 }
3352
3353 total_found += found_bits;
3354
3355 if (cluster->max_size < found_bits * ctl->unit)
3356 cluster->max_size = found_bits * ctl->unit;
3357
3358 if (total_found < want_bits || cluster->max_size < cont1_bytes) {
3359 i = next_zero + 1;
3360 goto again;
3361 }
3362
3363 cluster->window_start = start * ctl->unit + entry->offset;
3364 rb_erase(&entry->offset_index, &ctl->free_space_offset);
3365 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3366
3367 /*
3368 * We need to know if we're currently on the normal space index when we
3369 * manipulate the bitmap so that we know we need to remove and re-insert
3370 * it into the space_index tree. Clear the bytes_index node here so the
3371 * bitmap manipulation helpers know not to mess with the space_index
3372 * until this bitmap entry is added back into the normal cache.
3373 */
3374 RB_CLEAR_NODE(&entry->bytes_index);
3375
3376 ret = tree_insert_offset(&cluster->root, entry->offset,
3377 &entry->offset_index, 1);
3378 ASSERT(!ret); /* -EEXIST; Logic error */
3379
3380 trace_btrfs_setup_cluster(block_group, cluster,
3381 total_found * ctl->unit, 1);
3382 return 0;
3383 }
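
/*
 * Illustrative sizing (4K units): a 256K cluster request means
 * want_bits == 64. Runs shorter than min_bits are skipped, and the
 * "again" loop keeps accumulating qualifying runs until the window
 * totals 64 bits and the largest single run covers cont1_bytes, at which
 * point the whole bitmap entry migrates into the cluster's private
 * rbtree.
 */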
3384
3385 /*
3386 * This searches the block group for just extents to fill the cluster with.
3387 * Try to find a cluster with at least bytes total bytes, at least one
3388 * extent of cont1_bytes, and other extents of at least min_bytes.
3389 */
3390 static noinline int
3391 setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
3392 struct btrfs_free_cluster *cluster,
3393 struct list_head *bitmaps, u64 offset, u64 bytes,
3394 u64 cont1_bytes, u64 min_bytes)
3395 {
3396 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3397 struct btrfs_free_space *first = NULL;
3398 struct btrfs_free_space *entry = NULL;
3399 struct btrfs_free_space *last;
3400 struct rb_node *node;
3401 u64 window_free;
3402 u64 max_extent;
3403 u64 total_size = 0;
3404
3405 entry = tree_search_offset(ctl, offset, 0, 1);
3406 if (!entry)
3407 return -ENOSPC;
3408
3409 /*
3410 * We don't want bitmaps, so just move along until we find a normal
3411 * extent entry.
3412 */
3413 while (entry->bitmap || entry->bytes < min_bytes) {
3414 if (entry->bitmap && list_empty(&entry->list))
3415 list_add_tail(&entry->list, bitmaps);
3416 node = rb_next(&entry->offset_index);
3417 if (!node)
3418 return -ENOSPC;
3419 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3420 }
3421
3422 window_free = entry->bytes;
3423 max_extent = entry->bytes;
3424 first = entry;
3425 last = entry;
3426
3427 for (node = rb_next(&entry->offset_index); node;
3428 node = rb_next(&entry->offset_index)) {
3429 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3430
3431 if (entry->bitmap) {
3432 if (list_empty(&entry->list))
3433 list_add_tail(&entry->list, bitmaps);
3434 continue;
3435 }
3436
3437 if (entry->bytes < min_bytes)
3438 continue;
3439
3440 last = entry;
3441 window_free += entry->bytes;
3442 if (entry->bytes > max_extent)
3443 max_extent = entry->bytes;
3444 }
3445
3446 if (window_free < bytes || max_extent < cont1_bytes)
3447 return -ENOSPC;
3448
3449 cluster->window_start = first->offset;
3450
3451 node = &first->offset_index;
3452
3453 /*
3454 * now we've found our entries, pull them out of the free space
3455 * cache and put them into the cluster rbtree
3456 */
3457 do {
3458 int ret;
3459
3460 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3461 node = rb_next(&entry->offset_index);
3462 if (entry->bitmap || entry->bytes < min_bytes)
3463 continue;
3464
3465 rb_erase(&entry->offset_index, &ctl->free_space_offset);
3466 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3467 ret = tree_insert_offset(&cluster->root, entry->offset,
3468 &entry->offset_index, 0);
3469 total_size += entry->bytes;
3470 ASSERT(!ret); /* -EEXIST; Logic error */
3471 } while (node && entry != last);
3472
3473 cluster->max_size = max_extent;
3474 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
3475 return 0;
3476 }
3477
3478 /*
3479 * This specifically looks for bitmaps that may work in the cluster. We
3480 * assume that we have already failed to find extents that will work.
3481 */
3482 static noinline int
3483 setup_cluster_bitmap(struct btrfs_block_group *block_group,
3484 struct btrfs_free_cluster *cluster,
3485 struct list_head *bitmaps, u64 offset, u64 bytes,
3486 u64 cont1_bytes, u64 min_bytes)
3487 {
3488 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3489 struct btrfs_free_space *entry = NULL;
3490 int ret = -ENOSPC;
3491 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
3492
3493 if (ctl->total_bitmaps == 0)
3494 return -ENOSPC;
3495
3496 /*
3497 * The bitmap that covers offset won't be in the list unless offset
3498 * is just its start offset.
3499 */
3500 if (!list_empty(bitmaps))
3501 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3502
3503 if (!entry || entry->offset != bitmap_offset) {
3504 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3505 if (entry && list_empty(&entry->list))
3506 list_add(&entry->list, bitmaps);
3507 }
3508
3509 list_for_each_entry(entry, bitmaps, list) {
3510 if (entry->bytes < bytes)
3511 continue;
3512 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3513 bytes, cont1_bytes, min_bytes);
3514 if (!ret)
3515 return 0;
3516 }
3517
3518 /*
3519 * The bitmaps list has all the bitmaps that record free space
3520 * starting after offset, so no more search is required.
3521 */
3522 return -ENOSPC;
3523 }
3524
3525 /*
3526 * Here we try to find a cluster of blocks in a block group. The goal
3527 * is to find at least bytes + empty_size of free space.
3528 * We might not find them all in one contiguous area.
3529 *
3530 * Returns zero and sets up the cluster if things worked out, otherwise
3531 * it returns -ENOSPC.
3532 */
3533 int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
3534 struct btrfs_free_cluster *cluster,
3535 u64 offset, u64 bytes, u64 empty_size)
3536 {
3537 struct btrfs_fs_info *fs_info = block_group->fs_info;
3538 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3539 struct btrfs_free_space *entry, *tmp;
3540 LIST_HEAD(bitmaps);
3541 u64 min_bytes;
3542 u64 cont1_bytes;
3543 int ret;
3544
3545 /*
3546 * Choose the minimum extent size we'll require for this
3547 * cluster. For SSD_SPREAD, don't allow any fragmentation.
3548 * For metadata, allow allocations with smaller extents. For
3549 * data, keep it dense.
3550 */
3551 if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
3552 cont1_bytes = bytes + empty_size;
3553 min_bytes = cont1_bytes;
3554 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3555 cont1_bytes = bytes;
3556 min_bytes = fs_info->sectorsize;
3557 } else {
3558 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
3559 min_bytes = fs_info->sectorsize;
3560 }
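/*
 * Illustrative outcomes of the above (hypothetical values): with a 4K
 * sectorsize, bytes = 8M and empty_size = 2M, SSD_SPREAD demands a
 * single 10M extent (cont1_bytes = min_bytes = 10M); metadata demands
 * one 8M extent but accepts 4K entries as filler; data demands
 * max(8M, 10M >> 2) = 8M contiguous with 4K minimum entries.
 */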
3561
3562 spin_lock(&ctl->tree_lock);
3563
3564 /*
3565 * If we know we don't have enough space to make a cluster don't even
3566 * bother doing all the work to try and find one.
3567 */
3568 if (ctl->free_space < bytes) {
3569 spin_unlock(&ctl->tree_lock);
3570 return -ENOSPC;
3571 }
3572
3573 spin_lock(&cluster->lock);
3574
3575 /* someone already found a cluster, hooray */
3576 if (cluster->block_group) {
3577 ret = 0;
3578 goto out;
3579 }
3580
3581 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3582 min_bytes);
3583
3584 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3585 bytes + empty_size,
3586 cont1_bytes, min_bytes);
3587 if (ret)
3588 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3589 offset, bytes + empty_size,
3590 cont1_bytes, min_bytes);
3591
3592 /* Clear our temporary list */
3593 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3594 list_del_init(&entry->list);
3595
3596 if (!ret) {
3597 btrfs_get_block_group(block_group);
3598 list_add_tail(&cluster->block_group_list,
3599 &block_group->cluster_list);
3600 cluster->block_group = block_group;
3601 } else {
3602 trace_btrfs_failed_cluster_setup(block_group);
3603 }
3604 out:
3605 spin_unlock(&cluster->lock);
3606 spin_unlock(&ctl->tree_lock);
3607
3608 return ret;
3609 }
3610
3611 /*
3612 * Simple code to zero out a cluster.
3613 */
3614 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3615 {
3616 spin_lock_init(&cluster->lock);
3617 spin_lock_init(&cluster->refill_lock);
3618 cluster->root = RB_ROOT;
3619 cluster->max_size = 0;
3620 cluster->fragmented = false;
3621 INIT_LIST_HEAD(&cluster->block_group_list);
3622 cluster->block_group = NULL;
3623 }
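/*
 * A minimal usage sketch for the cluster API (hypothetical caller; the
 * real ones live in the extent allocator):
 *
 *	struct btrfs_free_cluster cluster;
 *
 *	btrfs_init_free_cluster(&cluster);
 *	if (btrfs_find_space_cluster(block_group, &cluster, offset, bytes,
 *				     empty_size) == 0) {
 *		// allocate from the cluster under cluster.refill_lock,
 *		// then give leftovers back with
 *		// btrfs_return_cluster_to_free_space()
 *	}
 */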
3624
3625 static int do_trimming(struct btrfs_block_group *block_group,
3626 u64 *total_trimmed, u64 start, u64 bytes,
3627 u64 reserved_start, u64 reserved_bytes,
3628 enum btrfs_trim_state reserved_trim_state,
3629 struct btrfs_trim_range *trim_entry)
3630 {
3631 struct btrfs_space_info *space_info = block_group->space_info;
3632 struct btrfs_fs_info *fs_info = block_group->fs_info;
3633 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3634 int ret;
3635 int update = 0;
3636 const u64 end = start + bytes;
3637 const u64 reserved_end = reserved_start + reserved_bytes;
3638 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3639 u64 trimmed = 0;
3640
3641 spin_lock(&space_info->lock);
3642 spin_lock(&block_group->lock);
3643 if (!block_group->ro) {
3644 block_group->reserved += reserved_bytes;
3645 space_info->bytes_reserved += reserved_bytes;
3646 update = 1;
3647 }
3648 spin_unlock(&block_group->lock);
3649 spin_unlock(&space_info->lock);
3650
3651 ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
3652 if (!ret) {
3653 *total_trimmed += trimmed;
3654 trim_state = BTRFS_TRIM_STATE_TRIMMED;
3655 }
3656
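/*
 * Give back the untrimmed head and tail of the reserved range. E.g.
 * (hypothetical values): with reserved [0, 1M) but trimmed [128K, 640K),
 * [0, 128K) and [640K, 1M) are re-added below with their old trim state,
 * while the trimmed middle is re-added with the state computed above.
 */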
3657 mutex_lock(&ctl->cache_writeout_mutex);
3658 if (reserved_start < start)
3659 __btrfs_add_free_space(block_group, reserved_start,
3660 start - reserved_start,
3661 reserved_trim_state);
3662 if (end < reserved_end)
3663 __btrfs_add_free_space(block_group, end, reserved_end - end,
3664 reserved_trim_state);
3665 __btrfs_add_free_space(block_group, start, bytes, trim_state);
3666 list_del(&trim_entry->list);
3667 mutex_unlock(&ctl->cache_writeout_mutex);
3668
3669 if (update) {
3670 spin_lock(&space_info->lock);
3671 spin_lock(&block_group->lock);
3672 if (block_group->ro)
3673 space_info->bytes_readonly += reserved_bytes;
3674 block_group->reserved -= reserved_bytes;
3675 space_info->bytes_reserved -= reserved_bytes;
3676 spin_unlock(&block_group->lock);
3677 spin_unlock(&space_info->lock);
3678 }
3679
3680 return ret;
3681 }
3682
3683 /*
3684 * If @async is set, then we will trim 1 region and return.
3685 */
3686 static int trim_no_bitmap(struct btrfs_block_group *block_group,
3687 u64 *total_trimmed, u64 start, u64 end, u64 minlen,
3688 bool async)
3689 {
3690 struct btrfs_discard_ctl *discard_ctl =
3691 &block_group->fs_info->discard_ctl;
3692 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3693 struct btrfs_free_space *entry;
3694 struct rb_node *node;
3695 int ret = 0;
3696 u64 extent_start;
3697 u64 extent_bytes;
3698 enum btrfs_trim_state extent_trim_state;
3699 u64 bytes;
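/*
 * Snapshot the discard size limit once per call; it can be tuned at
 * runtime (e.g. through the discard sysfs knobs), so READ_ONCE() keeps
 * this loop working against a single consistent value.
 */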
3700 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);
3701
3702 while (start < end) {
3703 struct btrfs_trim_range trim_entry;
3704
3705 mutex_lock(&ctl->cache_writeout_mutex);
3706 spin_lock(&ctl->tree_lock);
3707
3708 if (ctl->free_space < minlen)
3709 goto out_unlock;
3710
3711 entry = tree_search_offset(ctl, start, 0, 1);
3712 if (!entry)
3713 goto out_unlock;
3714
3715 /* Skip bitmaps and, if async, entries that are already trimmed */
3716 while (entry->bitmap ||
3717 (async && btrfs_free_space_trimmed(entry))) {
3718 node = rb_next(&entry->offset_index);
3719 if (!node)
3720 goto out_unlock;
3721 entry = rb_entry(node, struct btrfs_free_space,
3722 offset_index);
3723 }
3724
3725 if (entry->offset >= end)
3726 goto out_unlock;
3727
3728 extent_start = entry->offset;
3729 extent_bytes = entry->bytes;
3730 extent_trim_state = entry->trim_state;
3731 if (async) {
3732 start = entry->offset;
3733 bytes = entry->bytes;
3734 if (bytes < minlen) {
3735 spin_unlock(&ctl->tree_lock);
3736 mutex_unlock(&ctl->cache_writeout_mutex);
3737 goto next;
3738 }
3739 unlink_free_space(ctl, entry, true);
3740 /*
3741 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
3742 * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
3743 * X when we come back around. So trim it now.
3744 */
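/*
 * E.g. (hypothetical sizes, assuming a 64M max_discard_size and a 32K
 * minimum filter): a 64M + 16K entry is trimmed whole, since the 16K
 * tail would otherwise be skipped forever, while a 64M + 48K entry is
 * capped at 64M here and its 48K tail is relinked for a later pass.
 */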
3745 if (max_discard_size &&
3746 bytes >= (max_discard_size +
3747 BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
3748 bytes = max_discard_size;
3749 extent_bytes = max_discard_size;
3750 entry->offset += max_discard_size;
3751 entry->bytes -= max_discard_size;
3752 link_free_space(ctl, entry);
3753 } else {
3754 kmem_cache_free(btrfs_free_space_cachep, entry);
3755 }
3756 } else {
3757 start = max(start, extent_start);
3758 bytes = min(extent_start + extent_bytes, end) - start;
3759 if (bytes < minlen) {
3760 spin_unlock(&ctl->tree_lock);
3761 mutex_unlock(&ctl->cache_writeout_mutex);
3762 goto next;
3763 }
3764
3765 unlink_free_space(ctl, entry, true);
3766 kmem_cache_free(btrfs_free_space_cachep, entry);
3767 }
3768
3769 spin_unlock(&ctl->tree_lock);
3770 trim_entry.start = extent_start;
3771 trim_entry.bytes = extent_bytes;
3772 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3773 mutex_unlock(&ctl->cache_writeout_mutex);
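/*
 * trim_entry stays on ctl->trimming_ranges (added under the
 * cache_writeout_mutex above) until do_trimming() drops it; the free
 * space cache writeout in this file walks that list so ranges that were
 * unlinked for discarding are still recorded as free space.
 */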
3774
3775 ret = do_trimming(block_group, total_trimmed, start, bytes,
3776 extent_start, extent_bytes, extent_trim_state,
3777 &trim_entry);
3778 if (ret) {
3779 block_group->discard_cursor = start + bytes;
3780 break;
3781 }
3782 next:
3783 start += bytes;
3784 block_group->discard_cursor = start;
3785 if (async && *total_trimmed)
3786 break;
3787
3788 if (fatal_signal_pending(current)) {
3789 ret = -ERESTARTSYS;
3790 break;
3791 }
3792
3793 cond_resched();
3794 }
3795
3796 return ret;
3797
3798 out_unlock:
3799 block_group->discard_cursor = btrfs_block_group_end(block_group);
3800 spin_unlock(&ctl->tree_lock);
3801 mutex_unlock(&ctl->cache_writeout_mutex);
3802
3803 return ret;
3804 }
3805
3806 /*
3807 * If we break out of trimming a bitmap prematurely, we should reset the
3808 * trimming bit. In a rather contrived case, it's possible to race here, so
3809 * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
3810 *
3811 * start = start of bitmap
3812 * end = near end of bitmap
3813 *
3814 * Thread 1: Thread 2:
3815 * trim_bitmaps(start)
3816 * trim_bitmaps(end)
3817 * end_trimming_bitmap()
3818 * reset_trimming_bitmap()
3819 */
3820 static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset)
3821 {
3822 struct btrfs_free_space *entry;
3823
3824 spin_lock(&ctl->tree_lock);
3825 entry = tree_search_offset(ctl, offset, 1, 0);
3826 if (entry) {
3827 if (btrfs_free_space_trimmed(entry)) {
3828 ctl->discardable_extents[BTRFS_STAT_CURR] +=
3829 entry->bitmap_extents;
3830 ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes;
3831 }
3832 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3833 }
3834
3835 spin_unlock(&ctl->tree_lock);
3836 }
3837
3838 static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
3839 struct btrfs_free_space *entry)
3840 {
3841 if (btrfs_free_space_trimming_bitmap(entry)) {
3842 entry->trim_state = BTRFS_TRIM_STATE_TRIMMED;
3843 ctl->discardable_extents[BTRFS_STAT_CURR] -=
3844 entry->bitmap_extents;
3845 ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes;
3846 }
3847 }
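/*
 * The two helpers above are intentionally symmetric: end_trimming_bitmap()
 * subtracts a bitmap's contribution from the discardable_* stats once it
 * is fully TRIMMED, and reset_trimming_bitmap() adds it back when a
 * trimmed bitmap is reset to UNTRIMMED, keeping the stats balanced.
 */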
3848
3849 /*
3850 * If @async is set, then we will trim 1 region and return.
3851 */
3852 static int trim_bitmaps(struct btrfs_block_group *block_group,
3853 u64 *total_trimmed, u64 start, u64 end, u64 minlen,
3854 u64 maxlen, bool async)
3855 {
3856 struct btrfs_discard_ctl *discard_ctl =
3857 &block_group->fs_info->discard_ctl;
3858 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3859 struct btrfs_free_space *entry;
3860 int ret = 0;
3861 int ret2;
3862 u64 bytes;
3863 u64 offset = offset_to_bitmap(ctl, start);
3864 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);
3865
3866 while (offset < end) {
3867 bool next_bitmap = false;
3868 struct btrfs_trim_range trim_entry;
3869
3870 mutex_lock(&ctl->cache_writeout_mutex);
3871 spin_lock(&ctl->tree_lock);
3872
3873 if (ctl->free_space < minlen) {
3874 block_group->discard_cursor =
3875 btrfs_block_group_end(block_group);
3876 spin_unlock(&ctl->tree_lock);
3877 mutex_unlock(&ctl->cache_writeout_mutex);
3878 break;
3879 }
3880
3881 entry = tree_search_offset(ctl, offset, 1, 0);
3882 /*
3883 * Bitmaps are marked trimmed lossily now to prevent constant
3884 * discarding of the same bitmap (the reason why we are bound
3885 * by the filters). So, retrim the block group bitmaps when we
3886 * are preparing to punt to the unused_bgs list. This uses
3887 * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
3888 * which is the only discard index which sets minlen to 0.
3889 */
3890 if (!entry || (async && minlen && start == offset &&
3891 btrfs_free_space_trimmed(entry))) {
3892 spin_unlock(&ctl->tree_lock);
3893 mutex_unlock(&ctl->cache_writeout_mutex);
3894 next_bitmap = true;
3895 goto next;
3896 }
3897
3898 /*
3899 * Async discard bitmap trimming begins by setting the start to be
3900 * key.objectid, and offset_to_bitmap() aligns to the start of the
3901 * bitmap. This lets us know we are fully scanning the bitmap rather
3902 * than only some portion of it.
3903 */
3904 if (start == offset)
3905 entry->trim_state = BTRFS_TRIM_STATE_TRIMMING;
3906
3907 bytes = minlen;
3908 ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3909 if (ret2 || start >= end) {
3910 /*
3911 * We lossily consider a bitmap trimmed if we only skip
3912 * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
3913 */
3914 if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
3915 end_trimming_bitmap(ctl, entry);
3916 else
3917 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3918 spin_unlock(&ctl->tree_lock);
3919 mutex_unlock(&ctl->cache_writeout_mutex);
3920 next_bitmap = true;
3921 goto next;
3922 }
3923
3924 /*
3925 * We already trimmed a region, but are using the locking above
3926 * to reset the trim_state.
3927 */
3928 if (async && *total_trimmed) {
3929 spin_unlock(&ctl->tree_lock);
3930 mutex_unlock(&ctl->cache_writeout_mutex);
3931 goto out;
3932 }
3933
3934 bytes = min(bytes, end - start);
3935 if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
3936 spin_unlock(&ctl->tree_lock);
3937 mutex_unlock(&ctl->cache_writeout_mutex);
3938 goto next;
3939 }
3940
3941 /*
3942 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
3943 * If X < @minlen, we won't trim X when we come back around.
3944 * So trim it now. We differ here from trimming extents as we
3945 * don't keep individual state per bit.
3946 */
3947 if (async &&
3948 max_discard_size &&
3949 bytes > (max_discard_size + minlen))
3950 bytes = max_discard_size;
3951
3952 bitmap_clear_bits(ctl, entry, start, bytes, true);
3953 if (entry->bytes == 0)
3954 free_bitmap(ctl, entry);
3955
3956 spin_unlock(&ctl->tree_lock);
3957 trim_entry.start = start;
3958 trim_entry.bytes = bytes;
3959 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3960 mutex_unlock(&ctl->cache_writeout_mutex);
3961
3962 ret = do_trimming(block_group, total_trimmed, start, bytes,
3963 start, bytes, BTRFS_TRIM_STATE_UNTRIMMED, &trim_entry);
3964 if (ret) {
3965 reset_trimming_bitmap(ctl, offset);
3966 block_group->discard_cursor =
3967 btrfs_block_group_end(block_group);
3968 break;
3969 }
3970 next:
3971 if (next_bitmap) {
3972 offset += BITS_PER_BITMAP * ctl->unit;
3973 start = offset;
3974 } else {
3975 start += bytes;
3976 }
3977 block_group->discard_cursor = start;
3978
3979 if (fatal_signal_pending(current)) {
3980 if (start != offset)
3981 reset_trimming_bitmap(ctl, offset);
3982 ret = -ERESTARTSYS;
3983 break;
3984 }
3985
3986 cond_resched();
3987 }
3988
3989 if (offset >= end)
3990 block_group->discard_cursor = end;
3991
3992 out:
3993 return ret;
3994 }
3995
3996 int btrfs_trim_block_group(struct btrfs_block_group *block_group,
3997 u64 *trimmed, u64 start, u64 end, u64 minlen)
3998 {
3999 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
4000 int ret;
4001 u64 rem = 0;
4002
4003 ASSERT(!btrfs_is_zoned(block_group->fs_info));
4004
4005 *trimmed = 0;
4006
4007 spin_lock(&block_group->lock);
4008 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4009 spin_unlock(&block_group->lock);
4010 return 0;
4011 }
4012 btrfs_freeze_block_group(block_group);
4013 spin_unlock(&block_group->lock);
4014
4015 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
4016 if (ret)
4017 goto out;
4018
4019 ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
4020 div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
4021 /* If we ended in the middle of a bitmap, reset the trimming flag */
4022 if (rem)
4023 reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end));
4024 out:
4025 btrfs_unfreeze_block_group(block_group);
4026 return ret;
4027 }
4028
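/*
 * The two wrappers below let a caller (e.g. the async discard machinery
 * in discard.c) trim extent entries and bitmap entries in separate
 * passes; each freezes the block group so it cannot be fully removed
 * while the pass is in flight.
 */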
4029 int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
4030 u64 *trimmed, u64 start, u64 end, u64 minlen,
4031 bool async)
4032 {
4033 int ret;
4034
4035 *trimmed = 0;
4036
4037 spin_lock(&block_group->lock);
4038 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4039 spin_unlock(&block_group->lock);
4040 return 0;
4041 }
4042 btrfs_freeze_block_group(block_group);
4043 spin_unlock(&block_group->lock);
4044
4045 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
4046 btrfs_unfreeze_block_group(block_group);
4047
4048 return ret;
4049 }
4050
4051 int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
4052 u64 *trimmed, u64 start, u64 end, u64 minlen,
4053 u64 maxlen, bool async)
4054 {
4055 int ret;
4056
4057 *trimmed = 0;
4058
4059 spin_lock(&block_group->lock);
4060 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4061 spin_unlock(&block_group->lock);
4062 return 0;
4063 }
4064 btrfs_freeze_block_group(block_group);
4065 spin_unlock(&block_group->lock);
4066
4067 ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
4068 async);
4069
4070 btrfs_unfreeze_block_group(block_group);
4071
4072 return ret;
4073 }
4074
4075 bool btrfs_free_space_cache_v1_active(struct btrfs_fs_info *fs_info)
4076 {
4077 return btrfs_super_cache_generation(fs_info->super_copy) != 0;
4078 }
4079
4080 static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
4081 struct btrfs_trans_handle *trans)
4082 {
4083 struct btrfs_block_group *block_group;
4084 struct rb_node *node;
4085 int ret = 0;
4086
4087 btrfs_info(fs_info, "cleaning free space cache v1");
4088
4089 node = rb_first_cached(&fs_info->block_group_cache_tree);
4090 while (node) {
4091 block_group = rb_entry(node, struct btrfs_block_group, cache_node);
4092 ret = btrfs_remove_free_space_inode(trans, NULL, block_group);
4093 if (ret)
4094 goto out;
4095 node = rb_next(node);
4096 }
4097 out:
4098 return ret;
4099 }
4100
4101 int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active)
4102 {
4103 struct btrfs_trans_handle *trans;
4104 int ret;
4105
4106 /*
4107 * update_super_roots will appropriately set or unset
4108 * super_copy->cache_generation based on SPACE_CACHE and
4109 * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a
4110 * transaction commit whether we are enabling space cache v1 and don't
4111 * have any other work to do, or are disabling it and removing free
4112 * space inodes.
4113 */
4114 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4115 if (IS_ERR(trans))
4116 return PTR_ERR(trans);
4117
4118 if (!active) {
4119 set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4120 ret = cleanup_free_space_cache_v1(fs_info, trans);
4121 if (ret) {
4122 btrfs_abort_transaction(trans, ret);
4123 btrfs_end_transaction(trans);
4124 goto out;
4125 }
4126 }
4127
4128 ret = btrfs_commit_transaction(trans);
4129 out:
4130 clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4131
4132 return ret;
4133 }
4134
4135 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4136 /*
4137 * Use this if you need to make a bitmap or extent entry specifically. It
4138 * doesn't do any of the merging that add_free_space does, and it acts a lot
4139 * like how the free space cache loading works, so you can get really weird
4140 * configurations.
4141 */
4142 int test_add_free_space_entry(struct btrfs_block_group *cache,
4143 u64 offset, u64 bytes, bool bitmap)
4144 {
4145 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
4146 struct btrfs_free_space *info = NULL, *bitmap_info;
4147 void *map = NULL;
4148 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED;
4149 u64 bytes_added;
4150 int ret;
4151
4152 again:
4153 if (!info) {
4154 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
4155 if (!info)
4156 return -ENOMEM;
4157 }
4158
4159 if (!bitmap) {
4160 spin_lock(&ctl->tree_lock);
4161 info->offset = offset;
4162 info->bytes = bytes;
4163 info->max_extent_size = 0;
4164 ret = link_free_space(ctl, info);
4165 spin_unlock(&ctl->tree_lock);
4166 if (ret)
4167 kmem_cache_free(btrfs_free_space_cachep, info);
4168 return ret;
4169 }
4170
4171 if (!map) {
4172 map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
4173 if (!map) {
4174 kmem_cache_free(btrfs_free_space_cachep, info);
4175 return -ENOMEM;
4176 }
4177 }
4178
4179 spin_lock(&ctl->tree_lock);
4180 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
4181 1, 0);
4182 if (!bitmap_info) {
4183 info->bitmap = map;
4184 map = NULL;
4185 add_new_bitmap(ctl, info, offset);
4186 bitmap_info = info;
4187 info = NULL;
4188 }
4189
4190 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
4191 trim_state);
4192
4193 bytes -= bytes_added;
4194 offset += bytes_added;
4195 spin_unlock(&ctl->tree_lock);
4196
4197 if (bytes)
4198 goto again;
4199
4200 if (info)
4201 kmem_cache_free(btrfs_free_space_cachep, info);
4202 if (map)
4203 kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
4204 return 0;
4205 }
4206
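/*
 * A minimal sketch of how the test helpers pair up (hypothetical values;
 * the real callers live in the btrfs self-tests):
 *
 *	if (test_add_free_space_entry(cache, 0, SZ_4M, false))
 *		return -1;
 *	if (!test_check_exists(cache, SZ_1M, SZ_1M))
 *		// the 4M extent should have covered [1M, 2M)
 */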
4207 /*
4208 * Checks whether the given range is in the free space cache. This is really
4209 * just used to check the absence of space, so if there is any free space in
4210 * the range at all we will return 1.
4211 */
4212 int test_check_exists(struct btrfs_block_group *cache,
4213 u64 offset, u64 bytes)
4214 {
4215 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
4216 struct btrfs_free_space *info;
4217 int ret = 0;
4218
4219 spin_lock(&ctl->tree_lock);
4220 info = tree_search_offset(ctl, offset, 0, 0);
4221 if (!info) {
4222 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
4223 1, 0);
4224 if (!info)
4225 goto out;
4226 }
4227
4228 have_info:
4229 if (info->bitmap) {
4230 u64 bit_off, bit_bytes;
4231 struct rb_node *n;
4232 struct btrfs_free_space *tmp;
4233
4234 bit_off = offset;
4235 bit_bytes = ctl->unit;
4236 ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
4237 if (!ret) {
4238 if (bit_off == offset) {
4239 ret = 1;
4240 goto out;
4241 } else if (bit_off > offset &&
4242 offset + bytes > bit_off) {
4243 ret = 1;
4244 goto out;
4245 }
4246 }
4247
4248 n = rb_prev(&info->offset_index);
4249 while (n) {
4250 tmp = rb_entry(n, struct btrfs_free_space,
4251 offset_index);
4252 if (tmp->offset + tmp->bytes < offset)
4253 break;
4254 if (offset + bytes < tmp->offset) {
4255 n = rb_prev(&tmp->offset_index);
4256 continue;
4257 }
4258 info = tmp;
4259 goto have_info;
4260 }
4261
4262 n = rb_next(&info->offset_index);
4263 while (n) {
4264 tmp = rb_entry(n, struct btrfs_free_space,
4265 offset_index);
4266 if (offset + bytes < tmp->offset)
4267 break;
4268 if (tmp->offset + tmp->bytes < offset) {
4269 n = rb_next(&tmp->offset_index);
4270 continue;
4271 }
4272 info = tmp;
4273 goto have_info;
4274 }
4275
4276 ret = 0;
4277 goto out;
4278 }
4279
4280 if (info->offset == offset) {
4281 ret = 1;
4282 goto out;
4283 }
4284
4285 if (offset > info->offset && offset < info->offset + info->bytes)
4286 ret = 1;
4287 out:
4288 spin_unlock(&ctl->tree_lock);
4289 return ret;
4290 }
4291 #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
4292