// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

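/*
 * Check whether the file extent item @fi in leaf @eb references the byte at
 * @extent_item_pos within the data extent we are resolving, and if so queue
 * an extent_inode_elem recording the (inode, file offset) pair on @eie.
 * When @ignore_offset is set, or the extent is compressed/encrypted (where
 * disk bytes do not map 1:1 to file offsets), the range check is skipped.
 * For example, with data_offset == 8K and data_len == 4K, only positions
 * 8K..12K-1 of the extent are referenced by this item; any other position
 * returns 1 so the caller skips it.
 */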
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

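/*
 * For example, an inline BTRFS_SHARED_DATA_REF_KEY backref names its parent
 * leaf directly and goes into the direct tree, while a
 * BTRFS_EXTENT_DATA_REF_KEY backref only names (root, objectid, offset) and
 * sits in the indirect tree until resolve_indirect_refs() turns it into one
 * or more direct refs. On-disk tree block refs carry no key at all, so they
 * wait in indirect_missing_keys until add_missing_keys() reads the block
 * and fills the key in.
 */
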
/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

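/*
 * Keep sc->share_count in sync with a ref's count going from @oldcount to
 * @newcount. For example, inserting a fresh ref with count == 1 is a
 * 0 -> 1 transition and increments share_count, while merging a delayed
 * DROP ref (count == -1) into it is a 1 -> 0 transition and decrements it
 * again, so a ref that is added and deleted within the same transaction
 * does not count towards sharing.
 */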
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

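/*
 * Check whether the direct preftree contains a shared data backref with
 * @bytenr as parent. Direct data refs are inserted with level == 0,
 * root_id == 0 and a zeroed key, which is exactly what the zeroed lookup
 * target here looks like, so prelim_ref_compare() effectively reduces to
 * comparing the parent field.
 */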
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

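/*
 * Collect all leaves (parents) referencing ref->wanted_disk_byte into the
 * @parents ulist. For refs above level 0 the parent is simply the node the
 * path already points to. At level 0 we may have to scan several file
 * extent items, and possibly several leaves, since ref->count > 1 means
 * multiple file extent items point at the same data extent.
 */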
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes.  This happens when qgroups does backref walks when
	 * adding new delayed refs.  To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

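/*
 * The aux field of each ulist node carries the extent_inode_elem list
 * collected for that parent leaf; cast it back to a pointer here.
 */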
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate ENOENT; otherwise we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte,
				     ref->root_id, 0, ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

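	/*
	 * The ref tree is iterated under the head's spinlock, which is why
	 * all the add_direct_ref()/add_indirect_ref() calls below must use
	 * GFP_ATOMIC.
	 */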
	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

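	/*
	 * The inline refs follow the extent item (and the optional
	 * btrfs_tree_block_info) back to back. Each entry is a type byte
	 * plus a type dependent payload, so we advance by
	 * btrfs_extent_inline_ref_size(type) at the end of each iteration.
	 */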
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);

			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_root *extent_root,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs and
 * behaves much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = 1;

again:
	head = NULL;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/* This shouldn't happen, indicates a bug or fs corruption. */
		ASSERT(ret != 0);
		ret = -EUCLEAN;
		goto out;
	}

	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != BTRFS_SEQ_LAST) {
		/*
		 * We have a specific time_seq we care about and trans which
		 * means we have the path lock, we need to grab the ref head and
		 * lock it so we have a consistent view of the refs at the given
		 * time.
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

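	/*
	 * We searched for (bytenr, <item type>, (u64)-1), a key that cannot
	 * exist, so btrfs_search_slot() left us one slot past where the
	 * extent item would be. Step back one slot to land on the
	 * EXTENT_ITEM or METADATA_ITEM for @bytenr, if this leaf has it.
	 */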
	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(root, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     0, ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie,
							ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool skip_commit_root_sem)
{
	int ret;

	if (!trans && !skip_commit_root_sem)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, false);
	if (!trans && !skip_commit_root_sem)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	struct btrfs_backref_shared_cache_entry *entry;

	if (!cache->use_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &cache->entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(root->fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not shared
	 * because we have just switched from one leaf to another, and switched
	 * also other nodes above the leaf and below the current level, so mark
	 * them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			cache->entries[i].is_shared = true;
			cache->entries[i].gen = entry->gen;
		}
	}

	return true;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!cache->use_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(root->fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &cache->entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct, the refcount of
	 * data extent is increased in the extent item at the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &cache->entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}

/*
 * Check if a data extent is shared or not.
 *
 * @root:        The root the inode belongs to.
 * @inum:        Number of the inode whose extent we are checking.
 * @bytenr:      Logical bytenr of the extent we are checking.
 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
 *               not known.
 * @roots:       List of roots this extent is shared among.
 * @tmp:         Temporary list used for iteration.
 * @cache:       A backref lookup result cache.
 *
 * btrfs_is_data_extent_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
				u64 extent_gen,
				struct ulist *roots, struct ulist *tmp,
				struct btrfs_backref_shared_cache *cache)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
		.have_delayed_delete_refs = false,
	};
	int level;

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	/* -1 means we are in the bytenr of the data extent. */
	level = -1;
	ULIST_ITER_INIT(&uiter);
	cache->use_cache = true;
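	/*
	 * Walk the backrefs bottom-up: the first pass checks the data extent
	 * itself (level -1), and each following pass checks the next extent
	 * buffer on the path towards the root, with bytenr replaced by the
	 * parent found in the previous pass and the verdict cached per level.
	 */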
1725 while (1) {
1726 bool is_shared;
1727 bool cached;
1728
1729 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1730 roots, NULL, &shared, false);
1731 if (ret == BACKREF_FOUND_SHARED) {
1732 /* this is the only condition under which we return 1 */
1733 ret = 1;
1734 if (level >= 0)
1735 store_backref_shared_cache(cache, root, bytenr,
1736 level, true);
1737 break;
1738 }
1739 if (ret < 0 && ret != -ENOENT)
1740 break;
1741 ret = 0;
1742 /*
1743 * If our data extent is not shared through reflinks and it was
1744 * created in a generation after the last one used to create a
1745 * snapshot of the inode's root, then it can not be shared
1746 * indirectly through subtrees, as that can only happen with
1747 * snapshots. In this case bail out, no need to check for the
1748 * sharedness of extent buffers.
1749 */
1750 if (level == -1 &&
1751 extent_gen > btrfs_root_last_snapshot(&root->root_item))
1752 break;
1753
1754 /*
1755 * If our data extent was not directly shared (without multiple
1756 * reference items), than it might have a single reference item
1757 * with a count > 1 for the same offset, which means there are 2
1758 * (or more) file extent items that point to the data extent -
1759 * this happens when a file extent item needs to be split and
1760 * then one item gets moved to another leaf due to a b+tree leaf
1761 * split when inserting some item. In this case the file extent
1762 * items may be located in different leaves and therefore some
1763 * of the leaves may be referenced through shared subtrees while
1764 * others are not. Since our extent buffer cache only works for
1765 * a single path (by far the most common case and simpler to
1766 * deal with), we can not use it if we have multiple leaves
1767 * (which implies multiple paths).
1768 */
1769 if (level == -1 && tmp->nnodes > 1)
1770 cache->use_cache = false;
1771
1772 if (level >= 0)
1773 store_backref_shared_cache(cache, root, bytenr,
1774 level, false);
1775 node = ulist_next(tmp, &uiter);
1776 if (!node)
1777 break;
1778 bytenr = node->val;
1779 level++;
1780 cached = lookup_backref_shared_cache(cache, root, bytenr, level,
1781 &is_shared);
1782 if (cached) {
1783 ret = (is_shared ? 1 : 0);
1784 break;
1785 }
1786 shared.share_count = 0;
1787 shared.have_delayed_delete_refs = false;
1788 cond_resched();
1789 }
1790
1791 if (trans) {
1792 btrfs_put_tree_mod_seq(fs_info, &elem);
1793 btrfs_end_transaction(trans);
1794 } else {
1795 up_read(&fs_info->commit_root_sem);
1796 }
1797 out:
1798 ulist_release(roots);
1799 ulist_release(tmp);
1800 return ret;
1801 }
1802
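/*
 * Illustrative sketch, not part of the kernel sources: how a fiemap-style
 * caller might drive btrfs_is_data_extent_shared(). The demo_ name and the
 * stack-allocated, zero-initialized shared cache are assumptions of this
 * example. The callee initializes and releases the ulists itself, so they
 * merely need to exist; reusing the same cache and ulists across all extents
 * of one inode is what makes the short-circuiting and the caching pay off.
 */
static int demo_extent_shared(struct btrfs_root *root, u64 inum,
			      u64 disk_bytenr, u64 extent_gen)
{
	struct btrfs_backref_shared_cache cache = { 0 };
	struct ulist roots;
	struct ulist tmp;

	/* 1 == shared, 0 == not shared, < 0 == error */
	return btrfs_is_data_extent_shared(root, inum, disk_bytenr,
					   extent_gen, &roots, &tmp, &cache);
}
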
1803 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1804 u64 start_off, struct btrfs_path *path,
1805 struct btrfs_inode_extref **ret_extref,
1806 u64 *found_off)
1807 {
1808 int ret, slot;
1809 struct btrfs_key key;
1810 struct btrfs_key found_key;
1811 struct btrfs_inode_extref *extref;
1812 const struct extent_buffer *leaf;
1813 unsigned long ptr;
1814
1815 key.objectid = inode_objectid;
1816 key.type = BTRFS_INODE_EXTREF_KEY;
1817 key.offset = start_off;
1818
1819 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1820 if (ret < 0)
1821 return ret;
1822
1823 while (1) {
1824 leaf = path->nodes[0];
1825 slot = path->slots[0];
1826 if (slot >= btrfs_header_nritems(leaf)) {
1827 /*
1828 * If the item at offset is not found,
1829 * btrfs_search_slot will point us to the slot
1830 * where it should be inserted. In our case
1831 * that will be the slot directly before the
1832 * next BTRFS_INODE_EXTREF_KEY item. In the case
1833 * that we're pointing to the last slot in a
1834 * leaf, we must move one leaf over.
1835 */
1836 ret = btrfs_next_leaf(root, path);
1837 if (ret) {
1838 if (ret >= 1)
1839 ret = -ENOENT;
1840 break;
1841 }
1842 continue;
1843 }
1844
1845 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1846
1847 /*
1848 * Check that we're still looking at an extended ref key for
1849 * this particular objectid. If we have a different
1850 * objectid or type then there are no more to be found
1851 * in the tree and we can exit.
1852 */
1853 ret = -ENOENT;
1854 if (found_key.objectid != inode_objectid)
1855 break;
1856 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1857 break;
1858
1859 ret = 0;
1860 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1861 extref = (struct btrfs_inode_extref *)ptr;
1862 *ret_extref = extref;
1863 if (found_off)
1864 *found_off = found_key.offset;
1865 break;
1866 }
1867
1868 return ret;
1869 }
1870
1871 /*
1872 * This iterates to turn a name (from an iref/extref) into a full filesystem
1873 * path. Elements of the path are separated by '/' and the path is guaranteed
1874 * to be 0-terminated. The path is only given within the current file system,
1875 * so it never starts with a '/'. The caller is responsible for providing
1876 * "size" bytes in "dest". The dest buffer will be filled backwards. Finally,
1877 * the start point of the resulting string is returned. This pointer normally
1878 * lies within dest.
1879 * In case the path buffer would overflow, the pointer is decremented further
1880 * as if output was written to the buffer, though no more output is actually
1881 * generated. That way, the caller can determine how much space would be
1882 * required for the path to fit into the buffer. In that case, the returned
1883 * value will be smaller than dest. Callers must check for this!
1884 */
1885 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1886 u32 name_len, unsigned long name_off,
1887 struct extent_buffer *eb_in, u64 parent,
1888 char *dest, u32 size)
1889 {
1890 int slot;
1891 u64 next_inum;
1892 int ret;
1893 s64 bytes_left = ((s64)size) - 1;
1894 struct extent_buffer *eb = eb_in;
1895 struct btrfs_key found_key;
1896 struct btrfs_inode_ref *iref;
1897
1898 if (bytes_left >= 0)
1899 dest[bytes_left] = '\0';
1900
1901 while (1) {
1902 bytes_left -= name_len;
1903 if (bytes_left >= 0)
1904 read_extent_buffer(eb, dest + bytes_left,
1905 name_off, name_len);
1906 if (eb != eb_in) {
1907 if (!path->skip_locking)
1908 btrfs_tree_read_unlock(eb);
1909 free_extent_buffer(eb);
1910 }
1911 ret = btrfs_find_item(fs_root, path, parent, 0,
1912 BTRFS_INODE_REF_KEY, &found_key);
1913 if (ret > 0)
1914 ret = -ENOENT;
1915 if (ret)
1916 break;
1917
1918 next_inum = found_key.offset;
1919
1920 /* regular exit ahead */
1921 if (parent == next_inum)
1922 break;
1923
1924 slot = path->slots[0];
1925 eb = path->nodes[0];
1926 /* make sure we can use eb after releasing the path */
1927 if (eb != eb_in) {
1928 path->nodes[0] = NULL;
1929 path->locks[0] = 0;
1930 }
1931 btrfs_release_path(path);
1932 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1933
1934 name_len = btrfs_inode_ref_name_len(eb, iref);
1935 name_off = (unsigned long)(iref + 1);
1936
1937 parent = next_inum;
1938 --bytes_left;
1939 if (bytes_left >= 0)
1940 dest[bytes_left] = '/';
1941 }
1942
1943 btrfs_release_path(path);
1944
1945 if (ret)
1946 return ERR_PTR(ret);
1947
1948 return dest + bytes_left;
1949 }
1950
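/*
 * Illustrative sketch, a hypothetical helper: shows the contract of
 * btrfs_ref_to_path() described above. @name_len, @name_off, @eb and
 * @parent are assumed to come from an inode ref item the caller has
 * already located. A return value below @dest means the buffer was too
 * small; its distance from @dest is the number of missing bytes.
 */
static long demo_ref_to_path(struct btrfs_root *fs_root,
			     struct btrfs_path *path, u32 name_len,
			     unsigned long name_off,
			     struct extent_buffer *eb, u64 parent,
			     char *dest, u32 size)
{
	char *start;

	start = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb,
				  parent, dest, size);
	if (IS_ERR(start))
		return PTR_ERR(start);
	if (start < dest)
		/* Caller would retry with "dest - start" more bytes. */
		return dest - start;

	/* "start" now points at the NUL-terminated path inside dest. */
	return 0;
}
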
1951 /*
1952 * This makes the path point to (logical EXTENT_ITEM *). On success it stores
1953 * BTRFS_EXTENT_FLAG_DATA for data or BTRFS_EXTENT_FLAG_TREE_BLOCK for tree
1954 * blocks in *flags_ret and returns 0; it returns <0 on error.
1955 */
1956 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1957 struct btrfs_path *path, struct btrfs_key *found_key,
1958 u64 *flags_ret)
1959 {
1960 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
1961 int ret;
1962 u64 flags;
1963 u64 size = 0;
1964 u32 item_size;
1965 const struct extent_buffer *eb;
1966 struct btrfs_extent_item *ei;
1967 struct btrfs_key key;
1968
1969 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1970 key.type = BTRFS_METADATA_ITEM_KEY;
1971 else
1972 key.type = BTRFS_EXTENT_ITEM_KEY;
1973 key.objectid = logical;
1974 key.offset = (u64)-1;
1975
1976 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1977 if (ret < 0)
1978 return ret;
1979
1980 ret = btrfs_previous_extent_item(extent_root, path, 0);
1981 if (ret) {
1982 if (ret > 0)
1983 ret = -ENOENT;
1984 return ret;
1985 }
1986 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1987 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1988 size = fs_info->nodesize;
1989 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1990 size = found_key->offset;
1991
1992 if (found_key->objectid > logical ||
1993 found_key->objectid + size <= logical) {
1994 btrfs_debug(fs_info,
1995 "logical %llu is not within any extent", logical);
1996 return -ENOENT;
1997 }
1998
1999 eb = path->nodes[0];
2000 item_size = btrfs_item_size(eb, path->slots[0]);
2001 BUG_ON(item_size < sizeof(*ei));
2002
2003 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2004 flags = btrfs_extent_flags(eb, ei);
2005
2006 btrfs_debug(fs_info,
2007 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2008 logical, logical - found_key->objectid, found_key->objectid,
2009 found_key->offset, flags, item_size);
2010
2011 WARN_ON(!flags_ret);
2012 if (flags_ret) {
2013 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2014 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2015 else if (flags & BTRFS_EXTENT_FLAG_DATA)
2016 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
2017 else
2018 BUG();
2019 return 0;
2020 }
2021
2022 return -EIO;
2023 }
2024
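/*
 * Illustrative sketch, not part of the kernel sources: resolving a logical
 * address to its extent item with extent_from_logical(). On success the
 * path points at the extent item and *flags_ret says whether it is a data
 * extent or a tree block.
 */
static int demo_extent_from_logical(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	if (ret == 0 && (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		btrfs_debug(fs_info, "%llu is a tree block of size %u",
			    logical, fs_info->nodesize);

	btrfs_free_path(path);
	return ret;
}
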
2025 /*
2026 * Helper function to iterate extent inline refs. ptr must point to a 0 value
2027 * for the first call and may be modified; it is used to track state.
2028 * If more refs exist, 0 is returned and the next call to
2029 * get_extent_inline_ref must pass the modified ptr parameter to get the
2030 * next ref. After the last ref has been processed, 1 is returned.
2031 * Returns <0 on error.
2032 */
2033 static int get_extent_inline_ref(unsigned long *ptr,
2034 const struct extent_buffer *eb,
2035 const struct btrfs_key *key,
2036 const struct btrfs_extent_item *ei,
2037 u32 item_size,
2038 struct btrfs_extent_inline_ref **out_eiref,
2039 int *out_type)
2040 {
2041 unsigned long end;
2042 u64 flags;
2043 struct btrfs_tree_block_info *info;
2044
2045 if (!*ptr) {
2046 /* first call */
2047 flags = btrfs_extent_flags(eb, ei);
2048 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2049 if (key->type == BTRFS_METADATA_ITEM_KEY) {
2050 /* a skinny metadata extent */
2051 *out_eiref =
2052 (struct btrfs_extent_inline_ref *)(ei + 1);
2053 } else {
2054 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2055 info = (struct btrfs_tree_block_info *)(ei + 1);
2056 *out_eiref =
2057 (struct btrfs_extent_inline_ref *)(info + 1);
2058 }
2059 } else {
2060 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2061 }
2062 *ptr = (unsigned long)*out_eiref;
2063 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2064 return -ENOENT;
2065 }
2066
2067 end = (unsigned long)ei + item_size;
2068 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2069 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2070 BTRFS_REF_TYPE_ANY);
2071 if (*out_type == BTRFS_REF_TYPE_INVALID)
2072 return -EUCLEAN;
2073
2074 *ptr += btrfs_extent_inline_ref_size(*out_type);
2075 WARN_ON(*ptr > end);
2076 if (*ptr == end)
2077 return 1; /* last */
2078
2079 return 0;
2080 }
2081
2082 /*
2083 * reads the tree block backref for an extent. tree level and root are returned
2084 * through out_level and out_root. ptr must point to a 0 value for the first
2085 * call and may be modified (see get_extent_inline_ref comment).
2086 * returns 0 if data was provided, 1 if there was no more data to provide or
2087 * <0 on error.
2088 */
2089 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2090 struct btrfs_key *key, struct btrfs_extent_item *ei,
2091 u32 item_size, u64 *out_root, u8 *out_level)
2092 {
2093 int ret;
2094 int type;
2095 struct btrfs_extent_inline_ref *eiref;
2096
2097 if (*ptr == (unsigned long)-1)
2098 return 1;
2099
2100 while (1) {
2101 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2102 &eiref, &type);
2103 if (ret < 0)
2104 return ret;
2105
2106 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2107 type == BTRFS_SHARED_BLOCK_REF_KEY)
2108 break;
2109
2110 if (ret == 1)
2111 return 1;
2112 }
2113
2114 /* we can treat both ref types equally here */
2115 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2116
2117 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2118 struct btrfs_tree_block_info *info;
2119
2120 info = (struct btrfs_tree_block_info *)(ei + 1);
2121 *out_level = btrfs_tree_block_level(eb, info);
2122 } else {
2123 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2124 *out_level = (u8)key->offset;
2125 }
2126
2127 if (ret == 1)
2128 *ptr = (unsigned long)-1;
2129
2130 return 0;
2131 }
2132
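/*
 * Illustrative sketch, not part of the kernel sources: walking all tree
 * block backrefs of a metadata extent with tree_backref_for_extent().
 * @eb, @key, @ei and @item_size are assumed to describe an extent item
 * already located, e.g. via extent_from_logical() above; this is how scrub
 * reports the owners of a corrupted tree block. @ptr starts at 0 and
 * carries the iterator state between calls.
 */
static int demo_tree_backrefs(struct btrfs_fs_info *fs_info,
			      struct extent_buffer *eb,
			      struct btrfs_key *key,
			      struct btrfs_extent_item *ei, u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	while (1) {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret > 0)
			break;	/* no more tree backrefs */
		btrfs_debug(fs_info, "tree backref: root %llu level %d",
			    root, level);
	}
	return 0;
}
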
2133 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2134 struct extent_inode_elem *inode_list,
2135 u64 root, u64 extent_item_objectid,
2136 iterate_extent_inodes_t *iterate, void *ctx)
2137 {
2138 struct extent_inode_elem *eie;
2139 int ret = 0;
2140
2141 for (eie = inode_list; eie; eie = eie->next) {
2142 btrfs_debug(fs_info,
2143 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
2144 extent_item_objectid, eie->inum,
2145 eie->offset, root);
2146 ret = iterate(eie->inum, eie->offset, root, ctx);
2147 if (ret) {
2148 btrfs_debug(fs_info,
2149 "stopping iteration for %llu due to ret=%d",
2150 extent_item_objectid, ret);
2151 break;
2152 }
2153 }
2154
2155 return ret;
2156 }
2157
2158 /*
2159 * calls iterate() for every inode that references the extent identified by
2160 * the given parameters.
2161 * when the iterator function returns a non-zero value, iteration stops.
2162 */
2163 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
2164 u64 extent_item_objectid, u64 extent_item_pos,
2165 int search_commit_root,
2166 iterate_extent_inodes_t *iterate, void *ctx,
2167 bool ignore_offset)
2168 {
2169 int ret;
2170 struct btrfs_trans_handle *trans = NULL;
2171 struct ulist *refs = NULL;
2172 struct ulist *roots = NULL;
2173 struct ulist_node *ref_node = NULL;
2174 struct ulist_node *root_node = NULL;
2175 struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2176 struct ulist_iterator ref_uiter;
2177 struct ulist_iterator root_uiter;
2178
2179 btrfs_debug(fs_info, "resolving all inodes for extent %llu",
2180 extent_item_objectid);
2181
2182 if (!search_commit_root) {
2183 trans = btrfs_attach_transaction(fs_info->tree_root);
2184 if (IS_ERR(trans)) {
2185 if (PTR_ERR(trans) != -ENOENT &&
2186 PTR_ERR(trans) != -EROFS)
2187 return PTR_ERR(trans);
2188 trans = NULL;
2189 }
2190 }
2191
2192 if (trans)
2193 btrfs_get_tree_mod_seq(fs_info, &seq_elem);
2194 else
2195 down_read(&fs_info->commit_root_sem);
2196
2197 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
2198 seq_elem.seq, &refs,
2199 &extent_item_pos, ignore_offset);
2200 if (ret)
2201 goto out;
2202
2203 ULIST_ITER_INIT(&ref_uiter);
2204 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2205 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2206 seq_elem.seq, &roots,
2207 ignore_offset);
2208 if (ret)
2209 break;
2210 ULIST_ITER_INIT(&root_uiter);
2211 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
2212 btrfs_debug(fs_info,
2213 "root %llu references leaf %llu, data list %#llx",
2214 root_node->val, ref_node->val,
2215 ref_node->aux);
2216 ret = iterate_leaf_refs(fs_info,
2217 (struct extent_inode_elem *)
2218 (uintptr_t)ref_node->aux,
2219 root_node->val,
2220 extent_item_objectid,
2221 iterate, ctx);
2222 }
2223 ulist_free(roots);
2224 }
2225
2226 free_leaf_list(refs);
2227 out:
2228 if (trans) {
2229 btrfs_put_tree_mod_seq(fs_info, &seq_elem);
2230 btrfs_end_transaction(trans);
2231 } else {
2232 up_read(&fs_info->commit_root_sem);
2233 }
2234
2235 return ret;
2236 }
2237
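/*
 * Illustrative sketch, not part of the kernel sources: the smallest useful
 * iterate_extent_inodes_t callback, it just counts the (inum, offset, root)
 * references handed to it. Passing a plain "u64 *" as context is an
 * assumption of this example; real callers such as build_ino_list() below
 * use richer contexts.
 */
static int demo_count_one_ref(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 *count = ctx;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static int demo_count_extent_refs(struct btrfs_fs_info *fs_info,
				  u64 extent_item_objectid,
				  u64 extent_item_pos, u64 *count)
{
	*count = 0;
	return iterate_extent_inodes(fs_info, extent_item_objectid,
				     extent_item_pos,
				     0 /* don't search the commit root */,
				     demo_count_one_ref, count, false);
}
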
2238 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
2239 {
2240 struct btrfs_data_container *inodes = ctx;
2241 const size_t c = 3 * sizeof(u64);
2242
2243 if (inodes->bytes_left >= c) {
2244 inodes->bytes_left -= c;
2245 inodes->val[inodes->elem_cnt] = inum;
2246 inodes->val[inodes->elem_cnt + 1] = offset;
2247 inodes->val[inodes->elem_cnt + 2] = root;
2248 inodes->elem_cnt += 3;
2249 } else {
2250 inodes->bytes_missing += c - inodes->bytes_left;
2251 inodes->bytes_left = 0;
2252 inodes->elem_missed += 3;
2253 }
2254
2255 return 0;
2256 }
2257
2258 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2259 struct btrfs_path *path,
2260 void *ctx, bool ignore_offset)
2261 {
2262 int ret;
2263 u64 extent_item_pos;
2264 u64 flags = 0;
2265 struct btrfs_key found_key;
2266 int search_commit_root = path->search_commit_root;
2267
2268 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2269 btrfs_release_path(path);
2270 if (ret < 0)
2271 return ret;
2272 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2273 return -EINVAL;
2274
2275 extent_item_pos = logical - found_key.objectid;
2276 ret = iterate_extent_inodes(fs_info, found_key.objectid,
2277 extent_item_pos, search_commit_root,
2278 build_ino_list, ctx, ignore_offset);
2279
2280 return ret;
2281 }
2282
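/*
 * Illustrative sketch, not part of the kernel sources: the
 * LOGICAL_INO-ioctl-style use of iterate_inodes_from_logical(), collecting
 * (inum, offset, root) triplets into a data container via build_ino_list()
 * above. The 4096-byte container size is an arbitrary choice for this
 * example.
 */
static int demo_inodes_from_logical(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct btrfs_data_container *inodes;
	struct btrfs_path *path;
	int ret;

	inodes = init_data_container(4096);
	if (IS_ERR(inodes))
		return PTR_ERR(inodes);

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(inodes);
		return -ENOMEM;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path, inodes,
					  false);
	/*
	 * On success, inodes->val[] holds inodes->elem_cnt u64s (triplets of
	 * inum, offset, root); elem_missed and bytes_missing report whatever
	 * did not fit.
	 */
	btrfs_free_path(path);
	kvfree(inodes);
	return ret;
}
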
2283 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2284 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2285
2286 static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2287 {
2288 int ret = 0;
2289 int slot;
2290 u32 cur;
2291 u32 len;
2292 u32 name_len;
2293 u64 parent = 0;
2294 int found = 0;
2295 struct btrfs_root *fs_root = ipath->fs_root;
2296 struct btrfs_path *path = ipath->btrfs_path;
2297 struct extent_buffer *eb;
2298 struct btrfs_inode_ref *iref;
2299 struct btrfs_key found_key;
2300
2301 while (!ret) {
2302 ret = btrfs_find_item(fs_root, path, inum,
2303 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2304 &found_key);
2305
2306 if (ret < 0)
2307 break;
2308 if (ret) {
2309 ret = found ? 0 : -ENOENT;
2310 break;
2311 }
2312 ++found;
2313
2314 parent = found_key.offset;
2315 slot = path->slots[0];
2316 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2317 if (!eb) {
2318 ret = -ENOMEM;
2319 break;
2320 }
2321 btrfs_release_path(path);
2322
2323 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2324
2325 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2326 name_len = btrfs_inode_ref_name_len(eb, iref);
2327 /* path must be released before calling iterate()! */
2328 btrfs_debug(fs_root->fs_info,
2329 "following ref at offset %u for inode %llu in tree %llu",
2330 cur, found_key.objectid,
2331 fs_root->root_key.objectid);
2332 ret = inode_to_path(parent, name_len,
2333 (unsigned long)(iref + 1), eb, ipath);
2334 if (ret)
2335 break;
2336 len = sizeof(*iref) + name_len;
2337 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2338 }
2339 free_extent_buffer(eb);
2340 }
2341
2342 btrfs_release_path(path);
2343
2344 return ret;
2345 }
2346
2347 static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2348 {
2349 int ret;
2350 int slot;
2351 u64 offset = 0;
2352 u64 parent;
2353 int found = 0;
2354 struct btrfs_root *fs_root = ipath->fs_root;
2355 struct btrfs_path *path = ipath->btrfs_path;
2356 struct extent_buffer *eb;
2357 struct btrfs_inode_extref *extref;
2358 u32 item_size;
2359 u32 cur_offset;
2360 unsigned long ptr;
2361
2362 while (1) {
2363 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2364 &offset);
2365 if (ret < 0)
2366 break;
2367 if (ret) {
2368 ret = found ? 0 : -ENOENT;
2369 break;
2370 }
2371 ++found;
2372
2373 slot = path->slots[0];
2374 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2375 if (!eb) {
2376 ret = -ENOMEM;
2377 break;
2378 }
2379 btrfs_release_path(path);
2380
2381 item_size = btrfs_item_size(eb, slot);
2382 ptr = btrfs_item_ptr_offset(eb, slot);
2383 cur_offset = 0;
2384
2385 while (cur_offset < item_size) {
2386 u32 name_len;
2387
2388 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2389 parent = btrfs_inode_extref_parent(eb, extref);
2390 name_len = btrfs_inode_extref_name_len(eb, extref);
2391 ret = inode_to_path(parent, name_len,
2392 (unsigned long)&extref->name, eb, ipath);
2393 if (ret)
2394 break;
2395
2396 cur_offset += btrfs_inode_extref_name_len(eb, extref);
2397 cur_offset += sizeof(*extref);
2398 }
2399 free_extent_buffer(eb);
2400
2401 offset++;
2402 }
2403
2404 btrfs_release_path(path);
2405
2406 return ret;
2407 }
2408
2409 /*
2410 * returns 0 if the path could be dumped (possibly truncated)
2411 * returns <0 in case of an error
2412 */
2413 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2414 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2415 {
2416 char *fspath;
2417 char *fspath_min;
2418 int i = ipath->fspath->elem_cnt;
2419 const int s_ptr = sizeof(char *);
2420 u32 bytes_left;
2421
2422 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2423 ipath->fspath->bytes_left - s_ptr : 0;
2424
2425 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2426 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2427 name_off, eb, inum, fspath_min, bytes_left);
2428 if (IS_ERR(fspath))
2429 return PTR_ERR(fspath);
2430
2431 if (fspath > fspath_min) {
2432 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2433 ++ipath->fspath->elem_cnt;
2434 ipath->fspath->bytes_left = fspath - fspath_min;
2435 } else {
2436 ++ipath->fspath->elem_missed;
2437 ipath->fspath->bytes_missing += fspath_min - fspath;
2438 ipath->fspath->bytes_left = 0;
2439 }
2440
2441 return 0;
2442 }
2443
2444 /*
2445 * this dumps all file system paths to the inode into the ipath struct, provided
2446 * it has been created large enough. each path is zero-terminated and accessed
2447 * from ipath->fspath->val[i].
2448 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2449 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2450 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2451 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2452 * have been needed to return all paths.
2453 */
2454 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2455 {
2456 int ret;
2457 int found_refs = 0;
2458
2459 ret = iterate_inode_refs(inum, ipath);
2460 if (!ret)
2461 ++found_refs;
2462 else if (ret != -ENOENT)
2463 return ret;
2464
2465 ret = iterate_inode_extrefs(inum, ipath);
2466 if (ret == -ENOENT && found_refs)
2467 return 0;
2468
2469 return ret;
2470 }
2471
2472 struct btrfs_data_container *init_data_container(u32 total_bytes)
2473 {
2474 struct btrfs_data_container *data;
2475 size_t alloc_bytes;
2476
2477 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2478 data = kvmalloc(alloc_bytes, GFP_KERNEL);
2479 if (!data)
2480 return ERR_PTR(-ENOMEM);
2481
2482 if (total_bytes >= sizeof(*data)) {
2483 data->bytes_left = total_bytes - sizeof(*data);
2484 data->bytes_missing = 0;
2485 } else {
2486 data->bytes_missing = sizeof(*data) - total_bytes;
2487 data->bytes_left = 0;
2488 }
2489
2490 data->elem_cnt = 0;
2491 data->elem_missed = 0;
2492
2493 return data;
2494 }
2495
2496 /*
2497 * allocates space to return multiple file system paths for an inode.
2498 * the number of bytes to allocate is passed in total_bytes. note that the
2499 * space usable for actual path information is total_bytes - sizeof(struct btrfs_data_container).
2500 * the returned pointer must be freed with free_ipath() in the end.
2501 */
2502 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2503 struct btrfs_path *path)
2504 {
2505 struct inode_fs_paths *ifp;
2506 struct btrfs_data_container *fspath;
2507
2508 fspath = init_data_container(total_bytes);
2509 if (IS_ERR(fspath))
2510 return ERR_CAST(fspath);
2511
2512 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2513 if (!ifp) {
2514 kvfree(fspath);
2515 return ERR_PTR(-ENOMEM);
2516 }
2517
2518 ifp->btrfs_path = path;
2519 ifp->fspath = fspath;
2520 ifp->fs_root = fs_root;
2521
2522 return ifp;
2523 }
2524
2525 void free_ipath(struct inode_fs_paths *ipath)
2526 {
2527 if (!ipath)
2528 return;
2529 kvfree(ipath->fspath);
2530 kfree(ipath);
2531 }
2532
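/*
 * Illustrative sketch, not part of the kernel sources: the complete
 * INO_PATHS-ioctl-style lifecycle, resolving every filesystem path of an
 * inode. The 4096-byte size is an arbitrary example value. Note that
 * free_ipath() frees the container but not the btrfs_path.
 */
static int demo_paths_from_inode(struct btrfs_root *fs_root, u64 inum)
{
	struct inode_fs_paths *ipath;
	struct btrfs_path *path;
	int ret;
	u32 i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (ret == 0)
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			btrfs_debug(fs_root->fs_info, "path %u: %s", i,
				    (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}
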
2533 struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2534 struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2535 {
2536 struct btrfs_backref_iter *ret;
2537
2538 ret = kzalloc(sizeof(*ret), gfp_flag);
2539 if (!ret)
2540 return NULL;
2541
2542 ret->path = btrfs_alloc_path();
2543 if (!ret->path) {
2544 kfree(ret);
2545 return NULL;
2546 }
2547
2548 /* Current backref iterator only supports iteration in commit root */
2549 ret->path->search_commit_root = 1;
2550 ret->path->skip_locking = 1;
2551 ret->fs_info = fs_info;
2552
2553 return ret;
2554 }
2555
2556 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2557 {
2558 struct btrfs_fs_info *fs_info = iter->fs_info;
2559 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2560 struct btrfs_path *path = iter->path;
2561 struct btrfs_extent_item *ei;
2562 struct btrfs_key key;
2563 int ret;
2564
2565 key.objectid = bytenr;
2566 key.type = BTRFS_METADATA_ITEM_KEY;
2567 key.offset = (u64)-1;
2568 iter->bytenr = bytenr;
2569
2570 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2571 if (ret < 0)
2572 return ret;
2573 if (ret == 0) {
2574 ret = -EUCLEAN;
2575 goto release;
2576 }
2577 if (path->slots[0] == 0) {
2578 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2579 ret = -EUCLEAN;
2580 goto release;
2581 }
2582 path->slots[0]--;
2583
2584 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2585 if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2586 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2587 ret = -ENOENT;
2588 goto release;
2589 }
2590 memcpy(&iter->cur_key, &key, sizeof(key));
2591 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2592 path->slots[0]);
2593 iter->end_ptr = (u32)(iter->item_ptr +
2594 btrfs_item_size(path->nodes[0], path->slots[0]));
2595 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2596 struct btrfs_extent_item);
2597
2598 /*
2599 * Only iteration of tree backrefs is supported for now.
2600 *
2601 * This is an extra precaution for non skinny-metadata, where
2602 * EXTENT_ITEM is also used for tree blocks, and we can only use
2603 * extent flags to determine if it's a tree block.
2604 */
2605 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2606 ret = -ENOTSUPP;
2607 goto release;
2608 }
2609 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2610
2611 /* If there is no inline backref, go search for keyed backref */
2612 if (iter->cur_ptr >= iter->end_ptr) {
2613 ret = btrfs_next_item(extent_root, path);
2614
2615 /* No inline nor keyed ref */
2616 if (ret > 0) {
2617 ret = -ENOENT;
2618 goto release;
2619 }
2620 if (ret < 0)
2621 goto release;
2622
2623 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2624 path->slots[0]);
2625 if (iter->cur_key.objectid != bytenr ||
2626 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2627 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2628 ret = -ENOENT;
2629 goto release;
2630 }
2631 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2632 path->slots[0]);
2633 iter->item_ptr = iter->cur_ptr;
2634 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2635 path->nodes[0], path->slots[0]));
2636 }
2637
2638 return 0;
2639 release:
2640 btrfs_backref_iter_release(iter);
2641 return ret;
2642 }
2643
2644 /*
2645 * Go to the next backref item of the current bytenr; it can be either
2646 * inlined or keyed.
2647 *
2648 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2649 *
2650 * Return 0 if we get next backref without problem.
2651 * Return >0 if there is no extra backref for this bytenr.
2652 * Return <0 if something went wrong.
2653 */
2654 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2655 {
2656 struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2657 struct btrfs_root *extent_root;
2658 struct btrfs_path *path = iter->path;
2659 struct btrfs_extent_inline_ref *iref;
2660 int ret;
2661 u32 size;
2662
2663 if (btrfs_backref_iter_is_inline_ref(iter)) {
2664 /* We're still inside the inline refs */
2665 ASSERT(iter->cur_ptr < iter->end_ptr);
2666
2667 if (btrfs_backref_has_tree_block_info(iter)) {
2668 /* First tree block info */
2669 size = sizeof(struct btrfs_tree_block_info);
2670 } else {
2671 /* Use inline ref type to determine the size */
2672 int type;
2673
2674 iref = (struct btrfs_extent_inline_ref *)
2675 ((unsigned long)iter->cur_ptr);
2676 type = btrfs_extent_inline_ref_type(eb, iref);
2677
2678 size = btrfs_extent_inline_ref_size(type);
2679 }
2680 iter->cur_ptr += size;
2681 if (iter->cur_ptr < iter->end_ptr)
2682 return 0;
2683
2684 /* All inline items iterated, fall through */
2685 }
2686
2687 /* We're at keyed items, there is no inline item, go to the next one */
2688 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
2689 ret = btrfs_next_item(extent_root, iter->path);
2690 if (ret)
2691 return ret;
2692
2693 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2694 if (iter->cur_key.objectid != iter->bytenr ||
2695 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2696 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2697 return 1;
2698 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2699 path->slots[0]);
2700 iter->cur_ptr = iter->item_ptr;
2701 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
2702 path->slots[0]);
2703 return 0;
2704 }
2705
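/*
 * Illustrative sketch, not part of the kernel sources: the standard loop
 * over all tree backrefs of a bytenr using the backref iterator above.
 * Whether the current ref is inline or keyed can be told apart with
 * btrfs_backref_iter_is_inline_ref() and iter->cur_key (both from
 * backref.h, as is btrfs_backref_iter_free()).
 */
static int demo_iterate_tree_backrefs(struct btrfs_fs_info *fs_info,
				      u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;
	while (ret == 0) {
		/* Examine iter->cur_key / iter->cur_ptr here. */
		ret = btrfs_backref_iter_next(iter);
	}
	if (ret > 0)	/* ran past the last backref, not an error */
		ret = 0;
	btrfs_backref_iter_release(iter);
out:
	btrfs_backref_iter_free(iter);
	return ret;
}
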
2706 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2707 struct btrfs_backref_cache *cache, int is_reloc)
2708 {
2709 int i;
2710
2711 cache->rb_root = RB_ROOT;
2712 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2713 INIT_LIST_HEAD(&cache->pending[i]);
2714 INIT_LIST_HEAD(&cache->changed);
2715 INIT_LIST_HEAD(&cache->detached);
2716 INIT_LIST_HEAD(&cache->leaves);
2717 INIT_LIST_HEAD(&cache->pending_edge);
2718 INIT_LIST_HEAD(&cache->useless_node);
2719 cache->fs_info = fs_info;
2720 cache->is_reloc = is_reloc;
2721 }
2722
2723 struct btrfs_backref_node *btrfs_backref_alloc_node(
2724 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2725 {
2726 struct btrfs_backref_node *node;
2727
2728 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2729 node = kzalloc(sizeof(*node), GFP_NOFS);
2730 if (!node)
2731 return node;
2732
2733 INIT_LIST_HEAD(&node->list);
2734 INIT_LIST_HEAD(&node->upper);
2735 INIT_LIST_HEAD(&node->lower);
2736 RB_CLEAR_NODE(&node->rb_node);
2737 cache->nr_nodes++;
2738 node->level = level;
2739 node->bytenr = bytenr;
2740
2741 return node;
2742 }
2743
2744 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2745 struct btrfs_backref_cache *cache)
2746 {
2747 struct btrfs_backref_edge *edge;
2748
2749 edge = kzalloc(sizeof(*edge), GFP_NOFS);
2750 if (edge)
2751 cache->nr_edges++;
2752 return edge;
2753 }
2754
2755 /*
2756 * Drop the backref node from cache, also cleaning up all its
2757 * upper edges and any uncached nodes in the path.
2758 *
2759 * This cleanup happens bottom up, thus the node should either
2760 * be the lowest node in the cache or a detached node.
2761 */
2762 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2763 struct btrfs_backref_node *node)
2764 {
2765 struct btrfs_backref_node *upper;
2766 struct btrfs_backref_edge *edge;
2767
2768 if (!node)
2769 return;
2770
2771 BUG_ON(!node->lowest && !node->detached);
2772 while (!list_empty(&node->upper)) {
2773 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2774 list[LOWER]);
2775 upper = edge->node[UPPER];
2776 list_del(&edge->list[LOWER]);
2777 list_del(&edge->list[UPPER]);
2778 btrfs_backref_free_edge(cache, edge);
2779
2780 /*
2781 * Add the node to the leaf node list if no other child block
2782 * is cached.
2783 */
2784 if (list_empty(&upper->lower)) {
2785 list_add_tail(&upper->lower, &cache->leaves);
2786 upper->lowest = 1;
2787 }
2788 }
2789
2790 btrfs_backref_drop_node(cache, node);
2791 }
2792
2793 /*
2794 * Release all nodes/edges from current cache
2795 */
2796 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2797 {
2798 struct btrfs_backref_node *node;
2799 int i;
2800
2801 while (!list_empty(&cache->detached)) {
2802 node = list_entry(cache->detached.next,
2803 struct btrfs_backref_node, list);
2804 btrfs_backref_cleanup_node(cache, node);
2805 }
2806
2807 while (!list_empty(&cache->leaves)) {
2808 node = list_entry(cache->leaves.next,
2809 struct btrfs_backref_node, lower);
2810 btrfs_backref_cleanup_node(cache, node);
2811 }
2812
2813 cache->last_trans = 0;
2814
2815 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2816 ASSERT(list_empty(&cache->pending[i]));
2817 ASSERT(list_empty(&cache->pending_edge));
2818 ASSERT(list_empty(&cache->useless_node));
2819 ASSERT(list_empty(&cache->changed));
2820 ASSERT(list_empty(&cache->detached));
2821 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2822 ASSERT(!cache->nr_nodes);
2823 ASSERT(!cache->nr_edges);
2824 }
2825
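/*
 * Illustrative sketch, not part of the kernel sources: the minimal
 * lifecycle of a backref cache. Population happens through
 * btrfs_backref_add_tree_node() and btrfs_backref_finish_upper_links()
 * further below, as relocation's build_backref_tree() does; releasing an
 * empty (or fully torn down) cache is valid and just runs the sanity
 * assertions.
 */
static void demo_backref_cache_lifecycle(struct btrfs_fs_info *fs_info)
{
	struct btrfs_backref_cache cache;

	btrfs_backref_init_cache(fs_info, &cache, 0 /* generic, not reloc */);

	/* ... populate and use the cache here ... */

	btrfs_backref_release_cache(&cache);
}
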
2826 /*
2827 * Handle direct tree backref
2828 *
2829 * A direct tree backref means that the backref item shows its parent bytenr
2830 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2831 *
2832 * @ref_key: The converted backref key.
2833 * For keyed backref, it's the item key.
2834 * For inlined backref, objectid is the bytenr,
2835 * type is btrfs_inline_ref_type, offset is
2836 * btrfs_inline_ref_offset.
2837 */
2838 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2839 struct btrfs_key *ref_key,
2840 struct btrfs_backref_node *cur)
2841 {
2842 struct btrfs_backref_edge *edge;
2843 struct btrfs_backref_node *upper;
2844 struct rb_node *rb_node;
2845
2846 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2847
2848 /* Only reloc root uses backref pointing to itself */
2849 if (ref_key->objectid == ref_key->offset) {
2850 struct btrfs_root *root;
2851
2852 cur->is_reloc_root = 1;
2853 /* Only reloc backref cache cares about a specific root */
2854 if (cache->is_reloc) {
2855 root = find_reloc_root(cache->fs_info, cur->bytenr);
2856 if (!root)
2857 return -ENOENT;
2858 cur->root = root;
2859 } else {
2860 /*
2861 * For generic purpose backref cache, reloc root node
2862 * is useless.
2863 */
2864 list_add(&cur->list, &cache->useless_node);
2865 }
2866 return 0;
2867 }
2868
2869 edge = btrfs_backref_alloc_edge(cache);
2870 if (!edge)
2871 return -ENOMEM;
2872
2873 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2874 if (!rb_node) {
2875 /* Parent node not yet cached */
2876 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2877 cur->level + 1);
2878 if (!upper) {
2879 btrfs_backref_free_edge(cache, edge);
2880 return -ENOMEM;
2881 }
2882
2883 /*
2884 * Backrefs for the upper level block aren't cached, add the
2885 * block to the pending list
2886 */
2887 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2888 } else {
2889 /* Parent node already cached */
2890 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2891 ASSERT(upper->checked);
2892 INIT_LIST_HEAD(&edge->list[UPPER]);
2893 }
2894 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2895 return 0;
2896 }
2897
2898 /*
2899 * Handle indirect tree backref
2900 *
2901 * An indirect tree backref means that we only know which tree the node belongs to.
2902 * We still need to do a tree search to find out the parents. This is for
2903 * TREE_BLOCK_REF backref (keyed or inlined).
2904 *
2905 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
2906 * @tree_key: The first key of this tree block.
2907 * @path: A clean (released) path, to avoid allocating a path every time
2908 * the function gets called.
2909 */
2910 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2911 struct btrfs_path *path,
2912 struct btrfs_key *ref_key,
2913 struct btrfs_key *tree_key,
2914 struct btrfs_backref_node *cur)
2915 {
2916 struct btrfs_fs_info *fs_info = cache->fs_info;
2917 struct btrfs_backref_node *upper;
2918 struct btrfs_backref_node *lower;
2919 struct btrfs_backref_edge *edge;
2920 struct extent_buffer *eb;
2921 struct btrfs_root *root;
2922 struct rb_node *rb_node;
2923 int level;
2924 bool need_check = true;
2925 int ret;
2926
2927 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2928 if (IS_ERR(root))
2929 return PTR_ERR(root);
2930 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2931 cur->cowonly = 1;
2932
2933 if (btrfs_root_level(&root->root_item) == cur->level) {
2934 /* Tree root */
2935 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2936 /*
2937 * For the reloc backref cache, we may ignore the reloc root. But
2938 * for a general purpose backref cache, we can't rely on
2939 * btrfs_should_ignore_reloc_root() as it may conflict with the
2940 * currently running relocation and lead to a missing root.
2941 *
2942 * For a general purpose backref cache, reloc root detection relies
2943 * completely on direct backrefs (key->offset is the parent
2944 * bytenr), thus we only do such a check for the reloc cache.
2945 */
2946 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2947 btrfs_put_root(root);
2948 list_add(&cur->list, &cache->useless_node);
2949 } else {
2950 cur->root = root;
2951 }
2952 return 0;
2953 }
2954
2955 level = cur->level + 1;
2956
2957 /* Search the tree to find parent blocks referring to the block */
2958 path->search_commit_root = 1;
2959 path->skip_locking = 1;
2960 path->lowest_level = level;
2961 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2962 path->lowest_level = 0;
2963 if (ret < 0) {
2964 btrfs_put_root(root);
2965 return ret;
2966 }
2967 if (ret > 0 && path->slots[level] > 0)
2968 path->slots[level]--;
2969
2970 eb = path->nodes[level];
2971 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2972 btrfs_err(fs_info,
2973 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2974 cur->bytenr, level - 1, root->root_key.objectid,
2975 tree_key->objectid, tree_key->type, tree_key->offset);
2976 btrfs_put_root(root);
2977 ret = -ENOENT;
2978 goto out;
2979 }
2980 lower = cur;
2981
2982 /* Add all nodes and edges in the path */
2983 for (; level < BTRFS_MAX_LEVEL; level++) {
2984 if (!path->nodes[level]) {
2985 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2986 lower->bytenr);
2987 /* Same as previous should_ignore_reloc_root() call */
2988 if (btrfs_should_ignore_reloc_root(root) &&
2989 cache->is_reloc) {
2990 btrfs_put_root(root);
2991 list_add(&lower->list, &cache->useless_node);
2992 } else {
2993 lower->root = root;
2994 }
2995 break;
2996 }
2997
2998 edge = btrfs_backref_alloc_edge(cache);
2999 if (!edge) {
3000 btrfs_put_root(root);
3001 ret = -ENOMEM;
3002 goto out;
3003 }
3004
3005 eb = path->nodes[level];
3006 rb_node = rb_simple_search(&cache->rb_root, eb->start);
3007 if (!rb_node) {
3008 upper = btrfs_backref_alloc_node(cache, eb->start,
3009 lower->level + 1);
3010 if (!upper) {
3011 btrfs_put_root(root);
3012 btrfs_backref_free_edge(cache, edge);
3013 ret = -ENOMEM;
3014 goto out;
3015 }
3016 upper->owner = btrfs_header_owner(eb);
3017 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3018 upper->cowonly = 1;
3019
3020 /*
3021 * If we know the block isn't shared we can avoid
3022 * checking its backrefs.
3023 */
3024 if (btrfs_block_can_be_shared(root, eb))
3025 upper->checked = 0;
3026 else
3027 upper->checked = 1;
3028
3029 /*
3030 * Add the block to pending list if we need to check its
3031 * backrefs, we only do this once while walking up a
3032 * tree as we will catch anything else later on.
3033 */
3034 if (!upper->checked && need_check) {
3035 need_check = false;
3036 list_add_tail(&edge->list[UPPER],
3037 &cache->pending_edge);
3038 } else {
3039 if (upper->checked)
3040 need_check = true;
3041 INIT_LIST_HEAD(&edge->list[UPPER]);
3042 }
3043 } else {
3044 upper = rb_entry(rb_node, struct btrfs_backref_node,
3045 rb_node);
3046 ASSERT(upper->checked);
3047 INIT_LIST_HEAD(&edge->list[UPPER]);
3048 if (!upper->owner)
3049 upper->owner = btrfs_header_owner(eb);
3050 }
3051 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3052
3053 if (rb_node) {
3054 btrfs_put_root(root);
3055 break;
3056 }
3057 lower = upper;
3058 upper = NULL;
3059 }
3060 out:
3061 btrfs_release_path(path);
3062 return ret;
3063 }
3064
3065 /*
3066 * Add backref node @cur into @cache.
3067 *
3068 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3069 * links aren't yet bi-directional. The caller needs to finish such links
3070 * by calling btrfs_backref_finish_upper_links().
3071 *
3072 * @path: Released path for indirect tree backref lookup
3073 * @iter: Released backref iter for extent tree search
3074 * @node_key: The first key of the tree block
3075 */
3076 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
3077 struct btrfs_path *path,
3078 struct btrfs_backref_iter *iter,
3079 struct btrfs_key *node_key,
3080 struct btrfs_backref_node *cur)
3081 {
3082 struct btrfs_fs_info *fs_info = cache->fs_info;
3083 struct btrfs_backref_edge *edge;
3084 struct btrfs_backref_node *exist;
3085 int ret;
3086
3087 ret = btrfs_backref_iter_start(iter, cur->bytenr);
3088 if (ret < 0)
3089 return ret;
3090 /*
3091 * We skip the first btrfs_tree_block_info, as we don't use the key
3092 * stored in it, but fetch it from the tree block
3093 */
3094 if (btrfs_backref_has_tree_block_info(iter)) {
3095 ret = btrfs_backref_iter_next(iter);
3096 if (ret < 0)
3097 goto out;
3098 /* No extra backref? This means the tree block is corrupted */
3099 if (ret > 0) {
3100 ret = -EUCLEAN;
3101 goto out;
3102 }
3103 }
3104 WARN_ON(cur->checked);
3105 if (!list_empty(&cur->upper)) {
3106 /*
3107 * The backref was added previously when processing backref of
3108 * type BTRFS_TREE_BLOCK_REF_KEY
3109 */
3110 ASSERT(list_is_singular(&cur->upper));
3111 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3112 list[LOWER]);
3113 ASSERT(list_empty(&edge->list[UPPER]));
3114 exist = edge->node[UPPER];
3115 /*
3116 * Add the upper level block to the pending list if we need to
3117 * check its backrefs
3118 */
3119 if (!exist->checked)
3120 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3121 } else {
3122 exist = NULL;
3123 }
3124
3125 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3126 struct extent_buffer *eb;
3127 struct btrfs_key key;
3128 int type;
3129
3130 cond_resched();
3131 eb = btrfs_backref_get_eb(iter);
3132
3133 key.objectid = iter->bytenr;
3134 if (btrfs_backref_iter_is_inline_ref(iter)) {
3135 struct btrfs_extent_inline_ref *iref;
3136
3137 /* Update key for inline backref */
3138 iref = (struct btrfs_extent_inline_ref *)
3139 ((unsigned long)iter->cur_ptr);
3140 type = btrfs_get_extent_inline_ref_type(eb, iref,
3141 BTRFS_REF_TYPE_BLOCK);
3142 if (type == BTRFS_REF_TYPE_INVALID) {
3143 ret = -EUCLEAN;
3144 goto out;
3145 }
3146 key.type = type;
3147 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3148 } else {
3149 key.type = iter->cur_key.type;
3150 key.offset = iter->cur_key.offset;
3151 }
3152
3153 /*
3154 * Parent node found and matches current inline ref, no need to
3155 * rebuild this node for this inline ref
3156 */
3157 if (exist &&
3158 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3159 exist->owner == key.offset) ||
3160 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3161 exist->bytenr == key.offset))) {
3162 exist = NULL;
3163 continue;
3164 }
3165
3166 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3167 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3168 ret = handle_direct_tree_backref(cache, &key, cur);
3169 if (ret < 0)
3170 goto out;
3171 continue;
3172 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3173 ret = -EINVAL;
3174 btrfs_print_v0_err(fs_info);
3175 btrfs_handle_fs_error(fs_info, ret, NULL);
3176 goto out;
3177 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
3178 continue;
3179 }
3180
3181 /*
3182 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
3183 * means the root objectid. We need to search the tree to get
3184 * its parent bytenr.
3185 */
3186 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
3187 cur);
3188 if (ret < 0)
3189 goto out;
3190 }
3191 ret = 0;
3192 cur->checked = 1;
3193 WARN_ON(exist);
3194 out:
3195 btrfs_backref_iter_release(iter);
3196 return ret;
3197 }
3198
3199 /*
3200 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3201 */
3202 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3203 struct btrfs_backref_node *start)
3204 {
3205 struct list_head *useless_node = &cache->useless_node;
3206 struct btrfs_backref_edge *edge;
3207 struct rb_node *rb_node;
3208 LIST_HEAD(pending_edge);
3209
3210 ASSERT(start->checked);
3211
3212 /* Insert this node to cache if it's not COW-only */
3213 if (!start->cowonly) {
3214 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3215 &start->rb_node);
3216 if (rb_node)
3217 btrfs_backref_panic(cache->fs_info, start->bytenr,
3218 -EEXIST);
3219 list_add_tail(&start->lower, &cache->leaves);
3220 }
3221
3222 /*
3223 * Use breadth first search to iterate all related edges.
3224 *
3225 * The starting points are all the edges of this node
3226 */
3227 list_for_each_entry(edge, &start->upper, list[LOWER])
3228 list_add_tail(&edge->list[UPPER], &pending_edge);
3229
3230 while (!list_empty(&pending_edge)) {
3231 struct btrfs_backref_node *upper;
3232 struct btrfs_backref_node *lower;
3233
3234 edge = list_first_entry(&pending_edge,
3235 struct btrfs_backref_edge, list[UPPER]);
3236 list_del_init(&edge->list[UPPER]);
3237 upper = edge->node[UPPER];
3238 lower = edge->node[LOWER];
3239
3240 /* Parent is detached, no need to keep any edges */
3241 if (upper->detached) {
3242 list_del(&edge->list[LOWER]);
3243 btrfs_backref_free_edge(cache, edge);
3244
3245 /* Lower node is orphan, queue for cleanup */
3246 if (list_empty(&lower->upper))
3247 list_add(&lower->list, useless_node);
3248 continue;
3249 }
3250
3251 /*
3252 * All new nodes added in current build_backref_tree() haven't
3253 * been linked to the cache rb tree.
3254 * So if we have upper->rb_node populated, this means a cache
3255 * hit. We only need to link the edge, as @upper and all its
3256 * parents have already been linked.
3257 */
3258 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3259 if (upper->lowest) {
3260 list_del_init(&upper->lower);
3261 upper->lowest = 0;
3262 }
3263
3264 list_add_tail(&edge->list[UPPER], &upper->lower);
3265 continue;
3266 }
3267
3268 /* Sanity check, we shouldn't have any unchecked nodes */
3269 if (!upper->checked) {
3270 ASSERT(0);
3271 return -EUCLEAN;
3272 }
3273
3274 /* Sanity check, COW-only node has non-COW-only parent */
3275 if (start->cowonly != upper->cowonly) {
3276 ASSERT(0);
3277 return -EUCLEAN;
3278 }
3279
3280 /* Only cache non-COW-only (subvolume trees) tree blocks */
3281 if (!upper->cowonly) {
3282 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3283 &upper->rb_node);
3284 if (rb_node) {
3285 btrfs_backref_panic(cache->fs_info,
3286 upper->bytenr, -EEXIST);
3287 return -EUCLEAN;
3288 }
3289 }
3290
3291 list_add_tail(&edge->list[UPPER], &upper->lower);
3292
3293 /*
3294 * Also queue all the parent edges of this uncached node
3295 * to finish the upper linkage
3296 */
3297 list_for_each_entry(edge, &upper->upper, list[LOWER])
3298 list_add_tail(&edge->list[UPPER], &pending_edge);
3299 }
3300 return 0;
3301 }
3302
3303 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3304 struct btrfs_backref_node *node)
3305 {
3306 struct btrfs_backref_node *lower;
3307 struct btrfs_backref_node *upper;
3308 struct btrfs_backref_edge *edge;
3309
3310 while (!list_empty(&cache->useless_node)) {
3311 lower = list_first_entry(&cache->useless_node,
3312 struct btrfs_backref_node, list);
3313 list_del_init(&lower->list);
3314 }
3315 while (!list_empty(&cache->pending_edge)) {
3316 edge = list_first_entry(&cache->pending_edge,
3317 struct btrfs_backref_edge, list[UPPER]);
3318 list_del(&edge->list[UPPER]);
3319 list_del(&edge->list[LOWER]);
3320 lower = edge->node[LOWER];
3321 upper = edge->node[UPPER];
3322 btrfs_backref_free_edge(cache, edge);
3323
3324 /*
3325 * Lower is no longer linked to any upper backref nodes and
3326 * isn't in the cache, we can free it ourselves.
3327 */
3328 if (list_empty(&lower->upper) &&
3329 RB_EMPTY_NODE(&lower->rb_node))
3330 list_add(&lower->list, &cache->useless_node);
3331
3332 if (!RB_EMPTY_NODE(&upper->rb_node))
3333 continue;
3334
3335 /* Add this guy's upper edges to the list to process */
3336 list_for_each_entry(edge, &upper->upper, list[LOWER])
3337 list_add_tail(&edge->list[UPPER],
3338 &cache->pending_edge);
3339 if (list_empty(&upper->upper))
3340 list_add(&upper->list, &cache->useless_node);
3341 }
3342
3343 while (!list_empty(&cache->useless_node)) {
3344 lower = list_first_entry(&cache->useless_node,
3345 struct btrfs_backref_node, list);
3346 list_del_init(&lower->list);
3347 if (lower == node)
3348 node = NULL;
3349 btrfs_backref_drop_node(cache, lower);
3350 }
3351
3352 btrfs_backref_cleanup_node(cache, node);
3353 ASSERT(list_empty(&cache->useless_node) &&
3354 list_empty(&cache->pending_edge));
3355 }
3356
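/*
 * Illustrative sketch, not part of the kernel sources: how the three
 * helpers above fit together, modeled on relocation's
 * build_backref_tree(). The demo_ name is an assumption of this example;
 * @node_key is the first key of the tree block at @bytenr.
 */
static struct btrfs_backref_node *demo_build_backref_node(
		struct btrfs_backref_cache *cache,
		struct btrfs_key *node_key, int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_path *path;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_edge *edge;
	int ret;

	iter = btrfs_backref_iter_alloc(cache->fs_info, GFP_NOFS);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		btrfs_backref_iter_free(iter);
		return ERR_PTR(-ENOMEM);
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		ret = -ENOMEM;
		goto error;
	}
	node->lowest = 1;
	cur = node;

	/* Step 1: collect backrefs breadth-first, lowest block first */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter,
						  node_key, cur);
		if (ret < 0)
			goto error;
		/* Upper blocks that still need checking are queued here */
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Step 2: make all the upper links bi-directional */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;

	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	return node;

error:
	/* Step 3, on failure only: unwind everything queued so far */
	btrfs_backref_error_cleanup(cache, node);
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	return ERR_PTR(ret);
}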