1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
9 #include <linux/mm.h>
10 #include <linux/error-injection.h>
11 #include "messages.h"
12 #include "ctree.h"
13 #include "disk-io.h"
14 #include "transaction.h"
15 #include "print-tree.h"
16 #include "locking.h"
17 #include "volumes.h"
18 #include "qgroup.h"
19 #include "tree-mod-log.h"
20 #include "tree-checker.h"
21 #include "fs.h"
22 #include "accessors.h"
23 #include "extent-tree.h"
24 #include "relocation.h"
25 #include "file-item.h"
26
27 static struct kmem_cache *btrfs_path_cachep;
28
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
32 const struct btrfs_key *ins_key, struct btrfs_path *path,
33 int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 struct extent_buffer *dst,
36 struct extent_buffer *src, int empty);
37 static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40
41 static const struct btrfs_csums {
42 u16 size;
43 const char name[10];
44 const char driver[12];
45 } btrfs_csums[] = {
46 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
47 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
48 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
49 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
50 .driver = "blake2b-256" },
51 };
52
/*
 * The leaf data grows from end-to-front in the node. This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
58 {
59 u32 nr = btrfs_header_nritems(leaf);
60
61 if (nr == 0)
62 return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
63 return btrfs_item_offset(leaf, nr - 1);
64 }
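/*
 * Illustrative sketch (not part of the kernel sources): how the layout math
 * above plays out.  Item headers grow forward from the leaf header while item
 * data grows backward from the end of the block, so the "data end" is simply
 * the data offset of the last item.  The numbers are made up for a
 * hypothetical 16K leaf:
 *
 *	nritems = 2
 *	btrfs_item_offset(leaf, 0) == 16200	(data of item 0)
 *	btrfs_item_offset(leaf, 1) == 16100	(data of item 1, lower offset)
 *	leaf_data_end(leaf)        == 16100	(start of the last item's data)
 *
 * With nritems == 0 there is no item data at all, so the full data area size
 * (BTRFS_LEAF_DATA_SIZE) is returned instead.
 */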
65
/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
80 unsigned long dst_offset,
81 unsigned long src_offset,
82 unsigned long len)
83 {
84 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
85 btrfs_item_nr_offset(leaf, 0) + src_offset, len);
86 }
87
/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
103 const struct extent_buffer *src,
104 unsigned long dst_offset,
105 unsigned long src_offset, unsigned long len)
106 {
107 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
108 btrfs_item_nr_offset(src, 0) + src_offset, len);
109 }
110
/*
 * Move items in a @leaf (using memmove).
 *
 * @leaf:	the leaf we're moving items within
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
123 int dst_item, int src_item, int nr_items)
124 {
125 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
126 btrfs_item_nr_offset(leaf, src_item),
127 nr_items * sizeof(struct btrfs_item));
128 }
129
130 /*
131 * Copy items from @src into @dst at the given @offset.
132 *
133 * @dst: destination leaf for the items
134 * @src: source leaf for the items
135 * @dst_item: the item nr we're copying into
136 * @src_item: the item nr we're copying from
137 * @nr_items: the number of items to copy
138 *
139 * Wrapper around copy_extent_buffer() that does the math to get the
140 * appropriate offsets into the leaf from the item numbers.
141 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
143 const struct extent_buffer *src,
144 int dst_item, int src_item, int nr_items)
145 {
146 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
147 btrfs_item_nr_offset(src, src_item),
148 nr_items * sizeof(struct btrfs_item));
149 }
150
151 /* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
153 {
154 return btrfs_csums[type].size;
155 }
156
int btrfs_super_csum_size(const struct btrfs_super_block *s)
158 {
159 u16 t = btrfs_super_csum_type(s);
160 /*
161 * csum type is validated at mount time
162 */
163 return btrfs_csum_type_size(t);
164 }
165
const char *btrfs_super_csum_name(u16 csum_type)
167 {
168 /* csum type is validated at mount time */
169 return btrfs_csums[csum_type].name;
170 }
171
172 /*
173 * Return driver name if defined, otherwise the name that's also a valid driver
174 * name
175 */
const char *btrfs_super_csum_driver(u16 csum_type)
177 {
178 /* csum type is validated at mount time */
179 return btrfs_csums[csum_type].driver[0] ?
180 btrfs_csums[csum_type].driver :
181 btrfs_csums[csum_type].name;
182 }
183
size_t __attribute_const__ btrfs_get_num_csums(void)
185 {
186 return ARRAY_SIZE(btrfs_csums);
187 }
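/*
 * Illustrative sketch (not part of the kernel sources): what the helpers
 * above return for a few checksum types, taken straight from the btrfs_csums
 * table:
 *
 *	btrfs_csum_type_size(BTRFS_CSUM_TYPE_CRC32)     == 4
 *	btrfs_super_csum_name(BTRFS_CSUM_TYPE_XXHASH)   == "xxhash64"
 *	btrfs_super_csum_driver(BTRFS_CSUM_TYPE_CRC32)  == "crc32c"
 *		(no .driver set, falls back to .name)
 *	btrfs_super_csum_driver(BTRFS_CSUM_TYPE_BLAKE2) == "blake2b-256"
 *		(.driver overrides the generic "blake2b" name)
 */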
188
struct btrfs_path *btrfs_alloc_path(void)
190 {
191 might_sleep();
192
193 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
194 }
195
196 /* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
198 {
199 if (!p)
200 return;
201 btrfs_release_path(p);
202 kmem_cache_free(btrfs_path_cachep, p);
203 }
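/*
 * Usage sketch (illustrative only, not part of this file): the common
 * allocate / search / free pattern for struct btrfs_path.  The root, inode
 * number and item handling below are hypothetical.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY, .offset = 0 };
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		... item is at path->nodes[0] / path->slots[0] ...
 *	btrfs_free_path(path);	(releases locks and extent buffers too)
 *	return ret;
 */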
204
/*
 * Path release drops references on the extent buffers in the path
 * and it drops any locks held by this path.
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
212 {
213 int i;
214
215 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
216 p->slots[i] = 0;
217 if (!p->nodes[i])
218 continue;
219 if (p->locks[i]) {
220 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
221 p->locks[i] = 0;
222 }
223 free_extent_buffer(p->nodes[i]);
224 p->nodes[i] = NULL;
225 }
226 }
227
/*
 * We want the transaction abort to print a stack trace only for errors where
 * the cause could be a bug, e.g. due to ENOSPC, and not for common errors that
 * are caused by external factors.
 */
bool __cold abort_should_print_stack(int errno)
234 {
235 switch (errno) {
236 case -EIO:
237 case -EROFS:
238 case -ENOMEM:
239 return false;
240 }
241 return true;
242 }
243
244 /*
245 * safely gets a reference on the root node of a tree. A lock
246 * is not taken, so a concurrent writer may put a different node
247 * at the root of the tree. See btrfs_lock_root_node for the
248 * looping required.
249 *
250 * The extent buffer returned by this has a reference taken, so
251 * it won't disappear. It may stop being the root of the tree
252 * at any time because there are no locks held.
253 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
255 {
256 struct extent_buffer *eb;
257
258 while (1) {
259 rcu_read_lock();
260 eb = rcu_dereference(root->node);
261
		/*
		 * RCU really hurts here: we could free up the root node because
		 * it was COWed, but we may not get the new root node yet, so do
		 * the inc_not_zero dance; if that doesn't work, synchronize_rcu()
		 * and try again.
		 */
268 if (atomic_inc_not_zero(&eb->refs)) {
269 rcu_read_unlock();
270 break;
271 }
272 rcu_read_unlock();
273 synchronize_rcu();
274 }
275 return eb;
276 }
277
/*
 * Cowonly roots (not-shareable trees, everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list.  The transaction walks
 * this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
284 {
285 struct btrfs_fs_info *fs_info = root->fs_info;
286
287 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
288 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
289 return;
290
291 spin_lock(&fs_info->trans_lock);
292 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
293 /* Want the extent tree to be the last on the list */
294 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
295 list_move_tail(&root->dirty_list,
296 &fs_info->dirty_cowonly_roots);
297 else
298 list_move(&root->dirty_list,
299 &fs_info->dirty_cowonly_roots);
300 }
301 spin_unlock(&fs_info->trans_lock);
302 }
303
304 /*
305 * used by snapshot creation to make a copy of a root for a tree with
306 * a given objectid. The buffer with the new root node is returned in
307 * cow_ret, and this func returns zero on success or a negative error code.
308 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
310 struct btrfs_root *root,
311 struct extent_buffer *buf,
312 struct extent_buffer **cow_ret, u64 new_root_objectid)
313 {
314 struct btrfs_fs_info *fs_info = root->fs_info;
315 struct extent_buffer *cow;
316 int ret = 0;
317 int level;
318 struct btrfs_disk_key disk_key;
319
320 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
321 trans->transid != fs_info->running_transaction->transid);
322 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
323 trans->transid != root->last_trans);
324
325 level = btrfs_header_level(buf);
326 if (level == 0)
327 btrfs_item_key(buf, &disk_key, 0);
328 else
329 btrfs_node_key(buf, &disk_key, 0);
330
331 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
332 &disk_key, level, buf->start, 0,
333 BTRFS_NESTING_NEW_ROOT);
334 if (IS_ERR(cow))
335 return PTR_ERR(cow);
336
337 copy_extent_buffer_full(cow, buf);
338 btrfs_set_header_bytenr(cow, cow->start);
339 btrfs_set_header_generation(cow, trans->transid);
340 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
341 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
342 BTRFS_HEADER_FLAG_RELOC);
343 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
344 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
345 else
346 btrfs_set_header_owner(cow, new_root_objectid);
347
348 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
349
350 WARN_ON(btrfs_header_generation(buf) > trans->transid);
351 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
352 ret = btrfs_inc_ref(trans, root, cow, 1);
353 else
354 ret = btrfs_inc_ref(trans, root, cow, 0);
355 if (ret) {
356 btrfs_tree_unlock(cow);
357 free_extent_buffer(cow);
358 btrfs_abort_transaction(trans, ret);
359 return ret;
360 }
361
362 btrfs_mark_buffer_dirty(trans, cow);
363 *cow_ret = cow;
364 return 0;
365 }
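/*
 * Usage sketch (illustrative only, not part of this file): this is roughly
 * how snapshot creation uses btrfs_copy_root().  Error handling and the
 * follow-up insertion of the new root item are omitted; 'objectid' is the
 * hypothetical id of the new snapshot root.
 *
 *	struct extent_buffer *old = btrfs_lock_root_node(root);
 *	struct extent_buffer *tmp;
 *
 *	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
 *	btrfs_tree_unlock(old);
 *	free_extent_buffer(old);
 *	(on success 'tmp' is the new, locked root node whose bytenr goes into
 *	 the new root item)
 */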
366
367 /*
368 * check if the tree block can be shared by multiple trees
369 */
int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
371 struct btrfs_root *root,
372 struct extent_buffer *buf)
373 {
374 /*
375 * Tree blocks not in shareable trees and tree roots are never shared.
376 * If a block was allocated after the last snapshot and the block was
377 * not allocated by tree relocation, we know the block is not shared.
378 */
379 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
380 buf != root->node &&
381 (btrfs_header_generation(buf) <=
382 btrfs_root_last_snapshot(&root->root_item) ||
383 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
384 if (buf != root->commit_root)
385 return 1;
386 /*
387 * An extent buffer that used to be the commit root may still be
388 * shared because the tree height may have increased and it
389 * became a child of a higher level root. This can happen when
390 * snapshotting a subvolume created in the current transaction.
391 */
392 if (btrfs_header_generation(buf) == trans->transid)
393 return 1;
394 }
395
396 return 0;
397 }
398
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
400 struct btrfs_root *root,
401 struct extent_buffer *buf,
402 struct extent_buffer *cow,
403 int *last_ref)
404 {
405 struct btrfs_fs_info *fs_info = root->fs_info;
406 u64 refs;
407 u64 owner;
408 u64 flags;
409 u64 new_flags = 0;
410 int ret;
411
	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed on blocks that use full backrefs.
	 */
428
429 if (btrfs_block_can_be_shared(trans, root, buf)) {
430 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
431 btrfs_header_level(buf), 1,
432 &refs, &flags);
433 if (ret)
434 return ret;
435 if (unlikely(refs == 0)) {
436 btrfs_crit(fs_info,
437 "found 0 references for tree block at bytenr %llu level %d root %llu",
438 buf->start, btrfs_header_level(buf),
439 btrfs_root_id(root));
440 ret = -EUCLEAN;
441 btrfs_abort_transaction(trans, ret);
442 return ret;
443 }
444 } else {
445 refs = 1;
446 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
447 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
448 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
449 else
450 flags = 0;
451 }
452
453 owner = btrfs_header_owner(buf);
454 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
455 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
456
457 if (refs > 1) {
458 if ((owner == root->root_key.objectid ||
459 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
460 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
461 ret = btrfs_inc_ref(trans, root, buf, 1);
462 if (ret)
463 return ret;
464
465 if (root->root_key.objectid ==
466 BTRFS_TREE_RELOC_OBJECTID) {
467 ret = btrfs_dec_ref(trans, root, buf, 0);
468 if (ret)
469 return ret;
470 ret = btrfs_inc_ref(trans, root, cow, 1);
471 if (ret)
472 return ret;
473 }
474 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
475 } else {
476
477 if (root->root_key.objectid ==
478 BTRFS_TREE_RELOC_OBJECTID)
479 ret = btrfs_inc_ref(trans, root, cow, 1);
480 else
481 ret = btrfs_inc_ref(trans, root, cow, 0);
482 if (ret)
483 return ret;
484 }
485 if (new_flags != 0) {
486 ret = btrfs_set_disk_extent_flags(trans, buf, new_flags);
487 if (ret)
488 return ret;
489 }
490 } else {
491 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
492 if (root->root_key.objectid ==
493 BTRFS_TREE_RELOC_OBJECTID)
494 ret = btrfs_inc_ref(trans, root, cow, 1);
495 else
496 ret = btrfs_inc_ref(trans, root, cow, 0);
497 if (ret)
498 return ret;
499 ret = btrfs_dec_ref(trans, root, buf, 1);
500 if (ret)
501 return ret;
502 }
503 btrfs_clear_buffer_dirty(trans, buf);
504 *last_ref = 1;
505 }
506 return 0;
507 }
508
509 /*
510 * does the dirty work in cow of a single block. The parent block (if
511 * supplied) is updated to point to the new cow copy. The new buffer is marked
512 * dirty and returned locked. If you modify the block it needs to be marked
513 * dirty again.
514 *
515 * search_start -- an allocation hint for the new block
516 *
517 * empty_size -- a hint that you plan on doing more cow. This is the size in
518 * bytes the allocator should try to find free next to the block it returns.
519 * This is just a hint and may be ignored by the allocator.
520 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
522 struct btrfs_root *root,
523 struct extent_buffer *buf,
524 struct extent_buffer *parent, int parent_slot,
525 struct extent_buffer **cow_ret,
526 u64 search_start, u64 empty_size,
527 enum btrfs_lock_nesting nest)
528 {
529 struct btrfs_fs_info *fs_info = root->fs_info;
530 struct btrfs_disk_key disk_key;
531 struct extent_buffer *cow;
532 int level, ret;
533 int last_ref = 0;
534 int unlock_orig = 0;
535 u64 parent_start = 0;
536
537 if (*cow_ret == buf)
538 unlock_orig = 1;
539
540 btrfs_assert_tree_write_locked(buf);
541
542 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
543 trans->transid != fs_info->running_transaction->transid);
544 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
545 trans->transid != root->last_trans);
546
547 level = btrfs_header_level(buf);
548
549 if (level == 0)
550 btrfs_item_key(buf, &disk_key, 0);
551 else
552 btrfs_node_key(buf, &disk_key, 0);
553
554 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
555 parent_start = parent->start;
556
557 cow = btrfs_alloc_tree_block(trans, root, parent_start,
558 root->root_key.objectid, &disk_key, level,
559 search_start, empty_size, nest);
560 if (IS_ERR(cow))
561 return PTR_ERR(cow);
562
563 /* cow is set to blocking by btrfs_init_new_buffer */
564
565 copy_extent_buffer_full(cow, buf);
566 btrfs_set_header_bytenr(cow, cow->start);
567 btrfs_set_header_generation(cow, trans->transid);
568 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
569 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
570 BTRFS_HEADER_FLAG_RELOC);
571 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
572 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
573 else
574 btrfs_set_header_owner(cow, root->root_key.objectid);
575
576 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
577
578 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
579 if (ret) {
580 btrfs_tree_unlock(cow);
581 free_extent_buffer(cow);
582 btrfs_abort_transaction(trans, ret);
583 return ret;
584 }
585
586 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
587 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
588 if (ret) {
589 btrfs_tree_unlock(cow);
590 free_extent_buffer(cow);
591 btrfs_abort_transaction(trans, ret);
592 return ret;
593 }
594 }
595
596 if (buf == root->node) {
597 WARN_ON(parent && parent != buf);
598 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
599 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
600 parent_start = buf->start;
601
602 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
603 if (ret < 0) {
604 btrfs_tree_unlock(cow);
605 free_extent_buffer(cow);
606 btrfs_abort_transaction(trans, ret);
607 return ret;
608 }
609 atomic_inc(&cow->refs);
610 rcu_assign_pointer(root->node, cow);
611
612 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
613 parent_start, last_ref);
614 free_extent_buffer(buf);
615 add_root_to_dirty_list(root);
616 } else {
617 WARN_ON(trans->transid != btrfs_header_generation(parent));
618 ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
619 BTRFS_MOD_LOG_KEY_REPLACE);
620 if (ret) {
621 btrfs_tree_unlock(cow);
622 free_extent_buffer(cow);
623 btrfs_abort_transaction(trans, ret);
624 return ret;
625 }
626 btrfs_set_node_blockptr(parent, parent_slot,
627 cow->start);
628 btrfs_set_node_ptr_generation(parent, parent_slot,
629 trans->transid);
630 btrfs_mark_buffer_dirty(trans, parent);
631 if (last_ref) {
632 ret = btrfs_tree_mod_log_free_eb(buf);
633 if (ret) {
634 btrfs_tree_unlock(cow);
635 free_extent_buffer(cow);
636 btrfs_abort_transaction(trans, ret);
637 return ret;
638 }
639 }
640 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
641 parent_start, last_ref);
642 }
643 if (unlock_orig)
644 btrfs_tree_unlock(buf);
645 free_extent_buffer_stale(buf);
646 btrfs_mark_buffer_dirty(trans, cow);
647 *cow_ret = cow;
648 return 0;
649 }
650
static inline int should_cow_block(struct btrfs_trans_handle *trans,
652 struct btrfs_root *root,
653 struct extent_buffer *buf)
654 {
655 if (btrfs_is_testing(root->fs_info))
656 return 0;
657
658 /* Ensure we can see the FORCE_COW bit */
659 smp_mb__before_atomic();
660
661 /*
662 * We do not need to cow a block if
663 * 1) this block is not created or changed in this transaction;
664 * 2) this block does not belong to TREE_RELOC tree;
665 * 3) the root is not forced COW.
666 *
667 * What is forced COW:
668 * when we create snapshot during committing the transaction,
669 * after we've finished copying src root, we must COW the shared
670 * block to ensure the metadata consistency.
671 */
672 if (btrfs_header_generation(buf) == trans->transid &&
673 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
674 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
675 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
676 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
677 return 0;
678 return 1;
679 }
680
681 /*
682 * cows a single block, see __btrfs_cow_block for the real work.
683 * This version of it has extra checks so that a block isn't COWed more than
684 * once per transaction, as long as it hasn't been written yet
685 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
687 struct btrfs_root *root, struct extent_buffer *buf,
688 struct extent_buffer *parent, int parent_slot,
689 struct extent_buffer **cow_ret,
690 enum btrfs_lock_nesting nest)
691 {
692 struct btrfs_fs_info *fs_info = root->fs_info;
693 u64 search_start;
694 int ret;
695
696 if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
697 btrfs_abort_transaction(trans, -EUCLEAN);
698 btrfs_crit(fs_info,
699 "attempt to COW block %llu on root %llu that is being deleted",
700 buf->start, btrfs_root_id(root));
701 return -EUCLEAN;
702 }
703
704 /*
705 * COWing must happen through a running transaction, which always
706 * matches the current fs generation (it's a transaction with a state
707 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
708 * into error state to prevent the commit of any transaction.
709 */
710 if (unlikely(trans->transaction != fs_info->running_transaction ||
711 trans->transid != fs_info->generation)) {
712 btrfs_abort_transaction(trans, -EUCLEAN);
713 btrfs_crit(fs_info,
714 "unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
715 buf->start, btrfs_root_id(root), trans->transid,
716 fs_info->running_transaction->transid,
717 fs_info->generation);
718 return -EUCLEAN;
719 }
720
721 if (!should_cow_block(trans, root, buf)) {
722 *cow_ret = buf;
723 return 0;
724 }
725
726 search_start = buf->start & ~((u64)SZ_1G - 1);
727
	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
734 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
735 ret = __btrfs_cow_block(trans, root, buf, parent,
736 parent_slot, cow_ret, search_start, 0, nest);
737
738 trace_btrfs_cow_block(root, buf, *cow_ret);
739
740 return ret;
741 }
742 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
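/*
 * Usage sketch (illustrative only, not part of this file): COWing a child
 * block before modifying it, similar to what balance_level() below does.
 * 'parent' and 'slot' are hypothetical.
 *
 *	struct extent_buffer *eb;
 *
 *	eb = btrfs_read_node_slot(parent, slot);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 *	btrfs_tree_lock(eb);
 *	ret = btrfs_cow_block(trans, root, eb, parent, slot, &eb,
 *			      BTRFS_NESTING_COW);
 *	if (ret) {
 *		btrfs_tree_unlock(eb);
 *		free_extent_buffer(eb);
 *		return ret;
 *	}
 *	(on success 'eb' points at the writable copy, locked and marked dirty)
 */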
743
744 /*
745 * helper function for defrag to decide if two blocks pointed to by a
746 * node are actually close by
747 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
749 {
750 if (blocknr < other && other - (blocknr + blocksize) < 32768)
751 return 1;
752 if (blocknr > other && blocknr - (other + blocksize) < 32768)
753 return 1;
754 return 0;
755 }
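/*
 * Worked example (illustrative, assuming a 16K nodesize): two blocks count
 * as "close" when the gap between them, not counting the block itself, is
 * less than 32K.
 *
 *	close_blocks(1048576, 1081344, 16384) == 1
 *		1081344 - (1048576 + 16384) = 16384, which is < 32768
 *	close_blocks(1048576, 2097152, 16384) == 0
 *		the gap is about 1M, far beyond the 32K threshold
 */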
756
757 #ifdef __LITTLE_ENDIAN
758
759 /*
760 * Compare two keys, on little-endian the disk order is same as CPU order and
761 * we can avoid the conversion.
762 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
764 const struct btrfs_key *k2)
765 {
766 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
767
768 return btrfs_comp_cpu_keys(k1, k2);
769 }
770
771 #else
772
773 /*
774 * compare two keys in a memcmp fashion
775 */
static int comp_keys(const struct btrfs_disk_key *disk,
777 const struct btrfs_key *k2)
778 {
779 struct btrfs_key k1;
780
781 btrfs_disk_key_to_cpu(&k1, disk);
782
783 return btrfs_comp_cpu_keys(&k1, k2);
784 }
785 #endif
786
/*
 * Same as comp_keys, only with two struct btrfs_key arguments.
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
791 {
792 if (k1->objectid > k2->objectid)
793 return 1;
794 if (k1->objectid < k2->objectid)
795 return -1;
796 if (k1->type > k2->type)
797 return 1;
798 if (k1->type < k2->type)
799 return -1;
800 if (k1->offset > k2->offset)
801 return 1;
802 if (k1->offset < k2->offset)
803 return -1;
804 return 0;
805 }
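/*
 * Illustrative sketch (not part of the kernel sources): keys compare by
 * (objectid, type, offset), in that order, so for example
 *
 *	{ .objectid = 256, .type = BTRFS_INODE_ITEM_KEY, .offset = 0 }
 * sorts before
 *	{ .objectid = 256, .type = BTRFS_INODE_REF_KEY,  .offset = 256 }
 * sorts before
 *	{ .objectid = 257, .type = BTRFS_INODE_ITEM_KEY, .offset = 0 }
 *
 * because BTRFS_INODE_ITEM_KEY has a smaller type value than
 * BTRFS_INODE_REF_KEY, and a larger objectid dominates the other two fields.
 */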
806
807 /*
808 * this is used by the defrag code to go through all the
809 * leaves pointed to by a node and reallocate them so that
810 * disk order is close to key order
811 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
813 struct btrfs_root *root, struct extent_buffer *parent,
814 int start_slot, u64 *last_ret,
815 struct btrfs_key *progress)
816 {
817 struct btrfs_fs_info *fs_info = root->fs_info;
818 struct extent_buffer *cur;
819 u64 blocknr;
820 u64 search_start = *last_ret;
821 u64 last_block = 0;
822 u64 other;
823 u32 parent_nritems;
824 int end_slot;
825 int i;
826 int err = 0;
827 u32 blocksize;
828 int progress_passed = 0;
829 struct btrfs_disk_key disk_key;
830
831 /*
832 * COWing must happen through a running transaction, which always
833 * matches the current fs generation (it's a transaction with a state
834 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
835 * into error state to prevent the commit of any transaction.
836 */
837 if (unlikely(trans->transaction != fs_info->running_transaction ||
838 trans->transid != fs_info->generation)) {
839 btrfs_abort_transaction(trans, -EUCLEAN);
840 btrfs_crit(fs_info,
841 "unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
842 parent->start, btrfs_root_id(root), trans->transid,
843 fs_info->running_transaction->transid,
844 fs_info->generation);
845 return -EUCLEAN;
846 }
847
848 parent_nritems = btrfs_header_nritems(parent);
849 blocksize = fs_info->nodesize;
850 end_slot = parent_nritems - 1;
851
852 if (parent_nritems <= 1)
853 return 0;
854
855 for (i = start_slot; i <= end_slot; i++) {
856 int close = 1;
857
858 btrfs_node_key(parent, &disk_key, i);
859 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
860 continue;
861
862 progress_passed = 1;
863 blocknr = btrfs_node_blockptr(parent, i);
864 if (last_block == 0)
865 last_block = blocknr;
866
867 if (i > 0) {
868 other = btrfs_node_blockptr(parent, i - 1);
869 close = close_blocks(blocknr, other, blocksize);
870 }
871 if (!close && i < end_slot) {
872 other = btrfs_node_blockptr(parent, i + 1);
873 close = close_blocks(blocknr, other, blocksize);
874 }
875 if (close) {
876 last_block = blocknr;
877 continue;
878 }
879
880 cur = btrfs_read_node_slot(parent, i);
881 if (IS_ERR(cur))
882 return PTR_ERR(cur);
883 if (search_start == 0)
884 search_start = last_block;
885
886 btrfs_tree_lock(cur);
887 err = __btrfs_cow_block(trans, root, cur, parent, i,
888 &cur, search_start,
889 min(16 * blocksize,
890 (end_slot - i) * blocksize),
891 BTRFS_NESTING_COW);
892 if (err) {
893 btrfs_tree_unlock(cur);
894 free_extent_buffer(cur);
895 break;
896 }
897 search_start = cur->start;
898 last_block = cur->start;
899 *last_ret = search_start;
900 btrfs_tree_unlock(cur);
901 free_extent_buffer(cur);
902 }
903 return err;
904 }
905
906 /*
907 * Search for a key in the given extent_buffer.
908 *
909 * The lower boundary for the search is specified by the slot number @first_slot.
910 * Use a value of 0 to search over the whole extent buffer. Works for both
911 * leaves and nodes.
912 *
913 * The slot in the extent buffer is returned via @slot. If the key exists in the
914 * extent buffer, then @slot will point to the slot where the key is, otherwise
915 * it points to the slot where you would insert the key.
916 *
917 * Slot may point to the total number of items (i.e. one position beyond the last
918 * key) if the key is bigger than the last key in the extent buffer.
919 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
921 const struct btrfs_key *key, int *slot)
922 {
923 unsigned long p;
924 int item_size;
925 /*
926 * Use unsigned types for the low and high slots, so that we get a more
927 * efficient division in the search loop below.
928 */
929 u32 low = first_slot;
930 u32 high = btrfs_header_nritems(eb);
931 int ret;
932 const int key_size = sizeof(struct btrfs_disk_key);
933
934 if (unlikely(low > high)) {
935 btrfs_err(eb->fs_info,
936 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
937 __func__, low, high, eb->start,
938 btrfs_header_owner(eb), btrfs_header_level(eb));
939 return -EINVAL;
940 }
941
942 if (btrfs_header_level(eb) == 0) {
943 p = offsetof(struct btrfs_leaf, items);
944 item_size = sizeof(struct btrfs_item);
945 } else {
946 p = offsetof(struct btrfs_node, ptrs);
947 item_size = sizeof(struct btrfs_key_ptr);
948 }
949
950 while (low < high) {
951 unsigned long oip;
952 unsigned long offset;
953 struct btrfs_disk_key *tmp;
954 struct btrfs_disk_key unaligned;
955 int mid;
956
957 mid = (low + high) / 2;
958 offset = p + mid * item_size;
959 oip = offset_in_page(offset);
960
961 if (oip + key_size <= PAGE_SIZE) {
962 const unsigned long idx = get_eb_page_index(offset);
963 char *kaddr = page_address(eb->pages[idx]);
964
965 oip = get_eb_offset_in_page(eb, offset);
966 tmp = (struct btrfs_disk_key *)(kaddr + oip);
967 } else {
968 read_extent_buffer(eb, &unaligned, offset, key_size);
969 tmp = &unaligned;
970 }
971
972 ret = comp_keys(tmp, key);
973
974 if (ret < 0)
975 low = mid + 1;
976 else if (ret > 0)
977 high = mid;
978 else {
979 *slot = mid;
980 return 0;
981 }
982 }
983 *slot = low;
984 return 1;
985 }
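/*
 * Usage sketch (illustrative only, not part of this file): how callers
 * typically consume the return value of btrfs_bin_search().  The extent
 * buffer, key and helper names below are hypothetical placeholders.
 *
 *	int slot;
 *	int ret = btrfs_bin_search(eb, 0, &key, &slot);
 *
 *	if (ret < 0)
 *		return ret;			(corrupted slot ordering)
 *	if (ret == 0)
 *		use_existing_item(eb, slot);	(exact match at 'slot')
 *	else
 *		insert_new_item(eb, slot);	(ret == 1: 'slot' is the
 *						 insertion position, possibly
 *						 equal to nritems)
 */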
986
static void root_add_used(struct btrfs_root *root, u32 size)
988 {
989 spin_lock(&root->accounting_lock);
990 btrfs_set_root_used(&root->root_item,
991 btrfs_root_used(&root->root_item) + size);
992 spin_unlock(&root->accounting_lock);
993 }
994
static void root_sub_used(struct btrfs_root *root, u32 size)
996 {
997 spin_lock(&root->accounting_lock);
998 btrfs_set_root_used(&root->root_item,
999 btrfs_root_used(&root->root_item) - size);
1000 spin_unlock(&root->accounting_lock);
1001 }
1002
/* Given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1007 int slot)
1008 {
1009 int level = btrfs_header_level(parent);
1010 struct btrfs_tree_parent_check check = { 0 };
1011 struct extent_buffer *eb;
1012
1013 if (slot < 0 || slot >= btrfs_header_nritems(parent))
1014 return ERR_PTR(-ENOENT);
1015
1016 ASSERT(level);
1017
1018 check.level = level - 1;
1019 check.transid = btrfs_node_ptr_generation(parent, slot);
1020 check.owner_root = btrfs_header_owner(parent);
1021 check.has_first_key = true;
1022 btrfs_node_key_to_cpu(parent, &check.first_key, slot);
1023
1024 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
1025 &check);
1026 if (IS_ERR(eb))
1027 return eb;
1028 if (!extent_buffer_uptodate(eb)) {
1029 free_extent_buffer(eb);
1030 return ERR_PTR(-EIO);
1031 }
1032
1033 return eb;
1034 }
1035
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
1042 struct btrfs_root *root,
1043 struct btrfs_path *path, int level)
1044 {
1045 struct btrfs_fs_info *fs_info = root->fs_info;
1046 struct extent_buffer *right = NULL;
1047 struct extent_buffer *mid;
1048 struct extent_buffer *left = NULL;
1049 struct extent_buffer *parent = NULL;
1050 int ret = 0;
1051 int wret;
1052 int pslot;
1053 int orig_slot = path->slots[level];
1054 u64 orig_ptr;
1055
1056 ASSERT(level > 0);
1057
1058 mid = path->nodes[level];
1059
1060 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
1061 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1062
1063 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1064
1065 if (level < BTRFS_MAX_LEVEL - 1) {
1066 parent = path->nodes[level + 1];
1067 pslot = path->slots[level + 1];
1068 }
1069
1070 /*
1071 * deal with the case where there is only one pointer in the root
1072 * by promoting the node below to a root
1073 */
1074 if (!parent) {
1075 struct extent_buffer *child;
1076
1077 if (btrfs_header_nritems(mid) != 1)
1078 return 0;
1079
1080 /* promote the child to a root */
1081 child = btrfs_read_node_slot(mid, 0);
1082 if (IS_ERR(child)) {
1083 ret = PTR_ERR(child);
1084 goto out;
1085 }
1086
1087 btrfs_tree_lock(child);
1088 ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
1089 BTRFS_NESTING_COW);
1090 if (ret) {
1091 btrfs_tree_unlock(child);
1092 free_extent_buffer(child);
1093 goto out;
1094 }
1095
1096 ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
1097 if (ret < 0) {
1098 btrfs_tree_unlock(child);
1099 free_extent_buffer(child);
1100 btrfs_abort_transaction(trans, ret);
1101 goto out;
1102 }
1103 rcu_assign_pointer(root->node, child);
1104
1105 add_root_to_dirty_list(root);
1106 btrfs_tree_unlock(child);
1107
1108 path->locks[level] = 0;
1109 path->nodes[level] = NULL;
1110 btrfs_clear_buffer_dirty(trans, mid);
1111 btrfs_tree_unlock(mid);
1112 /* once for the path */
1113 free_extent_buffer(mid);
1114
1115 root_sub_used(root, mid->len);
1116 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1117 /* once for the root ptr */
1118 free_extent_buffer_stale(mid);
1119 return 0;
1120 }
1121 if (btrfs_header_nritems(mid) >
1122 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1123 return 0;
1124
1125 if (pslot) {
1126 left = btrfs_read_node_slot(parent, pslot - 1);
1127 if (IS_ERR(left)) {
1128 ret = PTR_ERR(left);
1129 left = NULL;
1130 goto out;
1131 }
1132
1133 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1134 wret = btrfs_cow_block(trans, root, left,
1135 parent, pslot - 1, &left,
1136 BTRFS_NESTING_LEFT_COW);
1137 if (wret) {
1138 ret = wret;
1139 goto out;
1140 }
1141 }
1142
1143 if (pslot + 1 < btrfs_header_nritems(parent)) {
1144 right = btrfs_read_node_slot(parent, pslot + 1);
1145 if (IS_ERR(right)) {
1146 ret = PTR_ERR(right);
1147 right = NULL;
1148 goto out;
1149 }
1150
1151 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1152 wret = btrfs_cow_block(trans, root, right,
1153 parent, pslot + 1, &right,
1154 BTRFS_NESTING_RIGHT_COW);
1155 if (wret) {
1156 ret = wret;
1157 goto out;
1158 }
1159 }
1160
1161 /* first, try to make some room in the middle buffer */
1162 if (left) {
1163 orig_slot += btrfs_header_nritems(left);
1164 wret = push_node_left(trans, left, mid, 1);
1165 if (wret < 0)
1166 ret = wret;
1167 }
1168
1169 /*
1170 * then try to empty the right most buffer into the middle
1171 */
1172 if (right) {
1173 wret = push_node_left(trans, mid, right, 1);
1174 if (wret < 0 && wret != -ENOSPC)
1175 ret = wret;
1176 if (btrfs_header_nritems(right) == 0) {
1177 btrfs_clear_buffer_dirty(trans, right);
1178 btrfs_tree_unlock(right);
1179 ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
1180 if (ret < 0) {
1181 free_extent_buffer_stale(right);
1182 right = NULL;
1183 goto out;
1184 }
1185 root_sub_used(root, right->len);
1186 btrfs_free_tree_block(trans, btrfs_root_id(root), right,
1187 0, 1);
1188 free_extent_buffer_stale(right);
1189 right = NULL;
1190 } else {
1191 struct btrfs_disk_key right_key;
1192 btrfs_node_key(right, &right_key, 0);
1193 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1194 BTRFS_MOD_LOG_KEY_REPLACE);
1195 if (ret < 0) {
1196 btrfs_abort_transaction(trans, ret);
1197 goto out;
1198 }
1199 btrfs_set_node_key(parent, &right_key, pslot + 1);
1200 btrfs_mark_buffer_dirty(trans, parent);
1201 }
1202 }
1203 if (btrfs_header_nritems(mid) == 1) {
1204 /*
1205 * we're not allowed to leave a node with one item in the
1206 * tree during a delete. A deletion from lower in the tree
1207 * could try to delete the only pointer in this node.
1208 * So, pull some keys from the left.
1209 * There has to be a left pointer at this point because
1210 * otherwise we would have pulled some pointers from the
1211 * right
1212 */
1213 if (unlikely(!left)) {
1214 btrfs_crit(fs_info,
1215 "missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
1216 parent->start, btrfs_header_level(parent),
1217 mid->start, btrfs_root_id(root));
1218 ret = -EUCLEAN;
1219 btrfs_abort_transaction(trans, ret);
1220 goto out;
1221 }
1222 wret = balance_node_right(trans, mid, left);
1223 if (wret < 0) {
1224 ret = wret;
1225 goto out;
1226 }
1227 if (wret == 1) {
1228 wret = push_node_left(trans, left, mid, 1);
1229 if (wret < 0)
1230 ret = wret;
1231 }
1232 BUG_ON(wret == 1);
1233 }
1234 if (btrfs_header_nritems(mid) == 0) {
1235 btrfs_clear_buffer_dirty(trans, mid);
1236 btrfs_tree_unlock(mid);
1237 ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
1238 if (ret < 0) {
1239 free_extent_buffer_stale(mid);
1240 mid = NULL;
1241 goto out;
1242 }
1243 root_sub_used(root, mid->len);
1244 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1245 free_extent_buffer_stale(mid);
1246 mid = NULL;
1247 } else {
1248 /* update the parent key to reflect our changes */
1249 struct btrfs_disk_key mid_key;
1250 btrfs_node_key(mid, &mid_key, 0);
1251 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1252 BTRFS_MOD_LOG_KEY_REPLACE);
1253 if (ret < 0) {
1254 btrfs_abort_transaction(trans, ret);
1255 goto out;
1256 }
1257 btrfs_set_node_key(parent, &mid_key, pslot);
1258 btrfs_mark_buffer_dirty(trans, parent);
1259 }
1260
1261 /* update the path */
1262 if (left) {
1263 if (btrfs_header_nritems(left) > orig_slot) {
1264 atomic_inc(&left->refs);
1265 /* left was locked after cow */
1266 path->nodes[level] = left;
1267 path->slots[level + 1] -= 1;
1268 path->slots[level] = orig_slot;
1269 if (mid) {
1270 btrfs_tree_unlock(mid);
1271 free_extent_buffer(mid);
1272 }
1273 } else {
1274 orig_slot -= btrfs_header_nritems(left);
1275 path->slots[level] = orig_slot;
1276 }
1277 }
1278 /* double check we haven't messed things up */
1279 if (orig_ptr !=
1280 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1281 BUG();
1282 out:
1283 if (right) {
1284 btrfs_tree_unlock(right);
1285 free_extent_buffer(right);
1286 }
1287 if (left) {
1288 if (path->nodes[level] != left)
1289 btrfs_tree_unlock(left);
1290 free_extent_buffer(left);
1291 }
1292 return ret;
1293 }
1294
1295 /* Node balancing for insertion. Here we only split or push nodes around
1296 * when they are completely full. This is also done top down, so we
1297 * have to be pessimistic.
1298 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1300 struct btrfs_root *root,
1301 struct btrfs_path *path, int level)
1302 {
1303 struct btrfs_fs_info *fs_info = root->fs_info;
1304 struct extent_buffer *right = NULL;
1305 struct extent_buffer *mid;
1306 struct extent_buffer *left = NULL;
1307 struct extent_buffer *parent = NULL;
1308 int ret = 0;
1309 int wret;
1310 int pslot;
1311 int orig_slot = path->slots[level];
1312
1313 if (level == 0)
1314 return 1;
1315
1316 mid = path->nodes[level];
1317 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1318
1319 if (level < BTRFS_MAX_LEVEL - 1) {
1320 parent = path->nodes[level + 1];
1321 pslot = path->slots[level + 1];
1322 }
1323
1324 if (!parent)
1325 return 1;
1326
1327 /* first, try to make some room in the middle buffer */
1328 if (pslot) {
1329 u32 left_nr;
1330
1331 left = btrfs_read_node_slot(parent, pslot - 1);
1332 if (IS_ERR(left))
1333 return PTR_ERR(left);
1334
1335 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1336
1337 left_nr = btrfs_header_nritems(left);
1338 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1339 wret = 1;
1340 } else {
1341 ret = btrfs_cow_block(trans, root, left, parent,
1342 pslot - 1, &left,
1343 BTRFS_NESTING_LEFT_COW);
1344 if (ret)
1345 wret = 1;
1346 else {
1347 wret = push_node_left(trans, left, mid, 0);
1348 }
1349 }
1350 if (wret < 0)
1351 ret = wret;
1352 if (wret == 0) {
1353 struct btrfs_disk_key disk_key;
1354 orig_slot += left_nr;
1355 btrfs_node_key(mid, &disk_key, 0);
1356 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1357 BTRFS_MOD_LOG_KEY_REPLACE);
1358 if (ret < 0) {
1359 btrfs_tree_unlock(left);
1360 free_extent_buffer(left);
1361 btrfs_abort_transaction(trans, ret);
1362 return ret;
1363 }
1364 btrfs_set_node_key(parent, &disk_key, pslot);
1365 btrfs_mark_buffer_dirty(trans, parent);
1366 if (btrfs_header_nritems(left) > orig_slot) {
1367 path->nodes[level] = left;
1368 path->slots[level + 1] -= 1;
1369 path->slots[level] = orig_slot;
1370 btrfs_tree_unlock(mid);
1371 free_extent_buffer(mid);
1372 } else {
1373 orig_slot -=
1374 btrfs_header_nritems(left);
1375 path->slots[level] = orig_slot;
1376 btrfs_tree_unlock(left);
1377 free_extent_buffer(left);
1378 }
1379 return 0;
1380 }
1381 btrfs_tree_unlock(left);
1382 free_extent_buffer(left);
1383 }
1384
1385 /*
1386 * then try to empty the right most buffer into the middle
1387 */
1388 if (pslot + 1 < btrfs_header_nritems(parent)) {
1389 u32 right_nr;
1390
1391 right = btrfs_read_node_slot(parent, pslot + 1);
1392 if (IS_ERR(right))
1393 return PTR_ERR(right);
1394
1395 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1396
1397 right_nr = btrfs_header_nritems(right);
1398 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1399 wret = 1;
1400 } else {
1401 ret = btrfs_cow_block(trans, root, right,
1402 parent, pslot + 1,
1403 &right, BTRFS_NESTING_RIGHT_COW);
1404 if (ret)
1405 wret = 1;
1406 else {
1407 wret = balance_node_right(trans, right, mid);
1408 }
1409 }
1410 if (wret < 0)
1411 ret = wret;
1412 if (wret == 0) {
1413 struct btrfs_disk_key disk_key;
1414
1415 btrfs_node_key(right, &disk_key, 0);
1416 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1417 BTRFS_MOD_LOG_KEY_REPLACE);
1418 if (ret < 0) {
1419 btrfs_tree_unlock(right);
1420 free_extent_buffer(right);
1421 btrfs_abort_transaction(trans, ret);
1422 return ret;
1423 }
1424 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1425 btrfs_mark_buffer_dirty(trans, parent);
1426
1427 if (btrfs_header_nritems(mid) <= orig_slot) {
1428 path->nodes[level] = right;
1429 path->slots[level + 1] += 1;
1430 path->slots[level] = orig_slot -
1431 btrfs_header_nritems(mid);
1432 btrfs_tree_unlock(mid);
1433 free_extent_buffer(mid);
1434 } else {
1435 btrfs_tree_unlock(right);
1436 free_extent_buffer(right);
1437 }
1438 return 0;
1439 }
1440 btrfs_tree_unlock(right);
1441 free_extent_buffer(right);
1442 }
1443 return 1;
1444 }
1445
1446 /*
1447 * readahead one full node of leaves, finding things that are close
1448 * to the block in 'slot', and triggering ra on them.
1449 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
1451 struct btrfs_path *path,
1452 int level, int slot, u64 objectid)
1453 {
1454 struct extent_buffer *node;
1455 struct btrfs_disk_key disk_key;
1456 u32 nritems;
1457 u64 search;
1458 u64 target;
1459 u64 nread = 0;
1460 u64 nread_max;
1461 u32 nr;
1462 u32 blocksize;
1463 u32 nscan = 0;
1464
1465 if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
1466 return;
1467
1468 if (!path->nodes[level])
1469 return;
1470
1471 node = path->nodes[level];
1472
1473 /*
1474 * Since the time between visiting leaves is much shorter than the time
1475 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
1476 * much IO at once (possibly random).
1477 */
1478 if (path->reada == READA_FORWARD_ALWAYS) {
1479 if (level > 1)
1480 nread_max = node->fs_info->nodesize;
1481 else
1482 nread_max = SZ_128K;
1483 } else {
1484 nread_max = SZ_64K;
1485 }
1486
1487 search = btrfs_node_blockptr(node, slot);
1488 blocksize = fs_info->nodesize;
1489 if (path->reada != READA_FORWARD_ALWAYS) {
1490 struct extent_buffer *eb;
1491
1492 eb = find_extent_buffer(fs_info, search);
1493 if (eb) {
1494 free_extent_buffer(eb);
1495 return;
1496 }
1497 }
1498
1499 target = search;
1500
1501 nritems = btrfs_header_nritems(node);
1502 nr = slot;
1503
1504 while (1) {
1505 if (path->reada == READA_BACK) {
1506 if (nr == 0)
1507 break;
1508 nr--;
1509 } else if (path->reada == READA_FORWARD ||
1510 path->reada == READA_FORWARD_ALWAYS) {
1511 nr++;
1512 if (nr >= nritems)
1513 break;
1514 }
1515 if (path->reada == READA_BACK && objectid) {
1516 btrfs_node_key(node, &disk_key, nr);
1517 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1518 break;
1519 }
1520 search = btrfs_node_blockptr(node, nr);
1521 if (path->reada == READA_FORWARD_ALWAYS ||
1522 (search <= target && target - search <= 65536) ||
1523 (search > target && search - target <= 65536)) {
1524 btrfs_readahead_node_child(node, nr);
1525 nread += blocksize;
1526 }
1527 nscan++;
1528 if (nread > nread_max || nscan > 32)
1529 break;
1530 }
1531 }
1532
static noinline void reada_for_balance(struct btrfs_path *path, int level)
1534 {
1535 struct extent_buffer *parent;
1536 int slot;
1537 int nritems;
1538
1539 parent = path->nodes[level + 1];
1540 if (!parent)
1541 return;
1542
1543 nritems = btrfs_header_nritems(parent);
1544 slot = path->slots[level + 1];
1545
1546 if (slot > 0)
1547 btrfs_readahead_node_child(parent, slot - 1);
1548 if (slot + 1 < nritems)
1549 btrfs_readahead_node_child(parent, slot + 1);
1550 }
1551
1552
1553 /*
1554 * when we walk down the tree, it is usually safe to unlock the higher layers
1555 * in the tree. The exceptions are when our path goes through slot 0, because
1556 * operations on the tree might require changing key pointers higher up in the
1557 * tree.
1558 *
1559 * callers might also have set path->keep_locks, which tells this code to keep
1560 * the lock if the path points to the last slot in the block. This is part of
1561 * walking through the tree, and selecting the next slot in the higher block.
1562 *
1563 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1564 * if lowest_unlock is 1, level 0 won't be unlocked
1565 */
static noinline void unlock_up(struct btrfs_path *path, int level,
1567 int lowest_unlock, int min_write_lock_level,
1568 int *write_lock_level)
1569 {
1570 int i;
1571 int skip_level = level;
1572 bool check_skip = true;
1573
1574 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1575 if (!path->nodes[i])
1576 break;
1577 if (!path->locks[i])
1578 break;
1579
1580 if (check_skip) {
1581 if (path->slots[i] == 0) {
1582 skip_level = i + 1;
1583 continue;
1584 }
1585
1586 if (path->keep_locks) {
1587 u32 nritems;
1588
1589 nritems = btrfs_header_nritems(path->nodes[i]);
1590 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1591 skip_level = i + 1;
1592 continue;
1593 }
1594 }
1595 }
1596
1597 if (i >= lowest_unlock && i > skip_level) {
1598 check_skip = false;
1599 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
1600 path->locks[i] = 0;
1601 if (write_lock_level &&
1602 i > min_write_lock_level &&
1603 i <= *write_lock_level) {
1604 *write_lock_level = i - 1;
1605 }
1606 }
1607 }
1608 }
1609
1610 /*
1611 * Helper function for btrfs_search_slot() and other functions that do a search
1612 * on a btree. The goal is to find a tree block in the cache (the radix tree at
1613 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
1614 * its pages from disk.
1615 *
1616 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
1617 * whole btree search, starting again from the current root node.
1618 */
1619 static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
1621 struct extent_buffer **eb_ret, int level, int slot,
1622 const struct btrfs_key *key)
1623 {
1624 struct btrfs_fs_info *fs_info = root->fs_info;
1625 struct btrfs_tree_parent_check check = { 0 };
1626 u64 blocknr;
1627 u64 gen;
1628 struct extent_buffer *tmp;
1629 int ret;
1630 int parent_level;
1631 bool unlock_up;
1632
1633 unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
1634 blocknr = btrfs_node_blockptr(*eb_ret, slot);
1635 gen = btrfs_node_ptr_generation(*eb_ret, slot);
1636 parent_level = btrfs_header_level(*eb_ret);
1637 btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
1638 check.has_first_key = true;
1639 check.level = parent_level - 1;
1640 check.transid = gen;
1641 check.owner_root = root->root_key.objectid;
1642
1643 /*
1644 * If we need to read an extent buffer from disk and we are holding locks
1645 * on upper level nodes, we unlock all the upper nodes before reading the
1646 * extent buffer, and then return -EAGAIN to the caller as it needs to
1647 * restart the search. We don't release the lock on the current level
1648 * because we need to walk this node to figure out which blocks to read.
1649 */
1650 tmp = find_extent_buffer(fs_info, blocknr);
1651 if (tmp) {
1652 if (p->reada == READA_FORWARD_ALWAYS)
1653 reada_for_search(fs_info, p, level, slot, key->objectid);
1654
1655 /* first we do an atomic uptodate check */
1656 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1657 /*
1658 * Do extra check for first_key, eb can be stale due to
1659 * being cached, read from scrub, or have multiple
1660 * parents (shared tree blocks).
1661 */
1662 if (btrfs_verify_level_key(tmp,
1663 parent_level - 1, &check.first_key, gen)) {
1664 free_extent_buffer(tmp);
1665 return -EUCLEAN;
1666 }
1667 *eb_ret = tmp;
1668 return 0;
1669 }
1670
1671 if (p->nowait) {
1672 free_extent_buffer(tmp);
1673 return -EAGAIN;
1674 }
1675
1676 if (unlock_up)
1677 btrfs_unlock_up_safe(p, level + 1);
1678
1679 /* now we're allowed to do a blocking uptodate check */
1680 ret = btrfs_read_extent_buffer(tmp, &check);
1681 if (ret) {
1682 free_extent_buffer(tmp);
1683 btrfs_release_path(p);
1684 return -EIO;
1685 }
1686 if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
1687 free_extent_buffer(tmp);
1688 btrfs_release_path(p);
1689 return -EUCLEAN;
1690 }
1691
1692 if (unlock_up)
1693 ret = -EAGAIN;
1694
1695 goto out;
1696 } else if (p->nowait) {
1697 return -EAGAIN;
1698 }
1699
1700 if (unlock_up) {
1701 btrfs_unlock_up_safe(p, level + 1);
1702 ret = -EAGAIN;
1703 } else {
1704 ret = 0;
1705 }
1706
1707 if (p->reada != READA_NONE)
1708 reada_for_search(fs_info, p, level, slot, key->objectid);
1709
1710 tmp = read_tree_block(fs_info, blocknr, &check);
1711 if (IS_ERR(tmp)) {
1712 btrfs_release_path(p);
1713 return PTR_ERR(tmp);
1714 }
1715 /*
1716 * If the read above didn't mark this buffer up to date,
1717 * it will never end up being up to date. Set ret to EIO now
1718 * and give up so that our caller doesn't loop forever
1719 * on our EAGAINs.
1720 */
1721 if (!extent_buffer_uptodate(tmp))
1722 ret = -EIO;
1723
1724 out:
1725 if (ret == 0) {
1726 *eb_ret = tmp;
1727 } else {
1728 free_extent_buffer(tmp);
1729 btrfs_release_path(p);
1730 }
1731
1732 return ret;
1733 }
1734
1735 /*
1736 * helper function for btrfs_search_slot. This does all of the checks
1737 * for node-level blocks and does any balancing required based on
1738 * the ins_len.
1739 *
1740 * If no extra work was required, zero is returned. If we had to
1741 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1742 * start over
1743 */
1744 static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
1746 struct btrfs_root *root, struct btrfs_path *p,
1747 struct extent_buffer *b, int level, int ins_len,
1748 int *write_lock_level)
1749 {
1750 struct btrfs_fs_info *fs_info = root->fs_info;
1751 int ret = 0;
1752
1753 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1754 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
1755
1756 if (*write_lock_level < level + 1) {
1757 *write_lock_level = level + 1;
1758 btrfs_release_path(p);
1759 return -EAGAIN;
1760 }
1761
1762 reada_for_balance(p, level);
1763 ret = split_node(trans, root, p, level);
1764
1765 b = p->nodes[level];
1766 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1767 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
1768
1769 if (*write_lock_level < level + 1) {
1770 *write_lock_level = level + 1;
1771 btrfs_release_path(p);
1772 return -EAGAIN;
1773 }
1774
1775 reada_for_balance(p, level);
1776 ret = balance_level(trans, root, p, level);
1777 if (ret)
1778 return ret;
1779
1780 b = p->nodes[level];
1781 if (!b) {
1782 btrfs_release_path(p);
1783 return -EAGAIN;
1784 }
1785 BUG_ON(btrfs_header_nritems(b) == 1);
1786 }
1787 return ret;
1788 }
1789
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1791 u64 iobjectid, u64 ioff, u8 key_type,
1792 struct btrfs_key *found_key)
1793 {
1794 int ret;
1795 struct btrfs_key key;
1796 struct extent_buffer *eb;
1797
1798 ASSERT(path);
1799 ASSERT(found_key);
1800
1801 key.type = key_type;
1802 key.objectid = iobjectid;
1803 key.offset = ioff;
1804
1805 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1806 if (ret < 0)
1807 return ret;
1808
1809 eb = path->nodes[0];
1810 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1811 ret = btrfs_next_leaf(fs_root, path);
1812 if (ret)
1813 return ret;
1814 eb = path->nodes[0];
1815 }
1816
1817 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1818 if (found_key->type != key.type ||
1819 found_key->objectid != key.objectid)
1820 return 1;
1821
1822 return 0;
1823 }
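/*
 * A minimal usage sketch for btrfs_find_item() (illustration only, not part
 * of the original file). It assumes the caller already has a root and only
 * wants the first item matching (objectid, type), starting the scan at the
 * given offset; 'root' and 'objectid' are placeholders supplied by the
 * caller:
 *
 *	struct btrfs_key found;
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_find_item(root, path, objectid, 0, BTRFS_INODE_REF_KEY,
 *			      &found);
 *	if (ret == 0)
 *		pr_debug("found key (%llu %u %llu)\n", found.objectid,
 *			 found.type, found.offset);
 *	btrfs_free_path(path);
 *
 * A return of 1 means no item with that objectid/type exists, and a negative
 * value is an error from the underlying search.
 */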
1824
1825 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1826 struct btrfs_path *p,
1827 int write_lock_level)
1828 {
1829 struct extent_buffer *b;
1830 int root_lock = 0;
1831 int level = 0;
1832
1833 if (p->search_commit_root) {
1834 b = root->commit_root;
1835 atomic_inc(&b->refs);
1836 level = btrfs_header_level(b);
1837 /*
1838 * Ensure that all callers have set skip_locking when
1839 * p->search_commit_root = 1.
1840 */
1841 ASSERT(p->skip_locking == 1);
1842
1843 goto out;
1844 }
1845
1846 if (p->skip_locking) {
1847 b = btrfs_root_node(root);
1848 level = btrfs_header_level(b);
1849 goto out;
1850 }
1851
1852 /* We try very hard to do read locks on the root */
1853 root_lock = BTRFS_READ_LOCK;
1854
1855 /*
1856 * If the level is set to maximum, we can skip trying to get the read
1857 * lock.
1858 */
1859 if (write_lock_level < BTRFS_MAX_LEVEL) {
1860 /*
1861 * We don't know the level of the root node until we actually
1862 * have it read locked
1863 */
1864 if (p->nowait) {
1865 b = btrfs_try_read_lock_root_node(root);
1866 if (IS_ERR(b))
1867 return b;
1868 } else {
1869 b = btrfs_read_lock_root_node(root);
1870 }
1871 level = btrfs_header_level(b);
1872 if (level > write_lock_level)
1873 goto out;
1874
1875 /* Whoops, must trade for write lock */
1876 btrfs_tree_read_unlock(b);
1877 free_extent_buffer(b);
1878 }
1879
1880 b = btrfs_lock_root_node(root);
1881 root_lock = BTRFS_WRITE_LOCK;
1882
1883 /* The level might have changed, check again */
1884 level = btrfs_header_level(b);
1885
1886 out:
1887 /*
1888 * The root may have failed to write out at some point, and thus is no
1889 * longer valid, return an error in this case.
1890 */
1891 if (!extent_buffer_uptodate(b)) {
1892 if (root_lock)
1893 btrfs_tree_unlock_rw(b, root_lock);
1894 free_extent_buffer(b);
1895 return ERR_PTR(-EIO);
1896 }
1897
1898 p->nodes[level] = b;
1899 if (!p->skip_locking)
1900 p->locks[level] = root_lock;
1901 /*
1902 * Callers are responsible for dropping b's references.
1903 */
1904 return b;
1905 }
1906
1907 /*
1908 * Replace the extent buffer at the lowest level of the path with a cloned
1909 * version. The purpose is to be able to use it safely, after releasing the
1910 * commit root semaphore, even if relocation is happening in parallel, the
1911 * transaction used for relocation is committed and the extent buffer is
1912 * reallocated in the next transaction.
1913 *
1914 * This is used in a context where the caller does not prevent transaction
1915 * commits from happening, either by holding a transaction handle or holding
1916 * some lock, while it's doing searches through a commit root.
1917 * At the moment it's only used for send operations.
1918 */
1919 static int finish_need_commit_sem_search(struct btrfs_path *path)
1920 {
1921 const int i = path->lowest_level;
1922 const int slot = path->slots[i];
1923 struct extent_buffer *lowest = path->nodes[i];
1924 struct extent_buffer *clone;
1925
1926 ASSERT(path->need_commit_sem);
1927
1928 if (!lowest)
1929 return 0;
1930
1931 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);
1932
1933 clone = btrfs_clone_extent_buffer(lowest);
1934 if (!clone)
1935 return -ENOMEM;
1936
1937 btrfs_release_path(path);
1938 path->nodes[i] = clone;
1939 path->slots[i] = slot;
1940
1941 return 0;
1942 }
1943
1944 static inline int search_for_key_slot(struct extent_buffer *eb,
1945 int search_low_slot,
1946 const struct btrfs_key *key,
1947 int prev_cmp,
1948 int *slot)
1949 {
1950 /*
1951 * If a previous call to btrfs_bin_search() on a parent node returned an
1952 * exact match (prev_cmp == 0), we can safely assume the target key will
1953 * always be at slot 0 on lower levels, since each key pointer
1954 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
1955 * subtree it points to. Thus we can skip searching lower levels.
1956 */
1957 if (prev_cmp == 0) {
1958 *slot = 0;
1959 return 0;
1960 }
1961
1962 return btrfs_bin_search(eb, search_low_slot, key, slot);
1963 }
1964
1965 static int search_leaf(struct btrfs_trans_handle *trans,
1966 struct btrfs_root *root,
1967 const struct btrfs_key *key,
1968 struct btrfs_path *path,
1969 int ins_len,
1970 int prev_cmp)
1971 {
1972 struct extent_buffer *leaf = path->nodes[0];
1973 int leaf_free_space = -1;
1974 int search_low_slot = 0;
1975 int ret;
1976 bool do_bin_search = true;
1977
1978 /*
1979 * If we are doing an insertion, the leaf has enough free space and the
1980 * destination slot for the key is not slot 0, then we can unlock our
1981 * write lock on the parent, and any other upper nodes, before doing the
1982 * binary search on the leaf (with search_for_key_slot()), allowing other
1983 * tasks to lock the parent and any other upper nodes.
1984 */
1985 if (ins_len > 0) {
1986 /*
1987 * Cache the leaf free space, since we will need it later and it
1988 * will not change until then.
1989 */
1990 leaf_free_space = btrfs_leaf_free_space(leaf);
1991
1992 /*
1993 * !path->locks[1] means we have a single node tree, the leaf is
1994 * the root of the tree.
1995 */
1996 if (path->locks[1] && leaf_free_space >= ins_len) {
1997 struct btrfs_disk_key first_key;
1998
1999 ASSERT(btrfs_header_nritems(leaf) > 0);
2000 btrfs_item_key(leaf, &first_key, 0);
2001
2002 /*
2003 * Doing the extra comparison with the first key is cheap,
2004 * taking into account that the first key is very likely
2005 * already in a cache line because it immediately follows
2006 * the extent buffer's header and we have recently accessed
2007 * the header's level field.
2008 */
2009 ret = comp_keys(&first_key, key);
2010 if (ret < 0) {
2011 /*
2012 * The first key is smaller than the key we want
2013 * to insert, so we are safe to unlock all upper
2014 * nodes and we have to do the binary search.
2015 *
2016 * We do use btrfs_unlock_up_safe() and not
2017 * unlock_up() because the latter does not unlock
2018 * nodes with a slot of 0 - we can safely unlock
2019 * any node even if its slot is 0 since in this
2020 * case the key does not end up at slot 0 of the
2021 * leaf and there's no need to split the leaf.
2022 */
2023 btrfs_unlock_up_safe(path, 1);
2024 search_low_slot = 1;
2025 } else {
2026 /*
2027 * The first key is >= the key we want to
2028 * insert, so we can skip the binary search as
2029 * the target key will be at slot 0.
2030 *
2031 * We can not unlock upper nodes when the key is
2032 * less than the first key, because we will need
2033 * to update the key at slot 0 of the parent node
2034 * and possibly of other upper nodes too.
2035 * If the key matches the first key, then we can
2036 * unlock all the upper nodes, using
2037 * btrfs_unlock_up_safe() instead of unlock_up()
2038 * as stated above.
2039 */
2040 if (ret == 0)
2041 btrfs_unlock_up_safe(path, 1);
2042 /*
2043 * ret is already 0 or 1, matching the result of
2044 * a btrfs_bin_search() call, so there is no need
2045 * to adjust it.
2046 */
2047 do_bin_search = false;
2048 path->slots[0] = 0;
2049 }
2050 }
2051 }
2052
2053 if (do_bin_search) {
2054 ret = search_for_key_slot(leaf, search_low_slot, key,
2055 prev_cmp, &path->slots[0]);
2056 if (ret < 0)
2057 return ret;
2058 }
2059
2060 if (ins_len > 0) {
2061 /*
2062 * Item key already exists. In this case, if we are allowed to
2063 * insert the item (for example, in dir_item case, item key
2064 * collision is allowed), it will be merged with the original
2065 * item. Only the item size grows, no new btrfs item will be
2066 * added. If search_for_extension is not set, ins_len already
2067 * accounts for the size of struct btrfs_item; deduct it here so
2068 * the leaf space check will be correct.
2069 */
2070 if (ret == 0 && !path->search_for_extension) {
2071 ASSERT(ins_len >= sizeof(struct btrfs_item));
2072 ins_len -= sizeof(struct btrfs_item);
2073 }
2074
2075 ASSERT(leaf_free_space >= 0);
2076
2077 if (leaf_free_space < ins_len) {
2078 int err;
2079
2080 err = split_leaf(trans, root, key, path, ins_len,
2081 (ret == 0));
2082 ASSERT(err <= 0);
2083 if (WARN_ON(err > 0))
2084 err = -EUCLEAN;
2085 if (err)
2086 ret = err;
2087 }
2088 }
2089
2090 return ret;
2091 }
2092
2093 /*
2094 * btrfs_search_slot - look for a key in a tree and perform necessary
2095 * modifications to preserve tree invariants.
2096 *
2097 * @trans: Handle of transaction, used when modifying the tree
2098 * @p: Holds all btree nodes along the search path
2099 * @root: The root node of the tree
2100 * @key: The key we are looking for
2101 * @ins_len: Indicates purpose of search:
2102 * >0 for inserts, it is the size of the item inserted (*)
2103 * <0 for deletions
2104 * 0 for plain searches, not modifying the tree
2105 *
2106 * (*) If size of item inserted doesn't include
2107 * sizeof(struct btrfs_item), then p->search_for_extension must
2108 * be set.
2109 * @cow: boolean should CoW operations be performed. Must always be 1
2110 * when modifying the tree.
2111 *
2112 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2113 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2114 *
2115 * If @key is found, 0 is returned and you can find the item in the leaf level
2116 * of the path (level 0)
2117 *
2118 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2119 * points to the slot where it should be inserted
2120 *
2121 * If an error is encountered while searching the tree a negative error number
2122 * is returned
2123 */
2124 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2125 const struct btrfs_key *key, struct btrfs_path *p,
2126 int ins_len, int cow)
2127 {
2128 struct btrfs_fs_info *fs_info = root->fs_info;
2129 struct extent_buffer *b;
2130 int slot;
2131 int ret;
2132 int err;
2133 int level;
2134 int lowest_unlock = 1;
2135 /* everything at write_lock_level or lower must be write locked */
2136 int write_lock_level = 0;
2137 u8 lowest_level = 0;
2138 int min_write_lock_level;
2139 int prev_cmp;
2140
2141 might_sleep();
2142
2143 lowest_level = p->lowest_level;
2144 WARN_ON(lowest_level && ins_len > 0);
2145 WARN_ON(p->nodes[0] != NULL);
2146 BUG_ON(!cow && ins_len);
2147
2148 /*
2149 * For now only allow nowait for read-only operations. There's no
2150 * strict reason why we can't support it for writes, we just only need
2151 * it for reads so it's only implemented for reads.
2152 */
2153 ASSERT(!p->nowait || !cow);
2154
2155 if (ins_len < 0) {
2156 lowest_unlock = 2;
2157
2158 /* when we are removing items, we might have to go up to level
2159 * two as we update tree pointers. Make sure we keep write
2160 * locks for those levels as well
2161 */
2162 write_lock_level = 2;
2163 } else if (ins_len > 0) {
2164 /*
2165 * for inserting items, make sure we have a write lock on
2166 * level 1 so we can update keys
2167 */
2168 write_lock_level = 1;
2169 }
2170
2171 if (!cow)
2172 write_lock_level = -1;
2173
2174 if (cow && (p->keep_locks || p->lowest_level))
2175 write_lock_level = BTRFS_MAX_LEVEL;
2176
2177 min_write_lock_level = write_lock_level;
2178
2179 if (p->need_commit_sem) {
2180 ASSERT(p->search_commit_root);
2181 if (p->nowait) {
2182 if (!down_read_trylock(&fs_info->commit_root_sem))
2183 return -EAGAIN;
2184 } else {
2185 down_read(&fs_info->commit_root_sem);
2186 }
2187 }
2188
2189 again:
2190 prev_cmp = -1;
2191 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2192 if (IS_ERR(b)) {
2193 ret = PTR_ERR(b);
2194 goto done;
2195 }
2196
2197 while (b) {
2198 int dec = 0;
2199
2200 level = btrfs_header_level(b);
2201
2202 if (cow) {
2203 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2204
2205 /*
2206 * if we don't really need to cow this block
2207 * then we don't want to set the path blocking,
2208 * so we test it here
2209 */
2210 if (!should_cow_block(trans, root, b))
2211 goto cow_done;
2212
2213 /*
2214 * must have write locks on this node and the
2215 * parent
2216 */
2217 if (level > write_lock_level ||
2218 (level + 1 > write_lock_level &&
2219 level + 1 < BTRFS_MAX_LEVEL &&
2220 p->nodes[level + 1])) {
2221 write_lock_level = level + 1;
2222 btrfs_release_path(p);
2223 goto again;
2224 }
2225
2226 if (last_level)
2227 err = btrfs_cow_block(trans, root, b, NULL, 0,
2228 &b,
2229 BTRFS_NESTING_COW);
2230 else
2231 err = btrfs_cow_block(trans, root, b,
2232 p->nodes[level + 1],
2233 p->slots[level + 1], &b,
2234 BTRFS_NESTING_COW);
2235 if (err) {
2236 ret = err;
2237 goto done;
2238 }
2239 }
2240 cow_done:
2241 p->nodes[level] = b;
2242
2243 /*
2244 * we have a lock on b and as long as we aren't changing
2245 * the tree, there is no way for the items in b to change.
2246 * It is safe to drop the lock on our parent before we
2247 * go through the expensive btree search on b.
2248 *
2249 * If we're inserting or deleting (ins_len != 0), then we might
2250 * be changing slot zero, which may require changing the parent.
2251 * So, we can't drop the lock until after we know which slot
2252 * we're operating on.
2253 */
2254 if (!ins_len && !p->keep_locks) {
2255 int u = level + 1;
2256
2257 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2258 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2259 p->locks[u] = 0;
2260 }
2261 }
2262
2263 if (level == 0) {
2264 if (ins_len > 0)
2265 ASSERT(write_lock_level >= 1);
2266
2267 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
2268 if (!p->search_for_split)
2269 unlock_up(p, level, lowest_unlock,
2270 min_write_lock_level, NULL);
2271 goto done;
2272 }
2273
2274 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
2275 if (ret < 0)
2276 goto done;
2277 prev_cmp = ret;
2278
2279 if (ret && slot > 0) {
2280 dec = 1;
2281 slot--;
2282 }
2283 p->slots[level] = slot;
2284 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2285 &write_lock_level);
2286 if (err == -EAGAIN)
2287 goto again;
2288 if (err) {
2289 ret = err;
2290 goto done;
2291 }
2292 b = p->nodes[level];
2293 slot = p->slots[level];
2294
2295 /*
2296 * Slot 0 is special, if we change the key we have to update
2297 * the parent pointer which means we must have a write lock on
2298 * the parent
2299 */
2300 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2301 write_lock_level = level + 1;
2302 btrfs_release_path(p);
2303 goto again;
2304 }
2305
2306 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2307 &write_lock_level);
2308
2309 if (level == lowest_level) {
2310 if (dec)
2311 p->slots[level]++;
2312 goto done;
2313 }
2314
2315 err = read_block_for_search(root, p, &b, level, slot, key);
2316 if (err == -EAGAIN)
2317 goto again;
2318 if (err) {
2319 ret = err;
2320 goto done;
2321 }
2322
2323 if (!p->skip_locking) {
2324 level = btrfs_header_level(b);
2325
2326 btrfs_maybe_reset_lockdep_class(root, b);
2327
2328 if (level <= write_lock_level) {
2329 btrfs_tree_lock(b);
2330 p->locks[level] = BTRFS_WRITE_LOCK;
2331 } else {
2332 if (p->nowait) {
2333 if (!btrfs_try_tree_read_lock(b)) {
2334 free_extent_buffer(b);
2335 ret = -EAGAIN;
2336 goto done;
2337 }
2338 } else {
2339 btrfs_tree_read_lock(b);
2340 }
2341 p->locks[level] = BTRFS_READ_LOCK;
2342 }
2343 p->nodes[level] = b;
2344 }
2345 }
2346 ret = 1;
2347 done:
2348 if (ret < 0 && !p->skip_release_on_error)
2349 btrfs_release_path(p);
2350
2351 if (p->need_commit_sem) {
2352 int ret2;
2353
2354 ret2 = finish_need_commit_sem_search(p);
2355 up_read(&fs_info->commit_root_sem);
2356 if (ret2)
2357 ret = ret2;
2358 }
2359
2360 return ret;
2361 }
2362 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
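/*
 * A hedged example of how the return values of btrfs_search_slot() are
 * typically consumed for a read-only lookup (sketch only; 'process_item',
 * 'ino' and 'root' are hypothetical caller-supplied names):
 *
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		return ret;	// hard error; path is released unless
 *				// skip_release_on_error is set
 *	if (ret > 0) {
 *		// not found, path->slots[0] is the would-be insert position
 *		btrfs_release_path(path);
 *		return -ENOENT;
 *	}
 *	process_item(path->nodes[0], path->slots[0]);
 *	btrfs_release_path(path);
 *
 * For insertions a transaction handle, cow == 1 and a positive ins_len (the
 * item size, usually plus sizeof(struct btrfs_item)) are required, as
 * described in the comment above.
 */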
2363
2364 /*
2365 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2366 * current state of the tree together with the operations recorded in the tree
2367 * modification log to search for the key in a previous version of this tree, as
2368 * denoted by the time_seq parameter.
2369 *
2370 * Naturally, there is no support for insert, delete or cow operations.
2371 *
2372 * The resulting path and return value will be set up as if we called
2373 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2374 */
2375 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2376 struct btrfs_path *p, u64 time_seq)
2377 {
2378 struct btrfs_fs_info *fs_info = root->fs_info;
2379 struct extent_buffer *b;
2380 int slot;
2381 int ret;
2382 int err;
2383 int level;
2384 int lowest_unlock = 1;
2385 u8 lowest_level = 0;
2386
2387 lowest_level = p->lowest_level;
2388 WARN_ON(p->nodes[0] != NULL);
2389 ASSERT(!p->nowait);
2390
2391 if (p->search_commit_root) {
2392 BUG_ON(time_seq);
2393 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2394 }
2395
2396 again:
2397 b = btrfs_get_old_root(root, time_seq);
2398 if (!b) {
2399 ret = -EIO;
2400 goto done;
2401 }
2402 level = btrfs_header_level(b);
2403 p->locks[level] = BTRFS_READ_LOCK;
2404
2405 while (b) {
2406 int dec = 0;
2407
2408 level = btrfs_header_level(b);
2409 p->nodes[level] = b;
2410
2411 /*
2412 * we have a lock on b and as long as we aren't changing
2413 * the tree, there is no way for the items in b to change.
2414 * It is safe to drop the lock on our parent before we
2415 * go through the expensive btree search on b.
2416 */
2417 btrfs_unlock_up_safe(p, level + 1);
2418
2419 ret = btrfs_bin_search(b, 0, key, &slot);
2420 if (ret < 0)
2421 goto done;
2422
2423 if (level == 0) {
2424 p->slots[level] = slot;
2425 unlock_up(p, level, lowest_unlock, 0, NULL);
2426 goto done;
2427 }
2428
2429 if (ret && slot > 0) {
2430 dec = 1;
2431 slot--;
2432 }
2433 p->slots[level] = slot;
2434 unlock_up(p, level, lowest_unlock, 0, NULL);
2435
2436 if (level == lowest_level) {
2437 if (dec)
2438 p->slots[level]++;
2439 goto done;
2440 }
2441
2442 err = read_block_for_search(root, p, &b, level, slot, key);
2443 if (err == -EAGAIN)
2444 goto again;
2445 if (err) {
2446 ret = err;
2447 goto done;
2448 }
2449
2450 level = btrfs_header_level(b);
2451 btrfs_tree_read_lock(b);
2452 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2453 if (!b) {
2454 ret = -ENOMEM;
2455 goto done;
2456 }
2457 p->locks[level] = BTRFS_READ_LOCK;
2458 p->nodes[level] = b;
2459 }
2460 ret = 1;
2461 done:
2462 if (ret < 0)
2463 btrfs_release_path(p);
2464
2465 return ret;
2466 }
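/*
 * Hedged sketch of a time-travel lookup (illustrative only): the caller
 * first obtains a sequence number that keeps the relevant operations in the
 * tree mod log, then replays the tree as it looked at that point:
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *
 * where 'time_seq' was previously returned by the tree mod log sequence
 * helpers (see tree-mod-log.c). The resulting path is read-only; using it
 * for modifications would defeat the rewind.
 */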
2467
2468 /*
2469 * Search the tree again to find a leaf with smaller keys.
2470 * Returns 0 if it found something.
2471 * Returns 1 if there are no smaller keys.
2472 * Returns < 0 on error.
2473 *
2474 * This may release the path, and so you may lose any locks held at the
2475 * time you call it.
2476 */
2477 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
2478 {
2479 struct btrfs_key key;
2480 struct btrfs_key orig_key;
2481 struct btrfs_disk_key found_key;
2482 int ret;
2483
2484 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
2485 orig_key = key;
2486
2487 if (key.offset > 0) {
2488 key.offset--;
2489 } else if (key.type > 0) {
2490 key.type--;
2491 key.offset = (u64)-1;
2492 } else if (key.objectid > 0) {
2493 key.objectid--;
2494 key.type = (u8)-1;
2495 key.offset = (u64)-1;
2496 } else {
2497 return 1;
2498 }
2499
2500 btrfs_release_path(path);
2501 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2502 if (ret <= 0)
2503 return ret;
2504
2505 /*
2506 * Previous key not found. Even if we were at slot 0 of the leaf we had
2507 * before releasing the path and calling btrfs_search_slot(), we now may
2508 * be in a slot pointing to the same original key - this can happen if
2509 * after we released the path, one or more items were moved from a
2510 * sibling leaf into the front of the leaf we had due to an insertion
2511 * (see push_leaf_right()).
2512 * If we hit this case and our slot is > 0, just decrement the slot
2513 * so that the caller does not process the same key again, which may or
2514 * may not break the caller, depending on its logic.
2515 */
2516 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
2517 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
2518 ret = comp_keys(&found_key, &orig_key);
2519 if (ret == 0) {
2520 if (path->slots[0] > 0) {
2521 path->slots[0]--;
2522 return 0;
2523 }
2524 /*
2525 * At slot 0, same key as before, it means orig_key is
2526 * the lowest, leftmost, key in the tree. We're done.
2527 */
2528 return 1;
2529 }
2530 }
2531
2532 btrfs_item_key(path->nodes[0], &found_key, 0);
2533 ret = comp_keys(&found_key, &key);
2534 /*
2535 * We might have had an item with the previous key in the tree right
2536 * before we released our path. And after we released our path, that
2537 * item might have been pushed to the first slot (0) of the leaf we
2538 * were holding due to a tree balance. Alternatively, an item with the
2539 * previous key can exist as the only element of a leaf (big fat item).
2540 * Therefore account for these 2 cases, so that our callers (like
2541 * btrfs_previous_item) don't miss an existing item with a key matching
2542 * the previous key we computed above.
2543 */
2544 if (ret <= 0)
2545 return 0;
2546 return 1;
2547 }
2548
2549 /*
2550 * helper to use instead of search slot if no exact match is needed but
2551 * instead the next or previous item should be returned.
2552 * When find_higher is true, the next higher item is returned, the next lower
2553 * otherwise.
2554 * When return_any and find_higher are both true, and no higher item is found,
2555 * return the next lower instead.
2556 * When return_any is true and find_higher is false, and no lower item is found,
2557 * return the next higher instead.
2558 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2559 * < 0 on error
2560 */
2561 int btrfs_search_slot_for_read(struct btrfs_root *root,
2562 const struct btrfs_key *key,
2563 struct btrfs_path *p, int find_higher,
2564 int return_any)
2565 {
2566 int ret;
2567 struct extent_buffer *leaf;
2568
2569 again:
2570 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2571 if (ret <= 0)
2572 return ret;
2573 /*
2574 * a return value of 1 means the path is at the position where the
2575 * item should be inserted. Normally this is the next bigger item,
2576 * but in case the previous item is the last in a leaf, path points
2577 * to the first free slot in the previous leaf, i.e. at an invalid
2578 * item.
2579 */
2580 leaf = p->nodes[0];
2581
2582 if (find_higher) {
2583 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2584 ret = btrfs_next_leaf(root, p);
2585 if (ret <= 0)
2586 return ret;
2587 if (!return_any)
2588 return 1;
2589 /*
2590 * no higher item found, return the next
2591 * lower instead
2592 */
2593 return_any = 0;
2594 find_higher = 0;
2595 btrfs_release_path(p);
2596 goto again;
2597 }
2598 } else {
2599 if (p->slots[0] == 0) {
2600 ret = btrfs_prev_leaf(root, p);
2601 if (ret < 0)
2602 return ret;
2603 if (!ret) {
2604 leaf = p->nodes[0];
2605 if (p->slots[0] == btrfs_header_nritems(leaf))
2606 p->slots[0]--;
2607 return 0;
2608 }
2609 if (!return_any)
2610 return 1;
2611 /*
2612 * no lower item found, return the next
2613 * higher instead
2614 */
2615 return_any = 0;
2616 find_higher = 1;
2617 btrfs_release_path(p);
2618 goto again;
2619 } else {
2620 --p->slots[0];
2621 }
2622 }
2623 return 0;
2624 }
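/*
 * Illustrative sketch (not from the original source): find the first item at
 * or after @key, falling back to the closest lower one if nothing higher
 * exists:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	else
 *		// ret == 1: the tree is empty
 *
 * With find_higher == 0 the same call walks towards smaller keys instead.
 */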
2625
2626 /*
2627 * Execute search and call btrfs_previous_item to traverse backwards if the item
2628 * was not found.
2629 *
2630 * Return 0 if found, 1 if not found and < 0 if error.
2631 */
2632 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
2633 struct btrfs_path *path)
2634 {
2635 int ret;
2636
2637 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
2638 if (ret > 0)
2639 ret = btrfs_previous_item(root, path, key->objectid, key->type);
2640
2641 if (ret == 0)
2642 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2643
2644 return ret;
2645 }
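/*
 * Sketch (illustrative only): btrfs_search_backwards() is handy when the
 * caller wants the last item whose objectid and type match @key, e.g. the
 * highest existing offset; 'ino' is a placeholder:
 *
 *	key.objectid = ino;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = (u64)-1;
 *	ret = btrfs_search_backwards(root, &key, path);
 *	if (ret == 0)
 *		// key now holds the actual (objectid, type, offset) found
 *
 * A return of 1 means no such item exists, and < 0 is an error from the
 * underlying search.
 */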
2646
2647 /*
2648 * Search for a valid slot for the given path.
2649 *
2650 * @root: The root node of the tree.
2651 * @key: Will contain a valid item if found.
2652 * @path: The starting point to validate the slot.
2653 *
2654 * Return: 0 if the item is valid
2655 * 1 if not found
2656 * <0 if error.
2657 */
2658 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
2659 struct btrfs_path *path)
2660 {
2661 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2662 int ret;
2663
2664 ret = btrfs_next_leaf(root, path);
2665 if (ret)
2666 return ret;
2667 }
2668
2669 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2670 return 0;
2671 }
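/*
 * A common (hedged) iteration pattern built on btrfs_get_next_valid_item():
 * position the path once with btrfs_search_slot() and then advance one slot
 * at a time, letting the helper hop to the next leaf when the current one is
 * exhausted. 'wanted_objectid' is a placeholder for whatever bound the
 * caller enforces:
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		return ret;
 *	while (1) {
 *		ret = btrfs_get_next_valid_item(root, &key, path);
 *		if (ret)
 *			break;		// > 0: no more items, < 0: error
 *		if (key.objectid != wanted_objectid)
 *			break;		// walked past the range of interest
 *		// ... process path->nodes[0] at path->slots[0] ...
 *		path->slots[0]++;
 *	}
 *	btrfs_release_path(path);
 */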
2672
2673 /*
2674 * adjust the pointers going up the tree, starting at level
2675 * making sure the right key of each node is points to 'key'.
2676 * This is used after shifting pointers to the left, so it stops
2677 * fixing up pointers when a given leaf/node is not in slot 0 of the
2678 * higher levels
2679 *
2680 */
2681 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2682 struct btrfs_path *path,
2683 struct btrfs_disk_key *key, int level)
2684 {
2685 int i;
2686 struct extent_buffer *t;
2687 int ret;
2688
2689 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2690 int tslot = path->slots[i];
2691
2692 if (!path->nodes[i])
2693 break;
2694 t = path->nodes[i];
2695 ret = btrfs_tree_mod_log_insert_key(t, tslot,
2696 BTRFS_MOD_LOG_KEY_REPLACE);
2697 BUG_ON(ret < 0);
2698 btrfs_set_node_key(t, key, tslot);
2699 btrfs_mark_buffer_dirty(trans, path->nodes[i]);
2700 if (tslot != 0)
2701 break;
2702 }
2703 }
2704
2705 /*
2706 * update item key.
2707 *
2708 * This function isn't completely safe. It's the caller's responsibility
2709 * to ensure that the new key won't break the key ordering.
2710 */
2711 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2712 struct btrfs_path *path,
2713 const struct btrfs_key *new_key)
2714 {
2715 struct btrfs_fs_info *fs_info = trans->fs_info;
2716 struct btrfs_disk_key disk_key;
2717 struct extent_buffer *eb;
2718 int slot;
2719
2720 eb = path->nodes[0];
2721 slot = path->slots[0];
2722 if (slot > 0) {
2723 btrfs_item_key(eb, &disk_key, slot - 1);
2724 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2725 btrfs_print_leaf(eb);
2726 btrfs_crit(fs_info,
2727 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2728 slot, btrfs_disk_key_objectid(&disk_key),
2729 btrfs_disk_key_type(&disk_key),
2730 btrfs_disk_key_offset(&disk_key),
2731 new_key->objectid, new_key->type,
2732 new_key->offset);
2733 BUG();
2734 }
2735 }
2736 if (slot < btrfs_header_nritems(eb) - 1) {
2737 btrfs_item_key(eb, &disk_key, slot + 1);
2738 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2739 btrfs_print_leaf(eb);
2740 btrfs_crit(fs_info,
2741 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2742 slot, btrfs_disk_key_objectid(&disk_key),
2743 btrfs_disk_key_type(&disk_key),
2744 btrfs_disk_key_offset(&disk_key),
2745 new_key->objectid, new_key->type,
2746 new_key->offset);
2747 BUG();
2748 }
2749 }
2750
2751 btrfs_cpu_key_to_disk(&disk_key, new_key);
2752 btrfs_set_item_key(eb, &disk_key, slot);
2753 btrfs_mark_buffer_dirty(trans, eb);
2754 if (slot == 0)
2755 fixup_low_keys(trans, path, &disk_key, 1);
2756 }
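/*
 * Sketch of the typical btrfs_set_item_key_safe() pattern (illustration
 * only): the item's content has already been adjusted so that it logically
 * starts at a larger offset, and only the key needs to follow. 'key' and
 * 'new_start' are hypothetical caller values:
 *
 *	struct btrfs_key new_key = key;		// same objectid and type
 *
 *	new_key.offset = new_start;		// must stay above slot - 1's key
 *	btrfs_set_item_key_safe(trans, path, &new_key);
 *
 * The function BUG()s if the new key would violate the ordering against the
 * neighbouring items, which is why callers compute new_key carefully first.
 */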
2757
2758 /*
2759 * Check key order of two sibling extent buffers.
2760 *
2761 * Return true if something is wrong.
2762 * Return false if everything is fine.
2763 *
2764 * Tree-checker only works inside one tree block, thus the following
2765 * corruption cannot be detected by tree-checker:
2766 *
2767 * Leaf @left | Leaf @right
2768 * --------------------------------------------------------------
2769 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2770 *
2771 * Key f6 in leaf @left itself is valid, but not valid when the next
2772 * key in leaf @right is 7.
2773 * This can only be checked at tree block merge time.
2774 * And since tree checker has ensured all key order in each tree block
2775 * is correct, we only need to bother the last key of @left and the first
2776 * key of @right.
2777 */
2778 static bool check_sibling_keys(struct extent_buffer *left,
2779 struct extent_buffer *right)
2780 {
2781 struct btrfs_key left_last;
2782 struct btrfs_key right_first;
2783 int level = btrfs_header_level(left);
2784 int nr_left = btrfs_header_nritems(left);
2785 int nr_right = btrfs_header_nritems(right);
2786
2787 /* No key to check in one of the tree blocks */
2788 if (!nr_left || !nr_right)
2789 return false;
2790
2791 if (level) {
2792 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2793 btrfs_node_key_to_cpu(right, &right_first, 0);
2794 } else {
2795 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2796 btrfs_item_key_to_cpu(right, &right_first, 0);
2797 }
2798
2799 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) {
2800 btrfs_crit(left->fs_info, "left extent buffer:");
2801 btrfs_print_tree(left, false);
2802 btrfs_crit(left->fs_info, "right extent buffer:");
2803 btrfs_print_tree(right, false);
2804 btrfs_crit(left->fs_info,
2805 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2806 left_last.objectid, left_last.type,
2807 left_last.offset, right_first.objectid,
2808 right_first.type, right_first.offset);
2809 return true;
2810 }
2811 return false;
2812 }
2813
2814 /*
2815 * try to push data from one node into the next node left in the
2816 * tree.
2817 *
2818 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2819 * error, and > 0 if there was no room in the left hand block.
2820 */
2821 static int push_node_left(struct btrfs_trans_handle *trans,
2822 struct extent_buffer *dst,
2823 struct extent_buffer *src, int empty)
2824 {
2825 struct btrfs_fs_info *fs_info = trans->fs_info;
2826 int push_items = 0;
2827 int src_nritems;
2828 int dst_nritems;
2829 int ret = 0;
2830
2831 src_nritems = btrfs_header_nritems(src);
2832 dst_nritems = btrfs_header_nritems(dst);
2833 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2834 WARN_ON(btrfs_header_generation(src) != trans->transid);
2835 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2836
2837 if (!empty && src_nritems <= 8)
2838 return 1;
2839
2840 if (push_items <= 0)
2841 return 1;
2842
2843 if (empty) {
2844 push_items = min(src_nritems, push_items);
2845 if (push_items < src_nritems) {
2846 /* leave at least 8 pointers in the node if
2847 * we aren't going to empty it
2848 */
2849 if (src_nritems - push_items < 8) {
2850 if (push_items <= 8)
2851 return 1;
2852 push_items -= 8;
2853 }
2854 }
2855 } else
2856 push_items = min(src_nritems - 8, push_items);
2857
2858 /* dst is the left eb, src is the middle eb */
2859 if (check_sibling_keys(dst, src)) {
2860 ret = -EUCLEAN;
2861 btrfs_abort_transaction(trans, ret);
2862 return ret;
2863 }
2864 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2865 if (ret) {
2866 btrfs_abort_transaction(trans, ret);
2867 return ret;
2868 }
2869 copy_extent_buffer(dst, src,
2870 btrfs_node_key_ptr_offset(dst, dst_nritems),
2871 btrfs_node_key_ptr_offset(src, 0),
2872 push_items * sizeof(struct btrfs_key_ptr));
2873
2874 if (push_items < src_nritems) {
2875 /*
2876 * btrfs_tree_mod_log_eb_copy handles logging the move, so we
2877 * don't need to do an explicit tree mod log operation for it.
2878 */
2879 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0),
2880 btrfs_node_key_ptr_offset(src, push_items),
2881 (src_nritems - push_items) *
2882 sizeof(struct btrfs_key_ptr));
2883 }
2884 btrfs_set_header_nritems(src, src_nritems - push_items);
2885 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2886 btrfs_mark_buffer_dirty(trans, src);
2887 btrfs_mark_buffer_dirty(trans, dst);
2888
2889 return ret;
2890 }
2891
2892 /*
2893 * try to push data from one node into the next node right in the
2894 * tree.
2895 *
2896 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2897 * error, and > 0 if there was no room in the right hand block.
2898 *
2899 * this will only push up to 1/2 the contents of the left node over
2900 */
2901 static int balance_node_right(struct btrfs_trans_handle *trans,
2902 struct extent_buffer *dst,
2903 struct extent_buffer *src)
2904 {
2905 struct btrfs_fs_info *fs_info = trans->fs_info;
2906 int push_items = 0;
2907 int max_push;
2908 int src_nritems;
2909 int dst_nritems;
2910 int ret = 0;
2911
2912 WARN_ON(btrfs_header_generation(src) != trans->transid);
2913 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2914
2915 src_nritems = btrfs_header_nritems(src);
2916 dst_nritems = btrfs_header_nritems(dst);
2917 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2918 if (push_items <= 0)
2919 return 1;
2920
2921 if (src_nritems < 4)
2922 return 1;
2923
2924 max_push = src_nritems / 2 + 1;
2925 /* don't try to empty the node */
2926 if (max_push >= src_nritems)
2927 return 1;
2928
2929 if (max_push < push_items)
2930 push_items = max_push;
2931
2932 /* dst is the right eb, src is the middle eb */
2933 if (check_sibling_keys(src, dst)) {
2934 ret = -EUCLEAN;
2935 btrfs_abort_transaction(trans, ret);
2936 return ret;
2937 }
2938
2939 /*
2940 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't
2941 * need to do an explicit tree mod log operation for it.
2942 */
2943 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items),
2944 btrfs_node_key_ptr_offset(dst, 0),
2945 (dst_nritems) *
2946 sizeof(struct btrfs_key_ptr));
2947
2948 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2949 push_items);
2950 if (ret) {
2951 btrfs_abort_transaction(trans, ret);
2952 return ret;
2953 }
2954 copy_extent_buffer(dst, src,
2955 btrfs_node_key_ptr_offset(dst, 0),
2956 btrfs_node_key_ptr_offset(src, src_nritems - push_items),
2957 push_items * sizeof(struct btrfs_key_ptr));
2958
2959 btrfs_set_header_nritems(src, src_nritems - push_items);
2960 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2961
2962 btrfs_mark_buffer_dirty(trans, src);
2963 btrfs_mark_buffer_dirty(trans, dst);
2964
2965 return ret;
2966 }
2967
2968 /*
2969 * helper function to insert a new root level in the tree.
2970 * A new node is allocated, and a single item is inserted to
2971 * point to the existing root
2972 *
2973 * returns zero on success or < 0 on failure.
2974 */
2975 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2976 struct btrfs_root *root,
2977 struct btrfs_path *path, int level)
2978 {
2979 struct btrfs_fs_info *fs_info = root->fs_info;
2980 u64 lower_gen;
2981 struct extent_buffer *lower;
2982 struct extent_buffer *c;
2983 struct extent_buffer *old;
2984 struct btrfs_disk_key lower_key;
2985 int ret;
2986
2987 BUG_ON(path->nodes[level]);
2988 BUG_ON(path->nodes[level-1] != root->node);
2989
2990 lower = path->nodes[level-1];
2991 if (level == 1)
2992 btrfs_item_key(lower, &lower_key, 0);
2993 else
2994 btrfs_node_key(lower, &lower_key, 0);
2995
2996 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2997 &lower_key, level, root->node->start, 0,
2998 BTRFS_NESTING_NEW_ROOT);
2999 if (IS_ERR(c))
3000 return PTR_ERR(c);
3001
3002 root_add_used(root, fs_info->nodesize);
3003
3004 btrfs_set_header_nritems(c, 1);
3005 btrfs_set_node_key(c, &lower_key, 0);
3006 btrfs_set_node_blockptr(c, 0, lower->start);
3007 lower_gen = btrfs_header_generation(lower);
3008 WARN_ON(lower_gen != trans->transid);
3009
3010 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3011
3012 btrfs_mark_buffer_dirty(trans, c);
3013
3014 old = root->node;
3015 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
3016 if (ret < 0) {
3017 btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
3018 btrfs_tree_unlock(c);
3019 free_extent_buffer(c);
3020 return ret;
3021 }
3022 rcu_assign_pointer(root->node, c);
3023
3024 /* the super has an extra ref to root->node */
3025 free_extent_buffer(old);
3026
3027 add_root_to_dirty_list(root);
3028 atomic_inc(&c->refs);
3029 path->nodes[level] = c;
3030 path->locks[level] = BTRFS_WRITE_LOCK;
3031 path->slots[level] = 0;
3032 return 0;
3033 }
3034
3035 /*
3036 * worker function to insert a single pointer in a node.
3037 * the node should have enough room for the pointer already
3038 *
3039 * slot and level indicate where you want the key to go, and
3040 * blocknr is the block the key points to.
3041 */
3042 static int insert_ptr(struct btrfs_trans_handle *trans,
3043 struct btrfs_path *path,
3044 struct btrfs_disk_key *key, u64 bytenr,
3045 int slot, int level)
3046 {
3047 struct extent_buffer *lower;
3048 int nritems;
3049 int ret;
3050
3051 BUG_ON(!path->nodes[level]);
3052 btrfs_assert_tree_write_locked(path->nodes[level]);
3053 lower = path->nodes[level];
3054 nritems = btrfs_header_nritems(lower);
3055 BUG_ON(slot > nritems);
3056 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
3057 if (slot != nritems) {
3058 if (level) {
3059 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
3060 slot, nritems - slot);
3061 if (ret < 0) {
3062 btrfs_abort_transaction(trans, ret);
3063 return ret;
3064 }
3065 }
3066 memmove_extent_buffer(lower,
3067 btrfs_node_key_ptr_offset(lower, slot + 1),
3068 btrfs_node_key_ptr_offset(lower, slot),
3069 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3070 }
3071 if (level) {
3072 ret = btrfs_tree_mod_log_insert_key(lower, slot,
3073 BTRFS_MOD_LOG_KEY_ADD);
3074 if (ret < 0) {
3075 btrfs_abort_transaction(trans, ret);
3076 return ret;
3077 }
3078 }
3079 btrfs_set_node_key(lower, key, slot);
3080 btrfs_set_node_blockptr(lower, slot, bytenr);
3081 WARN_ON(trans->transid == 0);
3082 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3083 btrfs_set_header_nritems(lower, nritems + 1);
3084 btrfs_mark_buffer_dirty(trans, lower);
3085
3086 return 0;
3087 }
3088
3089 /*
3090 * split the node at the specified level in path in two.
3091 * The path is corrected to point to the appropriate node after the split
3092 *
3093 * Before splitting this tries to make some room in the node by pushing
3094 * left and right, if either one works, it returns right away.
3095 *
3096 * returns 0 on success and < 0 on failure
3097 */
3098 static noinline int split_node(struct btrfs_trans_handle *trans,
3099 struct btrfs_root *root,
3100 struct btrfs_path *path, int level)
3101 {
3102 struct btrfs_fs_info *fs_info = root->fs_info;
3103 struct extent_buffer *c;
3104 struct extent_buffer *split;
3105 struct btrfs_disk_key disk_key;
3106 int mid;
3107 int ret;
3108 u32 c_nritems;
3109
3110 c = path->nodes[level];
3111 WARN_ON(btrfs_header_generation(c) != trans->transid);
3112 if (c == root->node) {
3113 /*
3114 * trying to split the root, let's make a new one
3115 *
3116 * tree mod log: We don't log the removal of the old root in
3117 * insert_new_root, because that root buffer will be kept as a
3118 * normal node. We are going to log removal of half of the
3119 * elements below with btrfs_tree_mod_log_eb_copy(). We're
3120 * holding a tree lock on the buffer, which is why we cannot
3121 * race with other tree_mod_log users.
3122 */
3123 ret = insert_new_root(trans, root, path, level + 1);
3124 if (ret)
3125 return ret;
3126 } else {
3127 ret = push_nodes_for_insert(trans, root, path, level);
3128 c = path->nodes[level];
3129 if (!ret && btrfs_header_nritems(c) <
3130 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3131 return 0;
3132 if (ret < 0)
3133 return ret;
3134 }
3135
3136 c_nritems = btrfs_header_nritems(c);
3137 mid = (c_nritems + 1) / 2;
3138 btrfs_node_key(c, &disk_key, mid);
3139
3140 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3141 &disk_key, level, c->start, 0,
3142 BTRFS_NESTING_SPLIT);
3143 if (IS_ERR(split))
3144 return PTR_ERR(split);
3145
3146 root_add_used(root, fs_info->nodesize);
3147 ASSERT(btrfs_header_level(c) == level);
3148
3149 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
3150 if (ret) {
3151 btrfs_tree_unlock(split);
3152 free_extent_buffer(split);
3153 btrfs_abort_transaction(trans, ret);
3154 return ret;
3155 }
3156 copy_extent_buffer(split, c,
3157 btrfs_node_key_ptr_offset(split, 0),
3158 btrfs_node_key_ptr_offset(c, mid),
3159 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3160 btrfs_set_header_nritems(split, c_nritems - mid);
3161 btrfs_set_header_nritems(c, mid);
3162
3163 btrfs_mark_buffer_dirty(trans, c);
3164 btrfs_mark_buffer_dirty(trans, split);
3165
3166 ret = insert_ptr(trans, path, &disk_key, split->start,
3167 path->slots[level + 1] + 1, level + 1);
3168 if (ret < 0) {
3169 btrfs_tree_unlock(split);
3170 free_extent_buffer(split);
3171 return ret;
3172 }
3173
3174 if (path->slots[level] >= mid) {
3175 path->slots[level] -= mid;
3176 btrfs_tree_unlock(c);
3177 free_extent_buffer(c);
3178 path->nodes[level] = split;
3179 path->slots[level + 1] += 1;
3180 } else {
3181 btrfs_tree_unlock(split);
3182 free_extent_buffer(split);
3183 }
3184 return 0;
3185 }
3186
3187 /*
3188 * how many bytes are required to store the items in a leaf. start
3189 * and nr indicate which items in the leaf to check. This totals up the
3190 * space used both by the item structs and the item data
3191 */
3192 static int leaf_space_used(const struct extent_buffer *l, int start, int nr)
3193 {
3194 int data_len;
3195 int nritems = btrfs_header_nritems(l);
3196 int end = min(nritems, start + nr) - 1;
3197
3198 if (!nr)
3199 return 0;
3200 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
3201 data_len = data_len - btrfs_item_offset(l, end);
3202 data_len += sizeof(struct btrfs_item) * nr;
3203 WARN_ON(data_len < 0);
3204 return data_len;
3205 }
3206
3207 /*
3208 * The space between the end of the leaf items and
3209 * the start of the leaf data. IOW, how much room
3210 * the leaf has left for both items and data
3211 */
3212 int btrfs_leaf_free_space(const struct extent_buffer *leaf)
3213 {
3214 struct btrfs_fs_info *fs_info = leaf->fs_info;
3215 int nritems = btrfs_header_nritems(leaf);
3216 int ret;
3217
3218 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3219 if (ret < 0) {
3220 btrfs_crit(fs_info,
3221 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3222 ret,
3223 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3224 leaf_space_used(leaf, 0, nritems), nritems);
3225 }
3226 return ret;
3227 }
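/*
 * Worked example (approximate, for illustration): with a 16 KiB nodesize,
 * BTRFS_LEAF_DATA_SIZE() is the nodesize minus the extent buffer header,
 * roughly 16384 - 101 = 16283 bytes. A leaf holding 10 items whose data
 * totals 1000 bytes uses about 10 * sizeof(struct btrfs_item) + 1000 =
 * 250 + 1000 = 1250 bytes, so btrfs_leaf_free_space() would report roughly
 * 15033 bytes left for new item headers and data.
 */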
3228
3229 /*
3230 * min slot controls the lowest index we're willing to push to the
3231 * right. We'll push up to and including min_slot, but no lower
3232 */
3233 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3234 struct btrfs_path *path,
3235 int data_size, int empty,
3236 struct extent_buffer *right,
3237 int free_space, u32 left_nritems,
3238 u32 min_slot)
3239 {
3240 struct btrfs_fs_info *fs_info = right->fs_info;
3241 struct extent_buffer *left = path->nodes[0];
3242 struct extent_buffer *upper = path->nodes[1];
3243 struct btrfs_map_token token;
3244 struct btrfs_disk_key disk_key;
3245 int slot;
3246 u32 i;
3247 int push_space = 0;
3248 int push_items = 0;
3249 u32 nr;
3250 u32 right_nritems;
3251 u32 data_end;
3252 u32 this_item_size;
3253
3254 if (empty)
3255 nr = 0;
3256 else
3257 nr = max_t(u32, 1, min_slot);
3258
3259 if (path->slots[0] >= left_nritems)
3260 push_space += data_size;
3261
3262 slot = path->slots[1];
3263 i = left_nritems - 1;
3264 while (i >= nr) {
3265 if (!empty && push_items > 0) {
3266 if (path->slots[0] > i)
3267 break;
3268 if (path->slots[0] == i) {
3269 int space = btrfs_leaf_free_space(left);
3270
3271 if (space + push_space * 2 > free_space)
3272 break;
3273 }
3274 }
3275
3276 if (path->slots[0] == i)
3277 push_space += data_size;
3278
3279 this_item_size = btrfs_item_size(left, i);
3280 if (this_item_size + sizeof(struct btrfs_item) +
3281 push_space > free_space)
3282 break;
3283
3284 push_items++;
3285 push_space += this_item_size + sizeof(struct btrfs_item);
3286 if (i == 0)
3287 break;
3288 i--;
3289 }
3290
3291 if (push_items == 0)
3292 goto out_unlock;
3293
3294 WARN_ON(!empty && push_items == left_nritems);
3295
3296 /* push left to right */
3297 right_nritems = btrfs_header_nritems(right);
3298
3299 push_space = btrfs_item_data_end(left, left_nritems - push_items);
3300 push_space -= leaf_data_end(left);
3301
3302 /* make room in the right data area */
3303 data_end = leaf_data_end(right);
3304 memmove_leaf_data(right, data_end - push_space, data_end,
3305 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3306
3307 /* copy from the left data area */
3308 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3309 leaf_data_end(left), push_space);
3310
3311 memmove_leaf_items(right, push_items, 0, right_nritems);
3312
3313 /* copy the items from left to right */
3314 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items);
3315
3316 /* update the item pointers */
3317 btrfs_init_map_token(&token, right);
3318 right_nritems += push_items;
3319 btrfs_set_header_nritems(right, right_nritems);
3320 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3321 for (i = 0; i < right_nritems; i++) {
3322 push_space -= btrfs_token_item_size(&token, i);
3323 btrfs_set_token_item_offset(&token, i, push_space);
3324 }
3325
3326 left_nritems -= push_items;
3327 btrfs_set_header_nritems(left, left_nritems);
3328
3329 if (left_nritems)
3330 btrfs_mark_buffer_dirty(trans, left);
3331 else
3332 btrfs_clear_buffer_dirty(trans, left);
3333
3334 btrfs_mark_buffer_dirty(trans, right);
3335
3336 btrfs_item_key(right, &disk_key, 0);
3337 btrfs_set_node_key(upper, &disk_key, slot + 1);
3338 btrfs_mark_buffer_dirty(trans, upper);
3339
3340 /* then fixup the leaf pointer in the path */
3341 if (path->slots[0] >= left_nritems) {
3342 path->slots[0] -= left_nritems;
3343 if (btrfs_header_nritems(path->nodes[0]) == 0)
3344 btrfs_clear_buffer_dirty(trans, path->nodes[0]);
3345 btrfs_tree_unlock(path->nodes[0]);
3346 free_extent_buffer(path->nodes[0]);
3347 path->nodes[0] = right;
3348 path->slots[1] += 1;
3349 } else {
3350 btrfs_tree_unlock(right);
3351 free_extent_buffer(right);
3352 }
3353 return 0;
3354
3355 out_unlock:
3356 btrfs_tree_unlock(right);
3357 free_extent_buffer(right);
3358 return 1;
3359 }
3360
3361 /*
3362 * push some data in the path leaf to the right, trying to free up at
3363 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3364 *
3365 * returns 1 if the push failed because the other node didn't have enough
3366 * room, 0 if everything worked out and < 0 if there were major errors.
3367 *
3368 * this will push starting from min_slot to the end of the leaf. It won't
3369 * push any slot lower than min_slot
3370 */
3371 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3372 *root, struct btrfs_path *path,
3373 int min_data_size, int data_size,
3374 int empty, u32 min_slot)
3375 {
3376 struct extent_buffer *left = path->nodes[0];
3377 struct extent_buffer *right;
3378 struct extent_buffer *upper;
3379 int slot;
3380 int free_space;
3381 u32 left_nritems;
3382 int ret;
3383
3384 if (!path->nodes[1])
3385 return 1;
3386
3387 slot = path->slots[1];
3388 upper = path->nodes[1];
3389 if (slot >= btrfs_header_nritems(upper) - 1)
3390 return 1;
3391
3392 btrfs_assert_tree_write_locked(path->nodes[1]);
3393
3394 right = btrfs_read_node_slot(upper, slot + 1);
3395 if (IS_ERR(right))
3396 return PTR_ERR(right);
3397
3398 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
3399
3400 free_space = btrfs_leaf_free_space(right);
3401 if (free_space < data_size)
3402 goto out_unlock;
3403
3404 ret = btrfs_cow_block(trans, root, right, upper,
3405 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
3406 if (ret)
3407 goto out_unlock;
3408
3409 left_nritems = btrfs_header_nritems(left);
3410 if (left_nritems == 0)
3411 goto out_unlock;
3412
3413 if (check_sibling_keys(left, right)) {
3414 ret = -EUCLEAN;
3415 btrfs_abort_transaction(trans, ret);
3416 btrfs_tree_unlock(right);
3417 free_extent_buffer(right);
3418 return ret;
3419 }
3420 if (path->slots[0] == left_nritems && !empty) {
3421 /* Key greater than all keys in the leaf, right neighbor has
3422 * enough room for it and we're not emptying our leaf to delete
3423 * it, therefore use right neighbor to insert the new item and
3424 * no need to touch/dirty our left leaf. */
3425 btrfs_tree_unlock(left);
3426 free_extent_buffer(left);
3427 path->nodes[0] = right;
3428 path->slots[0] = 0;
3429 path->slots[1]++;
3430 return 0;
3431 }
3432
3433 return __push_leaf_right(trans, path, min_data_size, empty, right,
3434 free_space, left_nritems, min_slot);
3435 out_unlock:
3436 btrfs_tree_unlock(right);
3437 free_extent_buffer(right);
3438 return 1;
3439 }
3440
3441 /*
3442 * push some data in the path leaf to the left, trying to free up at
3443 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3444 *
3445 * max_slot can put a limit on how far into the leaf we'll push items. The
3446 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3447 * items
3448 */
3449 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3450 struct btrfs_path *path, int data_size,
3451 int empty, struct extent_buffer *left,
3452 int free_space, u32 right_nritems,
3453 u32 max_slot)
3454 {
3455 struct btrfs_fs_info *fs_info = left->fs_info;
3456 struct btrfs_disk_key disk_key;
3457 struct extent_buffer *right = path->nodes[0];
3458 int i;
3459 int push_space = 0;
3460 int push_items = 0;
3461 u32 old_left_nritems;
3462 u32 nr;
3463 int ret = 0;
3464 u32 this_item_size;
3465 u32 old_left_item_size;
3466 struct btrfs_map_token token;
3467
3468 if (empty)
3469 nr = min(right_nritems, max_slot);
3470 else
3471 nr = min(right_nritems - 1, max_slot);
3472
3473 for (i = 0; i < nr; i++) {
3474 if (!empty && push_items > 0) {
3475 if (path->slots[0] < i)
3476 break;
3477 if (path->slots[0] == i) {
3478 int space = btrfs_leaf_free_space(right);
3479
3480 if (space + push_space * 2 > free_space)
3481 break;
3482 }
3483 }
3484
3485 if (path->slots[0] == i)
3486 push_space += data_size;
3487
3488 this_item_size = btrfs_item_size(right, i);
3489 if (this_item_size + sizeof(struct btrfs_item) + push_space >
3490 free_space)
3491 break;
3492
3493 push_items++;
3494 push_space += this_item_size + sizeof(struct btrfs_item);
3495 }
3496
3497 if (push_items == 0) {
3498 ret = 1;
3499 goto out;
3500 }
3501 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3502
3503 /* push data from right to left */
3504 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items);
3505
3506 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3507 btrfs_item_offset(right, push_items - 1);
3508
3509 copy_leaf_data(left, right, leaf_data_end(left) - push_space,
3510 btrfs_item_offset(right, push_items - 1), push_space);
3511 old_left_nritems = btrfs_header_nritems(left);
3512 BUG_ON(old_left_nritems <= 0);
3513
3514 btrfs_init_map_token(&token, left);
3515 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
3516 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3517 u32 ioff;
3518
3519 ioff = btrfs_token_item_offset(&token, i);
3520 btrfs_set_token_item_offset(&token, i,
3521 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3522 }
3523 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3524
3525 /* fixup right node */
3526 if (push_items > right_nritems)
3527 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3528 right_nritems);
3529
3530 if (push_items < right_nritems) {
3531 push_space = btrfs_item_offset(right, push_items - 1) -
3532 leaf_data_end(right);
3533 memmove_leaf_data(right,
3534 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3535 leaf_data_end(right), push_space);
3536
3537 memmove_leaf_items(right, 0, push_items,
3538 btrfs_header_nritems(right) - push_items);
3539 }
3540
3541 btrfs_init_map_token(&token, right);
3542 right_nritems -= push_items;
3543 btrfs_set_header_nritems(right, right_nritems);
3544 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3545 for (i = 0; i < right_nritems; i++) {
3546 push_space = push_space - btrfs_token_item_size(&token, i);
3547 btrfs_set_token_item_offset(&token, i, push_space);
3548 }
3549
3550 btrfs_mark_buffer_dirty(trans, left);
3551 if (right_nritems)
3552 btrfs_mark_buffer_dirty(trans, right);
3553 else
3554 btrfs_clear_buffer_dirty(trans, right);
3555
3556 btrfs_item_key(right, &disk_key, 0);
3557 fixup_low_keys(trans, path, &disk_key, 1);
3558
3559 /* then fixup the leaf pointer in the path */
3560 if (path->slots[0] < push_items) {
3561 path->slots[0] += old_left_nritems;
3562 btrfs_tree_unlock(path->nodes[0]);
3563 free_extent_buffer(path->nodes[0]);
3564 path->nodes[0] = left;
3565 path->slots[1] -= 1;
3566 } else {
3567 btrfs_tree_unlock(left);
3568 free_extent_buffer(left);
3569 path->slots[0] -= push_items;
3570 }
3571 BUG_ON(path->slots[0] < 0);
3572 return ret;
3573 out:
3574 btrfs_tree_unlock(left);
3575 free_extent_buffer(left);
3576 return ret;
3577 }
3578
3579 /*
3580 * push some data in the path leaf to the left, trying to free up at
3581 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3582 *
3583 * max_slot can put a limit on how far into the leaf we'll push items. The
3584 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3585 * items
3586 */
3587 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3588 *root, struct btrfs_path *path, int min_data_size,
3589 int data_size, int empty, u32 max_slot)
3590 {
3591 struct extent_buffer *right = path->nodes[0];
3592 struct extent_buffer *left;
3593 int slot;
3594 int free_space;
3595 u32 right_nritems;
3596 int ret = 0;
3597
3598 slot = path->slots[1];
3599 if (slot == 0)
3600 return 1;
3601 if (!path->nodes[1])
3602 return 1;
3603
3604 right_nritems = btrfs_header_nritems(right);
3605 if (right_nritems == 0)
3606 return 1;
3607
3608 btrfs_assert_tree_write_locked(path->nodes[1]);
3609
3610 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3611 if (IS_ERR(left))
3612 return PTR_ERR(left);
3613
3614 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3615
3616 free_space = btrfs_leaf_free_space(left);
3617 if (free_space < data_size) {
3618 ret = 1;
3619 goto out;
3620 }
3621
3622 ret = btrfs_cow_block(trans, root, left,
3623 path->nodes[1], slot - 1, &left,
3624 BTRFS_NESTING_LEFT_COW);
3625 if (ret) {
3626 /* we hit -ENOSPC, but it isn't fatal here */
3627 if (ret == -ENOSPC)
3628 ret = 1;
3629 goto out;
3630 }
3631
3632 if (check_sibling_keys(left, right)) {
3633 ret = -EUCLEAN;
3634 btrfs_abort_transaction(trans, ret);
3635 goto out;
3636 }
3637 return __push_leaf_left(trans, path, min_data_size, empty, left,
3638 free_space, right_nritems, max_slot);
3639 out:
3640 btrfs_tree_unlock(left);
3641 free_extent_buffer(left);
3642 return ret;
3643 }
3644
3645 /*
3646 * Copy the items from slot 'mid' onwards, together with their data, from
3647 * leaf 'l' into the freshly allocated leaf 'right' as part of a leaf split.
3648 */
3649 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
3650 struct btrfs_path *path,
3651 struct extent_buffer *l,
3652 struct extent_buffer *right,
3653 int slot, int mid, int nritems)
3654 {
3655 struct btrfs_fs_info *fs_info = trans->fs_info;
3656 int data_copy_size;
3657 int rt_data_off;
3658 int i;
3659 int ret;
3660 struct btrfs_disk_key disk_key;
3661 struct btrfs_map_token token;
3662
3663 nritems = nritems - mid;
3664 btrfs_set_header_nritems(right, nritems);
3665 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
3666
3667 copy_leaf_items(right, l, 0, mid, nritems);
3668
3669 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size,
3670 leaf_data_end(l), data_copy_size);
3671
3672 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
3673
3674 btrfs_init_map_token(&token, right);
3675 for (i = 0; i < nritems; i++) {
3676 u32 ioff;
3677
3678 ioff = btrfs_token_item_offset(&token, i);
3679 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
3680 }
3681
3682 btrfs_set_header_nritems(l, mid);
3683 btrfs_item_key(right, &disk_key, 0);
3684 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3685 if (ret < 0)
3686 return ret;
3687
3688 btrfs_mark_buffer_dirty(trans, right);
3689 btrfs_mark_buffer_dirty(trans, l);
3690 BUG_ON(path->slots[0] != slot);
3691
3692 if (mid <= slot) {
3693 btrfs_tree_unlock(path->nodes[0]);
3694 free_extent_buffer(path->nodes[0]);
3695 path->nodes[0] = right;
3696 path->slots[0] -= mid;
3697 path->slots[1] += 1;
3698 } else {
3699 btrfs_tree_unlock(right);
3700 free_extent_buffer(right);
3701 }
3702
3703 BUG_ON(path->slots[0] < 0);
3704
3705 return 0;
3706 }
3707
3708 /*
3709 * double splits happen when we need to insert a big item in the middle
3710 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3711 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3712 * A B C
3713 *
3714 * We avoid this by trying to push the items on either side of our target
3715 * into the adjacent leaves. If all goes well we can avoid the double split
3716 * completely.
3717 */
3718 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3719 struct btrfs_root *root,
3720 struct btrfs_path *path,
3721 int data_size)
3722 {
3723 int ret;
3724 int progress = 0;
3725 int slot;
3726 u32 nritems;
3727 int space_needed = data_size;
3728
3729 slot = path->slots[0];
3730 if (slot < btrfs_header_nritems(path->nodes[0]))
3731 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3732
3733 /*
3734 * try to push all the items after our slot into the
3735 * right leaf
3736 */
3737 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3738 if (ret < 0)
3739 return ret;
3740
3741 if (ret == 0)
3742 progress++;
3743
3744 nritems = btrfs_header_nritems(path->nodes[0]);
3745 /*
3746 * our goal is to get our slot at the start or end of a leaf. If
3747 * we've done so we're done
3748 */
3749 if (path->slots[0] == 0 || path->slots[0] == nritems)
3750 return 0;
3751
3752 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3753 return 0;
3754
3755 /* try to push all the items before our slot into the next leaf */
3756 slot = path->slots[0];
3757 space_needed = data_size;
3758 if (slot > 0)
3759 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3760 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3761 if (ret < 0)
3762 return ret;
3763
3764 if (ret == 0)
3765 progress++;
3766
3767 if (progress)
3768 return 0;
3769 return 1;
3770 }
3771
3772 /*
3773 * split the path's leaf in two, making sure there is at least data_size
3774 * available for the resulting leaf level of the path.
3775 *
3776 * returns 0 if all went well and < 0 on failure.
3777 */
3778 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3779 struct btrfs_root *root,
3780 const struct btrfs_key *ins_key,
3781 struct btrfs_path *path, int data_size,
3782 int extend)
3783 {
3784 struct btrfs_disk_key disk_key;
3785 struct extent_buffer *l;
3786 u32 nritems;
3787 int mid;
3788 int slot;
3789 struct extent_buffer *right;
3790 struct btrfs_fs_info *fs_info = root->fs_info;
3791 int ret = 0;
3792 int wret;
3793 int split;
3794 int num_doubles = 0;
3795 int tried_avoid_double = 0;
3796
3797 l = path->nodes[0];
3798 slot = path->slots[0];
3799 if (extend && data_size + btrfs_item_size(l, slot) +
3800 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3801 return -EOVERFLOW;
3802
3803 /* first try to make some room by pushing left and right */
3804 if (data_size && path->nodes[1]) {
3805 int space_needed = data_size;
3806
3807 if (slot < btrfs_header_nritems(l))
3808 space_needed -= btrfs_leaf_free_space(l);
3809
3810 wret = push_leaf_right(trans, root, path, space_needed,
3811 space_needed, 0, 0);
3812 if (wret < 0)
3813 return wret;
3814 if (wret) {
3815 space_needed = data_size;
3816 if (slot > 0)
3817 space_needed -= btrfs_leaf_free_space(l);
3818 wret = push_leaf_left(trans, root, path, space_needed,
3819 space_needed, 0, (u32)-1);
3820 if (wret < 0)
3821 return wret;
3822 }
3823 l = path->nodes[0];
3824
3825 /* did the pushes work? */
3826 if (btrfs_leaf_free_space(l) >= data_size)
3827 return 0;
3828 }
3829
3830 if (!path->nodes[1]) {
3831 ret = insert_new_root(trans, root, path, 1);
3832 if (ret)
3833 return ret;
3834 }
3835 again:
3836 split = 1;
3837 l = path->nodes[0];
3838 slot = path->slots[0];
3839 nritems = btrfs_header_nritems(l);
3840 mid = (nritems + 1) / 2;
3841
3842 if (mid <= slot) {
3843 if (nritems == 1 ||
3844 leaf_space_used(l, mid, nritems - mid) + data_size >
3845 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3846 if (slot >= nritems) {
3847 split = 0;
3848 } else {
3849 mid = slot;
3850 if (mid != nritems &&
3851 leaf_space_used(l, mid, nritems - mid) +
3852 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3853 if (data_size && !tried_avoid_double)
3854 goto push_for_double;
3855 split = 2;
3856 }
3857 }
3858 }
3859 } else {
3860 if (leaf_space_used(l, 0, mid) + data_size >
3861 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3862 if (!extend && data_size && slot == 0) {
3863 split = 0;
3864 } else if ((extend || !data_size) && slot == 0) {
3865 mid = 1;
3866 } else {
3867 mid = slot;
3868 if (mid != nritems &&
3869 leaf_space_used(l, mid, nritems - mid) +
3870 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3871 if (data_size && !tried_avoid_double)
3872 goto push_for_double;
3873 split = 2;
3874 }
3875 }
3876 }
3877 }
3878
3879 if (split == 0)
3880 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3881 else
3882 btrfs_item_key(l, &disk_key, mid);
3883
3884 /*
3885 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3886 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3887 * subclasses, which is 8 at the time of this patch, and we've maxed it
3888 * out. In the future we could add a
3889 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3890 * use BTRFS_NESTING_NEW_ROOT.
3891 */
3892 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3893 &disk_key, 0, l->start, 0,
3894 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3895 BTRFS_NESTING_SPLIT);
3896 if (IS_ERR(right))
3897 return PTR_ERR(right);
3898
3899 root_add_used(root, fs_info->nodesize);
3900
3901 if (split == 0) {
3902 if (mid <= slot) {
3903 btrfs_set_header_nritems(right, 0);
3904 ret = insert_ptr(trans, path, &disk_key,
3905 right->start, path->slots[1] + 1, 1);
3906 if (ret < 0) {
3907 btrfs_tree_unlock(right);
3908 free_extent_buffer(right);
3909 return ret;
3910 }
3911 btrfs_tree_unlock(path->nodes[0]);
3912 free_extent_buffer(path->nodes[0]);
3913 path->nodes[0] = right;
3914 path->slots[0] = 0;
3915 path->slots[1] += 1;
3916 } else {
3917 btrfs_set_header_nritems(right, 0);
3918 ret = insert_ptr(trans, path, &disk_key,
3919 right->start, path->slots[1], 1);
3920 if (ret < 0) {
3921 btrfs_tree_unlock(right);
3922 free_extent_buffer(right);
3923 return ret;
3924 }
3925 btrfs_tree_unlock(path->nodes[0]);
3926 free_extent_buffer(path->nodes[0]);
3927 path->nodes[0] = right;
3928 path->slots[0] = 0;
3929 if (path->slots[1] == 0)
3930 fixup_low_keys(trans, path, &disk_key, 1);
3931 }
3932 /*
3933 * We create a new leaf 'right' for the required ins_len and
3934 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3935 * the content of ins_len to 'right'.
3936 */
3937 return ret;
3938 }
3939
3940 ret = copy_for_split(trans, path, l, right, slot, mid, nritems);
3941 if (ret < 0) {
3942 btrfs_tree_unlock(right);
3943 free_extent_buffer(right);
3944 return ret;
3945 }
3946
3947 if (split == 2) {
3948 BUG_ON(num_doubles != 0);
3949 num_doubles++;
3950 goto again;
3951 }
3952
3953 return 0;
3954
3955 push_for_double:
3956 push_for_double_split(trans, root, path, data_size);
3957 tried_avoid_double = 1;
3958 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3959 return 0;
3960 goto again;
3961 }
3962
3963 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3964 struct btrfs_root *root,
3965 struct btrfs_path *path, int ins_len)
3966 {
3967 struct btrfs_key key;
3968 struct extent_buffer *leaf;
3969 struct btrfs_file_extent_item *fi;
3970 u64 extent_len = 0;
3971 u32 item_size;
3972 int ret;
3973
3974 leaf = path->nodes[0];
3975 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3976
3977 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3978 key.type != BTRFS_EXTENT_CSUM_KEY);
3979
3980 if (btrfs_leaf_free_space(leaf) >= ins_len)
3981 return 0;
3982
3983 item_size = btrfs_item_size(leaf, path->slots[0]);
3984 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3985 fi = btrfs_item_ptr(leaf, path->slots[0],
3986 struct btrfs_file_extent_item);
3987 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3988 }
3989 btrfs_release_path(path);
3990
3991 path->keep_locks = 1;
3992 path->search_for_split = 1;
3993 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3994 path->search_for_split = 0;
3995 if (ret > 0)
3996 ret = -EAGAIN;
3997 if (ret < 0)
3998 goto err;
3999
4000 ret = -EAGAIN;
4001 leaf = path->nodes[0];
4002 /* if our item isn't there, return now */
4003 if (item_size != btrfs_item_size(leaf, path->slots[0]))
4004 goto err;
4005
4006 /* the leaf has changed, it now has room. return now */
4007 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
4008 goto err;
4009
4010 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4011 fi = btrfs_item_ptr(leaf, path->slots[0],
4012 struct btrfs_file_extent_item);
4013 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4014 goto err;
4015 }
4016
4017 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4018 if (ret)
4019 goto err;
4020
4021 path->keep_locks = 0;
4022 btrfs_unlock_up_safe(path, 1);
4023 return 0;
4024 err:
4025 path->keep_locks = 0;
4026 return ret;
4027 }
4028
4029 static noinline int split_item(struct btrfs_trans_handle *trans,
4030 struct btrfs_path *path,
4031 const struct btrfs_key *new_key,
4032 unsigned long split_offset)
4033 {
4034 struct extent_buffer *leaf;
4035 int orig_slot, slot;
4036 char *buf;
4037 u32 nritems;
4038 u32 item_size;
4039 u32 orig_offset;
4040 struct btrfs_disk_key disk_key;
4041
4042 leaf = path->nodes[0];
4043 /*
4044 * Shouldn't happen because the caller must have previously called
4045 * setup_leaf_for_split() to make room for the new item in the leaf.
4046 */
4047 if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
4048 return -ENOSPC;
4049
4050 orig_slot = path->slots[0];
4051 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
4052 item_size = btrfs_item_size(leaf, path->slots[0]);
4053
4054 buf = kmalloc(item_size, GFP_NOFS);
4055 if (!buf)
4056 return -ENOMEM;
4057
4058 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4059 path->slots[0]), item_size);
4060
4061 slot = path->slots[0] + 1;
4062 nritems = btrfs_header_nritems(leaf);
4063 if (slot != nritems) {
4064 /* shift the items */
4065 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
4066 }
4067
4068 btrfs_cpu_key_to_disk(&disk_key, new_key);
4069 btrfs_set_item_key(leaf, &disk_key, slot);
4070
4071 btrfs_set_item_offset(leaf, slot, orig_offset);
4072 btrfs_set_item_size(leaf, slot, item_size - split_offset);
4073
4074 btrfs_set_item_offset(leaf, orig_slot,
4075 orig_offset + item_size - split_offset);
4076 btrfs_set_item_size(leaf, orig_slot, split_offset);
4077
4078 btrfs_set_header_nritems(leaf, nritems + 1);
4079
4080 /* write the data for the start of the original item */
4081 write_extent_buffer(leaf, buf,
4082 btrfs_item_ptr_offset(leaf, path->slots[0]),
4083 split_offset);
4084
4085 /* write the data for the new item */
4086 write_extent_buffer(leaf, buf + split_offset,
4087 btrfs_item_ptr_offset(leaf, slot),
4088 item_size - split_offset);
4089 btrfs_mark_buffer_dirty(trans, leaf);
4090
4091 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4092 kfree(buf);
4093 return 0;
4094 }
4095
4096 /*
4097 * This function splits a single item into two items,
4098 * giving 'new_key' to the new item and splitting the
4099 * old one at split_offset (from the start of the item).
4100 *
4101 * The path may be released by this operation. After
4102 * the split, the path is pointing to the old item. The
4103 * new item is going to be in the same node as the old one.
4104 *
4105 * Note, the item being split must be small enough to live alone on
4106 * a tree block with room for one extra struct btrfs_item
4107 *
4108 * This allows us to split the item in place, keeping a lock on the
4109 * leaf the entire time.
4110 */
4111 int btrfs_split_item(struct btrfs_trans_handle *trans,
4112 struct btrfs_root *root,
4113 struct btrfs_path *path,
4114 const struct btrfs_key *new_key,
4115 unsigned long split_offset)
4116 {
4117 int ret;
4118 ret = setup_leaf_for_split(trans, root, path,
4119 sizeof(struct btrfs_item));
4120 if (ret)
4121 return ret;
4122
4123 ret = split_item(trans, path, new_key, split_offset);
4124 return ret;
4125 }
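/*
 * Illustrative sketch of how a caller might split an item in place. The key
 * values and the split offset below are made up for the example; the path is
 * assumed to already point at the item to be split.
 *
 *	struct btrfs_key new_key = {
 *		.objectid = ino,
 *		.type = BTRFS_EXTENT_DATA_KEY,
 *		.offset = old_key.offset + split_offset,
 *	};
 *	int ret;
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *	if (ret)
 *		return ret;
 *	// path->slots[0] still points at the (now truncated) original item,
 *	// the new item carrying 'new_key' sits in the next slot of the same leaf.
 */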
4126
4127 /*
4128 * make the item pointed to by the path smaller. new_size indicates
4129 * how small to make it, and from_end tells us if we just chop bytes
4130 * off the end of the item or if we shift the item to chop bytes off
4131 * the front.
4132 */
4133 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4134 struct btrfs_path *path, u32 new_size, int from_end)
4135 {
4136 int slot;
4137 struct extent_buffer *leaf;
4138 u32 nritems;
4139 unsigned int data_end;
4140 unsigned int old_data_start;
4141 unsigned int old_size;
4142 unsigned int size_diff;
4143 int i;
4144 struct btrfs_map_token token;
4145
4146 leaf = path->nodes[0];
4147 slot = path->slots[0];
4148
4149 old_size = btrfs_item_size(leaf, slot);
4150 if (old_size == new_size)
4151 return;
4152
4153 nritems = btrfs_header_nritems(leaf);
4154 data_end = leaf_data_end(leaf);
4155
4156 old_data_start = btrfs_item_offset(leaf, slot);
4157
4158 size_diff = old_size - new_size;
4159
4160 BUG_ON(slot < 0);
4161 BUG_ON(slot >= nritems);
4162
4163 /*
4164 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4165 */
4166 /* first correct the data pointers */
4167 btrfs_init_map_token(&token, leaf);
4168 for (i = slot; i < nritems; i++) {
4169 u32 ioff;
4170
4171 ioff = btrfs_token_item_offset(&token, i);
4172 btrfs_set_token_item_offset(&token, i, ioff + size_diff);
4173 }
4174
4175 /* shift the data */
4176 if (from_end) {
4177 memmove_leaf_data(leaf, data_end + size_diff, data_end,
4178 old_data_start + new_size - data_end);
4179 } else {
4180 struct btrfs_disk_key disk_key;
4181 u64 offset;
4182
4183 btrfs_item_key(leaf, &disk_key, slot);
4184
4185 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4186 unsigned long ptr;
4187 struct btrfs_file_extent_item *fi;
4188
4189 fi = btrfs_item_ptr(leaf, slot,
4190 struct btrfs_file_extent_item);
4191 fi = (struct btrfs_file_extent_item *)(
4192 (unsigned long)fi - size_diff);
4193
4194 if (btrfs_file_extent_type(leaf, fi) ==
4195 BTRFS_FILE_EXTENT_INLINE) {
4196 ptr = btrfs_item_ptr_offset(leaf, slot);
4197 memmove_extent_buffer(leaf, ptr,
4198 (unsigned long)fi,
4199 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4200 }
4201 }
4202
4203 memmove_leaf_data(leaf, data_end + size_diff, data_end,
4204 old_data_start - data_end);
4205
4206 offset = btrfs_disk_key_offset(&disk_key);
4207 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4208 btrfs_set_item_key(leaf, &disk_key, slot);
4209 if (slot == 0)
4210 fixup_low_keys(trans, path, &disk_key, 1);
4211 }
4212
4213 btrfs_set_item_size(leaf, slot, new_size);
4214 btrfs_mark_buffer_dirty(trans, leaf);
4215
4216 if (btrfs_leaf_free_space(leaf) < 0) {
4217 btrfs_print_leaf(leaf);
4218 BUG();
4219 }
4220 }
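/*
 * Illustrative sketch of shrinking an item in place. With from_end == 1 the
 * tail of the item data is dropped; with from_end == 0 the leading bytes are
 * dropped and the item's key offset is bumped by the amount removed. The
 * 'shrink_by' value below is made up for the example.
 *
 *	u32 new_size = btrfs_item_size(path->nodes[0], path->slots[0]) - shrink_by;
 *
 *	btrfs_truncate_item(trans, path, new_size, 1);	// chop bytes off the end
 */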
4221
4222 /*
4223 * make the item pointed to by the path bigger, data_size is the added size.
4224 */
4225 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4226 struct btrfs_path *path, u32 data_size)
4227 {
4228 int slot;
4229 struct extent_buffer *leaf;
4230 u32 nritems;
4231 unsigned int data_end;
4232 unsigned int old_data;
4233 unsigned int old_size;
4234 int i;
4235 struct btrfs_map_token token;
4236
4237 leaf = path->nodes[0];
4238
4239 nritems = btrfs_header_nritems(leaf);
4240 data_end = leaf_data_end(leaf);
4241
4242 if (btrfs_leaf_free_space(leaf) < data_size) {
4243 btrfs_print_leaf(leaf);
4244 BUG();
4245 }
4246 slot = path->slots[0];
4247 old_data = btrfs_item_data_end(leaf, slot);
4248
4249 BUG_ON(slot < 0);
4250 if (slot >= nritems) {
4251 btrfs_print_leaf(leaf);
4252 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4253 slot, nritems);
4254 BUG();
4255 }
4256
4257 /*
4258 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4259 */
4260 /* first correct the data pointers */
4261 btrfs_init_map_token(&token, leaf);
4262 for (i = slot; i < nritems; i++) {
4263 u32 ioff;
4264
4265 ioff = btrfs_token_item_offset(&token, i);
4266 btrfs_set_token_item_offset(&token, i, ioff - data_size);
4267 }
4268
4269 /* shift the data */
4270 memmove_leaf_data(leaf, data_end - data_size, data_end,
4271 old_data - data_end);
4272
4273 data_end = old_data;
4274 old_size = btrfs_item_size(leaf, slot);
4275 btrfs_set_item_size(leaf, slot, old_size + data_size);
4276 btrfs_mark_buffer_dirty(trans, leaf);
4277
4278 if (btrfs_leaf_free_space(leaf) < 0) {
4279 btrfs_print_leaf(leaf);
4280 BUG();
4281 }
4282 }
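/*
 * Illustrative sketch of growing an item. The caller must already hold a leaf
 * with at least 'extra' bytes free (for example by searching with an ins_len
 * that accounts for the growth); the new bytes land at the end of the item and
 * must be written by the caller. The names 'extra' and 'src_buf' are made up.
 *
 *	struct extent_buffer *leaf = path->nodes[0];
 *	u32 old_size = btrfs_item_size(leaf, path->slots[0]);
 *
 *	btrfs_extend_item(trans, path, extra);
 *	write_extent_buffer(leaf, src_buf,
 *			    btrfs_item_ptr_offset(leaf, path->slots[0]) + old_size,
 *			    extra);
 */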
4283
4284 /*
4285 * Make space in the node before inserting one or more items.
4286 *
4287 * @trans: transaction handle
4288 * @root: root we are inserting items to
4289 * @path: points to the leaf/slot where we are going to insert new items
4290 * @batch: information about the batch of items to insert
4291 *
4292 * Main purpose is to save stack depth by doing the bulk of the work in a
4293 * function that doesn't call btrfs_search_slot
4294 */
4295 static void setup_items_for_insert(struct btrfs_trans_handle *trans,
4296 struct btrfs_root *root, struct btrfs_path *path,
4297 const struct btrfs_item_batch *batch)
4298 {
4299 struct btrfs_fs_info *fs_info = root->fs_info;
4300 int i;
4301 u32 nritems;
4302 unsigned int data_end;
4303 struct btrfs_disk_key disk_key;
4304 struct extent_buffer *leaf;
4305 int slot;
4306 struct btrfs_map_token token;
4307 u32 total_size;
4308
4309 /*
4310 * Before anything else, update keys in the parent and other ancestors
4311 * if needed, then release the write locks on them, so that other tasks
4312 * can use them while we modify the leaf.
4313 */
4314 if (path->slots[0] == 0) {
4315 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
4316 fixup_low_keys(trans, path, &disk_key, 1);
4317 }
4318 btrfs_unlock_up_safe(path, 1);
4319
4320 leaf = path->nodes[0];
4321 slot = path->slots[0];
4322
4323 nritems = btrfs_header_nritems(leaf);
4324 data_end = leaf_data_end(leaf);
4325 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4326
4327 if (btrfs_leaf_free_space(leaf) < total_size) {
4328 btrfs_print_leaf(leaf);
4329 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4330 total_size, btrfs_leaf_free_space(leaf));
4331 BUG();
4332 }
4333
4334 btrfs_init_map_token(&token, leaf);
4335 if (slot != nritems) {
4336 unsigned int old_data = btrfs_item_data_end(leaf, slot);
4337
4338 if (old_data < data_end) {
4339 btrfs_print_leaf(leaf);
4340 btrfs_crit(fs_info,
4341 "item at slot %d with data offset %u beyond data end of leaf %u",
4342 slot, old_data, data_end);
4343 BUG();
4344 }
4345 /*
4346 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4347 */
4348 /* first correct the data pointers */
4349 for (i = slot; i < nritems; i++) {
4350 u32 ioff;
4351
4352 ioff = btrfs_token_item_offset(&token, i);
4353 btrfs_set_token_item_offset(&token, i,
4354 ioff - batch->total_data_size);
4355 }
4356 /* shift the items */
4357 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);
4358
4359 /* shift the data */
4360 memmove_leaf_data(leaf, data_end - batch->total_data_size,
4361 data_end, old_data - data_end);
4362 data_end = old_data;
4363 }
4364
4365 /* setup the item for the new data */
4366 for (i = 0; i < batch->nr; i++) {
4367 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
4368 btrfs_set_item_key(leaf, &disk_key, slot + i);
4369 data_end -= batch->data_sizes[i];
4370 btrfs_set_token_item_offset(&token, slot + i, data_end);
4371 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
4372 }
4373
4374 btrfs_set_header_nritems(leaf, nritems + batch->nr);
4375 btrfs_mark_buffer_dirty(trans, leaf);
4376
4377 if (btrfs_leaf_free_space(leaf) < 0) {
4378 btrfs_print_leaf(leaf);
4379 BUG();
4380 }
4381 }
4382
4383 /*
4384 * Insert a new item into a leaf.
4385 *
4386 * @trans: Transaction handle.
4387 * @root: The root of the btree.
4388 * @path: A path pointing to the target leaf and slot.
4389 * @key: The key of the new item.
4390 * @data_size: The size of the data associated with the new key.
4391 */
4392 void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
4393 struct btrfs_root *root,
4394 struct btrfs_path *path,
4395 const struct btrfs_key *key,
4396 u32 data_size)
4397 {
4398 struct btrfs_item_batch batch;
4399
4400 batch.keys = key;
4401 batch.data_sizes = &data_size;
4402 batch.total_data_size = data_size;
4403 batch.nr = 1;
4404
4405 setup_items_for_insert(trans, root, path, &batch);
4406 }
4407
4408 /*
4409 * Given a key and some data, insert items into the tree.
4410 * This does all the path init required, making room in the tree if needed.
4411 */
4412 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4413 struct btrfs_root *root,
4414 struct btrfs_path *path,
4415 const struct btrfs_item_batch *batch)
4416 {
4417 int ret = 0;
4418 int slot;
4419 u32 total_size;
4420
4421 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4422 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
4423 if (ret == 0)
4424 return -EEXIST;
4425 if (ret < 0)
4426 return ret;
4427
4428 slot = path->slots[0];
4429 BUG_ON(slot < 0);
4430
4431 setup_items_for_insert(trans, root, path, batch);
4432 return 0;
4433 }
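/*
 * Illustrative sketch of a batch insert. Two items are reserved in one leaf
 * operation and their data is written afterwards; all names below (the keys,
 * sizes and source buffers) are made up for the example.
 *
 *	struct btrfs_key keys[2] = { first_key, second_key };
 *	u32 sizes[2] = { first_len, second_len };
 *	struct btrfs_item_batch batch = {
 *		.keys = keys,
 *		.data_sizes = sizes,
 *		.total_data_size = first_len + second_len,
 *		.nr = 2,
 *	};
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 *	if (!ret) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		write_extent_buffer(leaf, first_buf,
 *				    btrfs_item_ptr_offset(leaf, path->slots[0]),
 *				    first_len);
 *		write_extent_buffer(leaf, second_buf,
 *				    btrfs_item_ptr_offset(leaf, path->slots[0] + 1),
 *				    second_len);
 *		btrfs_mark_buffer_dirty(trans, leaf);
 *	}
 *	btrfs_free_path(path);
 */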
4434
4435 /*
4436 * Given a key and some data, insert an item into the tree.
4437 * This does all the path init required, making room in the tree if needed.
4438 */
4439 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4440 const struct btrfs_key *cpu_key, void *data,
4441 u32 data_size)
4442 {
4443 int ret = 0;
4444 struct btrfs_path *path;
4445 struct extent_buffer *leaf;
4446 unsigned long ptr;
4447
4448 path = btrfs_alloc_path();
4449 if (!path)
4450 return -ENOMEM;
4451 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4452 if (!ret) {
4453 leaf = path->nodes[0];
4454 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4455 write_extent_buffer(leaf, data, ptr, data_size);
4456 btrfs_mark_buffer_dirty(trans, leaf);
4457 }
4458 btrfs_free_path(path);
4459 return ret;
4460 }
4461
4462 /*
4463 * This function duplicates an item, giving 'new_key' to the new item.
4464 * It guarantees both items live in the same tree leaf and the new item is
4465 * contiguous with the original item.
4466 *
4467 * This allows us to split a file extent in place, keeping a lock on the leaf
4468 * the entire time.
4469 */
4470 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4471 struct btrfs_root *root,
4472 struct btrfs_path *path,
4473 const struct btrfs_key *new_key)
4474 {
4475 struct extent_buffer *leaf;
4476 int ret;
4477 u32 item_size;
4478
4479 leaf = path->nodes[0];
4480 item_size = btrfs_item_size(leaf, path->slots[0]);
4481 ret = setup_leaf_for_split(trans, root, path,
4482 item_size + sizeof(struct btrfs_item));
4483 if (ret)
4484 return ret;
4485
4486 path->slots[0]++;
4487 btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
4488 leaf = path->nodes[0];
4489 memcpy_extent_buffer(leaf,
4490 btrfs_item_ptr_offset(leaf, path->slots[0]),
4491 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4492 item_size);
4493 return 0;
4494 }
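/*
 * Illustrative sketch: duplicating a file extent item under a new key, the
 * way an in-place extent split might use it. 'path' is assumed to already
 * point at the original item and 'new_offset' is a made-up value.
 *
 *	struct btrfs_key new_key;
 *	int ret;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset = new_offset;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	if (ret)
 *		return ret;
 *	// path->slots[0] now points at the copy; both items share the leaf.
 */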
4495
4496 /*
4497 * delete the pointer from a given node.
4498 *
4499 * the tree should have been previously balanced so the deletion does not
4500 * empty a node.
4501 *
4502 * This is exported for use inside btrfs-progs, don't un-export it.
4503 */
4504 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4505 struct btrfs_path *path, int level, int slot)
4506 {
4507 struct extent_buffer *parent = path->nodes[level];
4508 u32 nritems;
4509 int ret;
4510
4511 nritems = btrfs_header_nritems(parent);
4512 if (slot != nritems - 1) {
4513 if (level) {
4514 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4515 slot + 1, nritems - slot - 1);
4516 if (ret < 0) {
4517 btrfs_abort_transaction(trans, ret);
4518 return ret;
4519 }
4520 }
4521 memmove_extent_buffer(parent,
4522 btrfs_node_key_ptr_offset(parent, slot),
4523 btrfs_node_key_ptr_offset(parent, slot + 1),
4524 sizeof(struct btrfs_key_ptr) *
4525 (nritems - slot - 1));
4526 } else if (level) {
4527 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4528 BTRFS_MOD_LOG_KEY_REMOVE);
4529 if (ret < 0) {
4530 btrfs_abort_transaction(trans, ret);
4531 return ret;
4532 }
4533 }
4534
4535 nritems--;
4536 btrfs_set_header_nritems(parent, nritems);
4537 if (nritems == 0 && parent == root->node) {
4538 BUG_ON(btrfs_header_level(root->node) != 1);
4539 /* just turn the root into a leaf and break */
4540 btrfs_set_header_level(root->node, 0);
4541 } else if (slot == 0) {
4542 struct btrfs_disk_key disk_key;
4543
4544 btrfs_node_key(parent, &disk_key, 0);
4545 fixup_low_keys(trans, path, &disk_key, level + 1);
4546 }
4547 btrfs_mark_buffer_dirty(trans, parent);
4548 return 0;
4549 }
4550
4551 /*
4552 * a helper function to delete the leaf pointed to by path->slots[1] and
4553 * path->nodes[1].
4554 *
4555 * This deletes the pointer in path->nodes[1] and frees the leaf
4556 * block extent. zero is returned if it all worked out, < 0 otherwise.
4557 *
4558 * The path must have already been setup for deleting the leaf, including
4559 * all the proper balancing. path->nodes[1] must be locked.
4560 */
4561 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
4562 struct btrfs_root *root,
4563 struct btrfs_path *path,
4564 struct extent_buffer *leaf)
4565 {
4566 int ret;
4567
4568 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4569 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]);
4570 if (ret < 0)
4571 return ret;
4572
4573 /*
4574 * btrfs_free_extent is expensive, we want to make sure we
4575 * aren't holding any locks when we call it
4576 */
4577 btrfs_unlock_up_safe(path, 0);
4578
4579 root_sub_used(root, leaf->len);
4580
4581 atomic_inc(&leaf->refs);
4582 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4583 free_extent_buffer_stale(leaf);
4584 return 0;
4585 }
4586 /*
4587 * delete the item at the leaf level in path. If that empties
4588 * the leaf, remove it from the tree
4589 */
4590 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4591 struct btrfs_path *path, int slot, int nr)
4592 {
4593 struct btrfs_fs_info *fs_info = root->fs_info;
4594 struct extent_buffer *leaf;
4595 int ret = 0;
4596 int wret;
4597 u32 nritems;
4598
4599 leaf = path->nodes[0];
4600 nritems = btrfs_header_nritems(leaf);
4601
4602 if (slot + nr != nritems) {
4603 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
4604 const int data_end = leaf_data_end(leaf);
4605 struct btrfs_map_token token;
4606 u32 dsize = 0;
4607 int i;
4608
4609 for (i = 0; i < nr; i++)
4610 dsize += btrfs_item_size(leaf, slot + i);
4611
4612 memmove_leaf_data(leaf, data_end + dsize, data_end,
4613 last_off - data_end);
4614
4615 btrfs_init_map_token(&token, leaf);
4616 for (i = slot + nr; i < nritems; i++) {
4617 u32 ioff;
4618
4619 ioff = btrfs_token_item_offset(&token, i);
4620 btrfs_set_token_item_offset(&token, i, ioff + dsize);
4621 }
4622
4623 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
4624 }
4625 btrfs_set_header_nritems(leaf, nritems - nr);
4626 nritems -= nr;
4627
4628 /* delete the leaf if we've emptied it */
4629 if (nritems == 0) {
4630 if (leaf == root->node) {
4631 btrfs_set_header_level(leaf, 0);
4632 } else {
4633 btrfs_clear_buffer_dirty(trans, leaf);
4634 ret = btrfs_del_leaf(trans, root, path, leaf);
4635 if (ret < 0)
4636 return ret;
4637 }
4638 } else {
4639 int used = leaf_space_used(leaf, 0, nritems);
4640 if (slot == 0) {
4641 struct btrfs_disk_key disk_key;
4642
4643 btrfs_item_key(leaf, &disk_key, 0);
4644 fixup_low_keys(trans, path, &disk_key, 1);
4645 }
4646
4647 /*
4648 * Try to delete the leaf if it is mostly empty. We do this by
4649 * trying to move all its items into its left and right neighbours.
4650 * If we can't move all the items, then we don't delete it - it's
4651 * not ideal, but future insertions might fill the leaf with more
4652 * items, or items from other leaves might be moved later into our
4653 * leaf due to deletions on those leaves.
4654 */
4655 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4656 u32 min_push_space;
4657
4658 /* push_leaf_left fixes the path.
4659 * make sure the path still points to our leaf
4660 * for possible call to btrfs_del_ptr below
4661 */
4662 slot = path->slots[1];
4663 atomic_inc(&leaf->refs);
4664 /*
4665 * We want to be able to at least push one item to the
4666 * left neighbour leaf, and that's the first item.
4667 */
4668 min_push_space = sizeof(struct btrfs_item) +
4669 btrfs_item_size(leaf, 0);
4670 wret = push_leaf_left(trans, root, path, 0,
4671 min_push_space, 1, (u32)-1);
4672 if (wret < 0 && wret != -ENOSPC)
4673 ret = wret;
4674
4675 if (path->nodes[0] == leaf &&
4676 btrfs_header_nritems(leaf)) {
4677 /*
4678 * If we were not able to push all items from our
4679 * leaf to its left neighbour, then attempt to
4680 * either push all the remaining items to the
4681 * right neighbour or none. There's no advantage
4682 * in pushing only some items, instead of all, as
4683 * it's pointless to end up with a leaf having
4684 * too few items while the neighbours can be full
4685 * or nearly full.
4686 */
4687 nritems = btrfs_header_nritems(leaf);
4688 min_push_space = leaf_space_used(leaf, 0, nritems);
4689 wret = push_leaf_right(trans, root, path, 0,
4690 min_push_space, 1, 0);
4691 if (wret < 0 && wret != -ENOSPC)
4692 ret = wret;
4693 }
4694
4695 if (btrfs_header_nritems(leaf) == 0) {
4696 path->slots[1] = slot;
4697 ret = btrfs_del_leaf(trans, root, path, leaf);
4698 if (ret < 0)
4699 return ret;
4700 free_extent_buffer(leaf);
4701 ret = 0;
4702 } else {
4703 /* if we're still in the path, make sure
4704 * we're dirty. Otherwise, one of the
4705 * push_leaf functions must have already
4706 * dirtied this buffer
4707 */
4708 if (path->nodes[0] == leaf)
4709 btrfs_mark_buffer_dirty(trans, leaf);
4710 free_extent_buffer(leaf);
4711 }
4712 } else {
4713 btrfs_mark_buffer_dirty(trans, leaf);
4714 }
4715 }
4716 return ret;
4717 }
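/*
 * Illustrative sketch: deleting a contiguous range of items found by a prior
 * search. The key and the item count are made up; the search must be done
 * with cow enabled (and ins_len of -1 to signal deletion) since the leaf is
 * going to be modified.
 *
 *	ret = btrfs_search_slot(trans, root, &first_key, path, -1, 1);
 *	if (ret < 0)
 *		return ret;
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], nr_items);
 *	btrfs_release_path(path);
 */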
4718
4719 /*
4720 * A helper function to walk down the tree starting at min_key, and looking
4721 * for nodes or leaves that have a minimum transaction id.
4722 * This is used by the btree defrag code, and tree logging
4723 *
4724 * This does not cow, but it does stuff the starting key it finds back
4725 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4726 * key and get a writable path.
4727 *
4728 * This honors path->lowest_level to prevent descent past a given level
4729 * of the tree.
4730 *
4731 * min_trans indicates the oldest transaction that you are interested
4732 * in walking through. Any nodes or leaves older than min_trans are
4733 * skipped over (without reading them).
4734 *
4735 * returns zero if something useful was found, < 0 on error and 1 if there
4736 * was nothing in the tree that matched the search criteria.
4737 */
4738 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4739 struct btrfs_path *path,
4740 u64 min_trans)
4741 {
4742 struct extent_buffer *cur;
4743 struct btrfs_key found_key;
4744 int slot;
4745 int sret;
4746 u32 nritems;
4747 int level;
4748 int ret = 1;
4749 int keep_locks = path->keep_locks;
4750
4751 ASSERT(!path->nowait);
4752 path->keep_locks = 1;
4753 again:
4754 cur = btrfs_read_lock_root_node(root);
4755 level = btrfs_header_level(cur);
4756 WARN_ON(path->nodes[level]);
4757 path->nodes[level] = cur;
4758 path->locks[level] = BTRFS_READ_LOCK;
4759
4760 if (btrfs_header_generation(cur) < min_trans) {
4761 ret = 1;
4762 goto out;
4763 }
4764 while (1) {
4765 nritems = btrfs_header_nritems(cur);
4766 level = btrfs_header_level(cur);
4767 sret = btrfs_bin_search(cur, 0, min_key, &slot);
4768 if (sret < 0) {
4769 ret = sret;
4770 goto out;
4771 }
4772
4773 /* at the lowest level, we're done, setup the path and exit */
4774 if (level == path->lowest_level) {
4775 if (slot >= nritems)
4776 goto find_next_key;
4777 ret = 0;
4778 path->slots[level] = slot;
4779 btrfs_item_key_to_cpu(cur, &found_key, slot);
4780 goto out;
4781 }
4782 if (sret && slot > 0)
4783 slot--;
4784 /*
4785 * check this node pointer against the min_trans parameters.
4786 * If it is too old, skip to the next one.
4787 */
4788 while (slot < nritems) {
4789 u64 gen;
4790
4791 gen = btrfs_node_ptr_generation(cur, slot);
4792 if (gen < min_trans) {
4793 slot++;
4794 continue;
4795 }
4796 break;
4797 }
4798 find_next_key:
4799 /*
4800 * we didn't find a candidate key in this node, walk forward
4801 * and find another one
4802 */
4803 if (slot >= nritems) {
4804 path->slots[level] = slot;
4805 sret = btrfs_find_next_key(root, path, min_key, level,
4806 min_trans);
4807 if (sret == 0) {
4808 btrfs_release_path(path);
4809 goto again;
4810 } else {
4811 goto out;
4812 }
4813 }
4814 /* save our key for returning back */
4815 btrfs_node_key_to_cpu(cur, &found_key, slot);
4816 path->slots[level] = slot;
4817 if (level == path->lowest_level) {
4818 ret = 0;
4819 goto out;
4820 }
4821 cur = btrfs_read_node_slot(cur, slot);
4822 if (IS_ERR(cur)) {
4823 ret = PTR_ERR(cur);
4824 goto out;
4825 }
4826
4827 btrfs_tree_read_lock(cur);
4828
4829 path->locks[level - 1] = BTRFS_READ_LOCK;
4830 path->nodes[level - 1] = cur;
4831 unlock_up(path, level, 1, 0, NULL);
4832 }
4833 out:
4834 path->keep_locks = keep_locks;
4835 if (ret == 0) {
4836 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4837 memcpy(min_key, &found_key, sizeof(found_key));
4838 }
4839 return ret;
4840 }
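/*
 * Illustrative sketch of one way a caller might drive this helper: scan for
 * anything newer than 'min_trans', advancing 'min_key' past each hit so the
 * loop makes progress. Processing of the found slot is left out.
 *
 *	memset(&min_key, 0, sizeof(min_key));
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, path, min_trans);
 *		if (ret)	// < 0 on error, 1 when nothing newer remains
 *			break;
 *		// process path->nodes[path->lowest_level] at path->slots[...]
 *		btrfs_release_path(path);
 *		if (min_key.offset < (u64)-1) {
 *			min_key.offset++;
 *		} else if (min_key.type < (u8)-1) {
 *			min_key.type++;
 *			min_key.offset = 0;
 *		} else if (min_key.objectid < (u64)-1) {
 *			min_key.objectid++;
 *			min_key.type = 0;
 *			min_key.offset = 0;
 *		} else {
 *			break;
 *		}
 *	}
 */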
4841
4842 /*
4843 * this is similar to btrfs_next_leaf, but does not try to preserve
4844 * and fixup the path. It looks for and returns the next key in the
4845 * tree based on the current path and the min_trans parameters.
4846 *
4847 * 0 is returned if another key is found, < 0 if there are any errors
4848 * and 1 is returned if there are no higher keys in the tree
4849 *
4850 * path->keep_locks should be set to 1 on the search made before
4851 * calling this function.
4852 */
4853 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4854 struct btrfs_key *key, int level, u64 min_trans)
4855 {
4856 int slot;
4857 struct extent_buffer *c;
4858
4859 WARN_ON(!path->keep_locks && !path->skip_locking);
4860 while (level < BTRFS_MAX_LEVEL) {
4861 if (!path->nodes[level])
4862 return 1;
4863
4864 slot = path->slots[level] + 1;
4865 c = path->nodes[level];
4866 next:
4867 if (slot >= btrfs_header_nritems(c)) {
4868 int ret;
4869 int orig_lowest;
4870 struct btrfs_key cur_key;
4871 if (level + 1 >= BTRFS_MAX_LEVEL ||
4872 !path->nodes[level + 1])
4873 return 1;
4874
4875 if (path->locks[level + 1] || path->skip_locking) {
4876 level++;
4877 continue;
4878 }
4879
4880 slot = btrfs_header_nritems(c) - 1;
4881 if (level == 0)
4882 btrfs_item_key_to_cpu(c, &cur_key, slot);
4883 else
4884 btrfs_node_key_to_cpu(c, &cur_key, slot);
4885
4886 orig_lowest = path->lowest_level;
4887 btrfs_release_path(path);
4888 path->lowest_level = level;
4889 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4890 0, 0);
4891 path->lowest_level = orig_lowest;
4892 if (ret < 0)
4893 return ret;
4894
4895 c = path->nodes[level];
4896 slot = path->slots[level];
4897 if (ret == 0)
4898 slot++;
4899 goto next;
4900 }
4901
4902 if (level == 0)
4903 btrfs_item_key_to_cpu(c, key, slot);
4904 else {
4905 u64 gen = btrfs_node_ptr_generation(c, slot);
4906
4907 if (gen < min_trans) {
4908 slot++;
4909 goto next;
4910 }
4911 btrfs_node_key_to_cpu(c, key, slot);
4912 }
4913 return 0;
4914 }
4915 return 1;
4916 }
4917
4918 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4919 u64 time_seq)
4920 {
4921 int slot;
4922 int level;
4923 struct extent_buffer *c;
4924 struct extent_buffer *next;
4925 struct btrfs_fs_info *fs_info = root->fs_info;
4926 struct btrfs_key key;
4927 bool need_commit_sem = false;
4928 u32 nritems;
4929 int ret;
4930 int i;
4931
4932 /*
4933 * The nowait semantics are used only for write paths, where we don't
4934 * use the tree mod log and sequence numbers.
4935 */
4936 if (time_seq)
4937 ASSERT(!path->nowait);
4938
4939 nritems = btrfs_header_nritems(path->nodes[0]);
4940 if (nritems == 0)
4941 return 1;
4942
4943 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4944 again:
4945 level = 1;
4946 next = NULL;
4947 btrfs_release_path(path);
4948
4949 path->keep_locks = 1;
4950
4951 if (time_seq) {
4952 ret = btrfs_search_old_slot(root, &key, path, time_seq);
4953 } else {
4954 if (path->need_commit_sem) {
4955 path->need_commit_sem = 0;
4956 need_commit_sem = true;
4957 if (path->nowait) {
4958 if (!down_read_trylock(&fs_info->commit_root_sem)) {
4959 ret = -EAGAIN;
4960 goto done;
4961 }
4962 } else {
4963 down_read(&fs_info->commit_root_sem);
4964 }
4965 }
4966 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4967 }
4968 path->keep_locks = 0;
4969
4970 if (ret < 0)
4971 goto done;
4972
4973 nritems = btrfs_header_nritems(path->nodes[0]);
4974 /*
4975 * by releasing the path above we dropped all our locks. A balance
4976 * could have added more items next to the key that used to be
4977 * at the very end of the block. So, check again here and
4978 * advance the path if there are now more items available.
4979 */
4980 if (nritems > 0 && path->slots[0] < nritems - 1) {
4981 if (ret == 0)
4982 path->slots[0]++;
4983 ret = 0;
4984 goto done;
4985 }
4986 /*
4987 * So the above check misses one case:
4988 * - after releasing the path above, someone has removed the item that
4989 * used to be at the very end of the block, and balance between leafs
4990 * gets another one with bigger key.offset to replace it.
4991 *
4992 * This one should be returned as well, or we can get leaf corruption
4993 * later (esp. in __btrfs_drop_extents()).
4994 *
4995 * And a bit more explanation about this check,
4996 * with ret > 0, the key isn't found, the path points to the slot
4997 * where it should be inserted, so the path->slots[0] item must be the
4998 * bigger one.
4999 */
5000 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5001 ret = 0;
5002 goto done;
5003 }
5004
5005 while (level < BTRFS_MAX_LEVEL) {
5006 if (!path->nodes[level]) {
5007 ret = 1;
5008 goto done;
5009 }
5010
5011 slot = path->slots[level] + 1;
5012 c = path->nodes[level];
5013 if (slot >= btrfs_header_nritems(c)) {
5014 level++;
5015 if (level == BTRFS_MAX_LEVEL) {
5016 ret = 1;
5017 goto done;
5018 }
5019 continue;
5020 }
5021
5022
5023 /*
5024 * Our current level is where we're going to start from, and to
5025 * make sure lockdep doesn't complain we need to drop our locks
5026 * and nodes from 0 to our current level.
5027 */
5028 for (i = 0; i < level; i++) {
5029 if (path->locks[level]) {
5030 btrfs_tree_read_unlock(path->nodes[i]);
5031 path->locks[i] = 0;
5032 }
5033 free_extent_buffer(path->nodes[i]);
5034 path->nodes[i] = NULL;
5035 }
5036
5037 next = c;
5038 ret = read_block_for_search(root, path, &next, level,
5039 slot, &key);
5040 if (ret == -EAGAIN && !path->nowait)
5041 goto again;
5042
5043 if (ret < 0) {
5044 btrfs_release_path(path);
5045 goto done;
5046 }
5047
5048 if (!path->skip_locking) {
5049 ret = btrfs_try_tree_read_lock(next);
5050 if (!ret && path->nowait) {
5051 ret = -EAGAIN;
5052 goto done;
5053 }
5054 if (!ret && time_seq) {
5055 /*
5056 * If we don't get the lock, we may be racing
5057 * with push_leaf_left, holding that lock while
5058 * itself waiting for the leaf we've currently
5059 * locked. To solve this situation, we give up
5060 * on our lock and cycle.
5061 */
5062 free_extent_buffer(next);
5063 btrfs_release_path(path);
5064 cond_resched();
5065 goto again;
5066 }
5067 if (!ret)
5068 btrfs_tree_read_lock(next);
5069 }
5070 break;
5071 }
5072 path->slots[level] = slot;
5073 while (1) {
5074 level--;
5075 path->nodes[level] = next;
5076 path->slots[level] = 0;
5077 if (!path->skip_locking)
5078 path->locks[level] = BTRFS_READ_LOCK;
5079 if (!level)
5080 break;
5081
5082 ret = read_block_for_search(root, path, &next, level,
5083 0, &key);
5084 if (ret == -EAGAIN && !path->nowait)
5085 goto again;
5086
5087 if (ret < 0) {
5088 btrfs_release_path(path);
5089 goto done;
5090 }
5091
5092 if (!path->skip_locking) {
5093 if (path->nowait) {
5094 if (!btrfs_try_tree_read_lock(next)) {
5095 ret = -EAGAIN;
5096 goto done;
5097 }
5098 } else {
5099 btrfs_tree_read_lock(next);
5100 }
5101 }
5102 }
5103 ret = 0;
5104 done:
5105 unlock_up(path, 0, 1, 0, NULL);
5106 if (need_commit_sem) {
5107 int ret2;
5108
5109 path->need_commit_sem = 1;
5110 ret2 = finish_need_commit_sem_search(path);
5111 up_read(&fs_info->commit_root_sem);
5112 if (ret2)
5113 ret = ret2;
5114 }
5115
5116 return ret;
5117 }
5118
5119 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
5120 {
5121 path->slots[0]++;
5122 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
5123 return btrfs_next_old_leaf(root, path, time_seq);
5124 return 0;
5125 }
5126
5127 /*
5128 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5129 * searching until it gets past min_objectid or finds an item of 'type'
5130 *
5131 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5132 */
5133 int btrfs_previous_item(struct btrfs_root *root,
5134 struct btrfs_path *path, u64 min_objectid,
5135 int type)
5136 {
5137 struct btrfs_key found_key;
5138 struct extent_buffer *leaf;
5139 u32 nritems;
5140 int ret;
5141
5142 while (1) {
5143 if (path->slots[0] == 0) {
5144 ret = btrfs_prev_leaf(root, path);
5145 if (ret != 0)
5146 return ret;
5147 } else {
5148 path->slots[0]--;
5149 }
5150 leaf = path->nodes[0];
5151 nritems = btrfs_header_nritems(leaf);
5152 if (nritems == 0)
5153 return 1;
5154 if (path->slots[0] == nritems)
5155 path->slots[0]--;
5156
5157 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5158 if (found_key.objectid < min_objectid)
5159 break;
5160 if (found_key.type == type)
5161 return 0;
5162 if (found_key.objectid == min_objectid &&
5163 found_key.type < type)
5164 break;
5165 }
5166 return 1;
5167 }
5168
5169 /*
5170 * search in extent tree to find a previous Metadata/Data extent item with
5171 * min objectid.
5172 *
5173 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5174 */
5175 int btrfs_previous_extent_item(struct btrfs_root *root,
5176 struct btrfs_path *path, u64 min_objectid)
5177 {
5178 struct btrfs_key found_key;
5179 struct extent_buffer *leaf;
5180 u32 nritems;
5181 int ret;
5182
5183 while (1) {
5184 if (path->slots[0] == 0) {
5185 ret = btrfs_prev_leaf(root, path);
5186 if (ret != 0)
5187 return ret;
5188 } else {
5189 path->slots[0]--;
5190 }
5191 leaf = path->nodes[0];
5192 nritems = btrfs_header_nritems(leaf);
5193 if (nritems == 0)
5194 return 1;
5195 if (path->slots[0] == nritems)
5196 path->slots[0]--;
5197
5198 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5199 if (found_key.objectid < min_objectid)
5200 break;
5201 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5202 found_key.type == BTRFS_METADATA_ITEM_KEY)
5203 return 0;
5204 if (found_key.objectid == min_objectid &&
5205 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5206 break;
5207 }
5208 return 1;
5209 }
5210
5211 int __init btrfs_ctree_init(void)
5212 {
5213 btrfs_path_cachep = kmem_cache_create("btrfs_path",
5214 sizeof(struct btrfs_path), 0,
5215 SLAB_MEM_SPREAD, NULL);
5216 if (!btrfs_path_cachep)
5217 return -ENOMEM;
5218 return 0;
5219 }
5220
5221 void __cold btrfs_ctree_exit(void)
5222 {
5223 kmem_cache_destroy(btrfs_path_cachep);
5224 }
5225