/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
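
/*
 * All delayed refs for a transaction live in a single rb tree, ordered
 * by comp_entry(): bytenr first, then head vs. plain ref, then ref type
 * and contents.  Every extent with pending updates has exactly one
 * btrfs_delayed_ref_head, which sorts after all of the plain ref nodes
 * for that bytenr.
 */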

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
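/*
 * Note that the comparison helpers take their arguments in reverse
 * order: tree_insert() passes the existing node as ref2 and the node
 * being inserted as ref1.  A head compares greater than any plain ref
 * with the same bytenr, so the head is always the last entry in the
 * tree for its extent.
 */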
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns the existing ref that
 * compares equal to the new node, if there is one, or NULL if the new
 * node was properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
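
/*
 * Note: callers run with delayed_refs->lock held.  When tree_insert()
 * returns an existing node, the caller merges the new ref into it via
 * update_existing_ref() or update_existing_head_ref() and then frees
 * the unused allocation.
 */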

/*
 * find a head entry based on bytenr.  This returns the delayed ref head
 * if it was able to find one, or NULL if nothing was in that spot.
 * If last is non-NULL, it is set to the last entry examined during the
 * walk, which callers can use as a search position hint.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
						    u64 bytenr,
						    struct btrfs_delayed_ref_node **last)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

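/*
 * Try to lock a delayed ref head for processing.  Called with
 * delayed_refs->lock held.  If the mutex is contended we take a
 * reference on the head, drop the spin lock so we can sleep on the
 * mutex, and revalidate afterwards: -EAGAIN means the head was run or
 * removed while we slept and the caller must redo its search.
 */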
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

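/*
 * Gather up to 32 delayed ref heads onto @cluster, starting at the
 * first head at or after @start (wrapping to the beginning of the tree
 * if nothing is found past @start).  A head that is already on another
 * cluster ends the run early, so heads likely to land in the same
 * extent leaf stay grouped together.
 */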
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start, &ref);
		if (ref) {
			struct btrfs_delayed_ref_node *tmp;

			node = rb_prev(&ref->rb_node);
			while (node) {
				tmp = rb_entry(node,
					       struct btrfs_delayed_ref_node,
					       rb_node);
				if (tmp->bytenr < start)
					break;
				ref = tmp;
				node = rb_prev(&ref->rb_node);
			}
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

/*
 * This checks to see if there are any delayed refs in the
 * rbtree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *prev_node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref) {
		prev_node = rb_prev(&ref->rb_node);
		if (!prev_node)
			goto out;
		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr == bytenr)
			ret = 1;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
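			/*
			 * tree blocks only ever carry a single ref per
			 * backref entry, so an add/drop pair should have
			 * cancelled out completely here; warn if the
			 * accounting says otherwise.
			 */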
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
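/*
 * Note: callers hold delayed_refs->lock across this call; the lock is
 * what makes the tree_insert() below safe.
 */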
static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes,
					 int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	}
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 0);
	BUG_ON(ret);

	ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, level, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
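
/*
 * A sketch of typical usage (illustrative only; the real call sites
 * live in extent-tree.c and may differ in detail): after allocating a
 * new tree block @buf at @level for @root, a caller would queue the
 * delayed insertion with something like
 *
 *	btrfs_add_delayed_tree_ref(trans, buf->start, buf->len, 0,
 *				   root->root_key.objectid, level,
 *				   BTRFS_ADD_DELAYED_EXTENT, NULL);
 *
 * parent == 0 produces a BTRFS_TREE_BLOCK_REF_KEY ref keyed on the
 * root, while a nonzero parent produces a shared block ref.
 */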

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 1);
	BUG_ON(ret);

	ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, owner, offset, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
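
/*
 * Illustrative sketch (assumed names; real call sites are in
 * extent-tree.c): dropping one reference to a file extent would queue
 * something like
 *
 *	btrfs_add_delayed_data_ref(trans, disk_bytenr, disk_num_bytes,
 *				   0, root->root_key.objectid,
 *				   inode_objectid, file_offset,
 *				   BTRFS_DROP_DELAYED_REF, NULL);
 */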

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);
	BUG_ON(ret);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
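
/*
 * Note: an extent_op queued this way uses BTRFS_UPDATE_DELAYED_HEAD,
 * so count_mod is 0 and no references are added or dropped; the key
 * and/or flag updates are merged into any existing head by
 * update_existing_head_ref() and applied when the head is run.
 */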

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}

/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a
 * single shot, taking the lock only once, and only searching for the
 * head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
#if 0
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, u64 orig_parent,
			     u64 parent, u64 orig_ref_root, u64 ref_root,
			     u64 orig_ref_generation, u64 ref_generation,
			     u64 owner_objectid, int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref *old_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
	if (!old_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via an add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;
	if (orig_parent == 0)
		orig_parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		kfree(old_ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0,
				      BTRFS_UPDATE_DELAYED_HEAD, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
				      orig_parent, orig_ref_root,
				      orig_ref_generation, owner_objectid,
				      BTRFS_DROP_DELAYED_REF, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
#endif