// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->num_bytes < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the existing node that overlaps
 * the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * Look for a given offset in the tree; if it can't be found, return the
 * first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->num_bytes <= file_offset)
                return 0;
        return 1;
}
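
/*
 * Illustration (hypothetical values, not part of the original code): an
 * ordered extent covering the half-open range [100, 200) overlaps a query
 * of file_offset=150 len=100 (i.e. [150, 250)), but not a query of
 * [200, 300) or [0, 100), since both checks above compare half-open end
 * points.
 */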

/*
 * Look for the first ordered struct that covers this offset, otherwise the
 * first one less than this offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (in_range(file_offset, entry->file_offset, entry->num_bytes))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/**
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted.
 *
 * Return: 0 or -ENOMEM.
 */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                             u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
                             u64 disk_num_bytes, u64 offset, unsigned flags,
                             int compress_type)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        int ret;

        if (flags &
            ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
                /* For nocow write, we can release the qgroup rsv right now */
                ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
                ret = 0;
        } else {
                /*
                 * The ordered extent has reserved qgroup space, release now
                 * and pass the reserved number for qgroup_record to free.
                 */
                ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
        }
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->num_bytes = num_bytes;
        entry->ram_bytes = ram_bytes;
        entry->disk_bytenr = disk_bytenr;
        entry->disk_num_bytes = disk_num_bytes;
        entry->offset = offset;
        entry->bytes_left = num_bytes;
        entry->inode = igrab(&inode->vfs_inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = ret;
        entry->physical = (u64)-1;

        ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
        entry->flags = flags;

        percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
                                 fs_info->delalloc_batch);

        /* one ref for the tree */
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                            "inconsistency in ordered tree at offset %llu",
                            file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        /*
         * We don't need the count_max_extents here, we can assume that all of
         * that work has been done at higher layers, so this is truly the
         * smallest the extent is going to get.
         */
        spin_lock(&inode->lock);
        btrfs_mod_outstanding_extents(inode, 1);
        spin_unlock(&inode->lock);

        return 0;
}
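
/*
 * Usage sketch (illustrative only, with hypothetical values; the real
 * callers live in the write paths of inode.c): a plain uncompressed COW
 * write that allocated a new extent would be recorded roughly as:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, num_bytes,
 *				       num_bytes, disk_bytenr, num_bytes, 0,
 *				       (1 << BTRFS_ORDERED_REGULAR),
 *				       BTRFS_COMPRESS_NONE);
 *	if (ret)
 *		goto out;
 */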

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(entry->inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered_extent;

        ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
        btrfs_finish_ordered_io(ordered_extent);
}

/*
 * Mark the IO of all ordered extents inside the specified range as finished.
 *
 * @page:  The involved page for the operation.
 *         For uncompressed buffered IO, the page status also needs to be
 *         updated to indicate whether the pending ordered io is finished.
 *         Can be NULL for direct IO and compressed write.
 *         For those cases, the caller must ensure the endio function won't
 *         be executed twice for the same range.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                                    struct page *page, u64 file_offset,
                                    u64 num_bytes, bool uptodate)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_workqueue *wq;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        u64 cur = file_offset;

        if (btrfs_is_free_space_inode(inode))
                wq = fs_info->endio_freespace_worker;
        else
                wq = fs_info->endio_write_workers;

        if (page)
                ASSERT(page->mapping && page_offset(page) <= file_offset &&
                       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

        spin_lock_irqsave(&tree->lock, flags);
        while (cur < file_offset + num_bytes) {
                u64 entry_end;
                u64 end;
                u32 len;

                node = tree_search(tree, cur);
                /* No ordered extents at all */
                if (!node)
                        break;

                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                entry_end = entry->file_offset + entry->num_bytes;
                /*
                 * |<-- OE --->|  |
                 *                cur
                 * Go to next OE.
                 */
                if (cur >= entry_end) {
                        node = rb_next(node);
                        /* No more ordered extents, exit */
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_ordered_extent,
                                         rb_node);

                        /* Go to next ordered extent and continue */
                        cur = entry->file_offset;
                        continue;
                }
                /*
                 *     |<--- OE --->|
                 * cur
                 * Go to the start of OE.
                 */
                if (cur < entry->file_offset) {
                        cur = entry->file_offset;
                        continue;
                }

                /*
                 * Now we are definitely inside one ordered extent.
                 *
                 * |<--- OE --->|
                 *       |
                 *       cur
                 */
                end = min(entry->file_offset + entry->num_bytes,
                          file_offset + num_bytes) - 1;
                ASSERT(end + 1 - cur < U32_MAX);
                len = end + 1 - cur;

                if (page) {
                        /*
                         * Ordered (Private2) bit indicates whether we still
                         * have pending io unfinished for the ordered extent.
                         *
                         * If there's no such bit, we need to skip to the next
                         * range.
                         */
                        if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
                                cur += len;
                                continue;
                        }
                        btrfs_page_clear_ordered(fs_info, page, cur, len);
                }

                /* Now we're fine to update the accounting */
                if (unlikely(len > entry->bytes_left)) {
                        WARN_ON(1);
                        btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
                                   inode->root->root_key.objectid,
                                   btrfs_ino(inode),
                                   entry->file_offset,
                                   entry->num_bytes,
                                   len, entry->bytes_left);
                        entry->bytes_left = 0;
                } else {
                        entry->bytes_left -= len;
                }

                if (!uptodate)
                        set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

                /*
                 * All the IO of the ordered extent is finished, we need to
                 * queue finish_ordered_fn to be executed.
                 */
                if (entry->bytes_left == 0) {
                        set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                        cond_wake_up(&entry->wait);
                        refcount_inc(&entry->refs);
                        trace_btrfs_ordered_extent_mark_finished(inode, entry);
                        spin_unlock_irqrestore(&tree->lock, flags);
                        btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
                        btrfs_queue_work(wq, &entry->work);
                        spin_lock_irqsave(&tree->lock, flags);
                }
                cur += len;
        }
        spin_unlock_irqrestore(&tree->lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:      The cached ordered extent. If not NULL, we can skip the tree
 *               search and use the ordered extent directly.
 *               Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:     Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
                                    u64 file_offset, u64 io_size)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        bool finished = false;

        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                goto out;

        if (io_size > entry->bytes_left)
                btrfs_crit(inode->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);

        entry->bytes_left -= io_size;

        if (entry->bytes_left == 0) {
                /*
                 * Ensure only one caller can set the flag and finished_ret
                 * accordingly.
                 */
                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        }
out:
        if (finished && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return finished;
}
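
/*
 * Usage sketch (illustrative only): @cached is both an input hint and an
 * output slot; when the extent is fully finished the caller receives a
 * reference through it and must drop that reference:
 *
 *	struct btrfs_ordered_extent *cached = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &cached, offset, len)) {
 *		... cached now points at the finished ordered extent ...
 *		btrfs_put_ordered_extent(cached);
 *	}
 */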

/*
 * Drop a reference on an ordered extent. This frees the extent when the
 * last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

        if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(list_empty(&entry->log_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}
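
/*
 * Reference pairing sketch (illustrative only): every lookup helper in this
 * file that returns a non-NULL ordered extent took a reference on it, so
 * the result must eventually be balanced with btrfs_put_ordered_extent():
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, offset);
 *	if (ordered) {
 *		... use ordered ...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */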

/*
 * Remove an ordered extent from the tree. No references are dropped, but
 * any waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = btrfs_inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;
        bool freespace_inode;

        /*
         * If this is a free space inode the thread has not acquired the
         * ordered extents lockdep map.
         */
        freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

        btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
        /* This is paired with btrfs_add_ordered_extent. */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
        spin_unlock(&btrfs_inode->lock);
        if (root != fs_info->tree_root) {
                u64 release;

                if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
                        release = entry->disk_num_bytes;
                else
                        release = entry->num_bytes;
                btrfs_delalloc_release_metadata(btrfs_inode, release, false);
        }

        percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
                                 fs_info->delalloc_batch);

        tree = &btrfs_inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (pending) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be
                 * set, but if it isn't we don't want to deref/assert under
                 * the spin lock, so be nice and check if trans is set, but
                 * ASSERT() so if it isn't set a developer will notice.
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
        if (!freespace_inode)
                btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered, 1);
        complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
        const u64 range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);

                if (range_end <= ordered->disk_bytenr ||
                    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
                        list_move_tail(&ordered->root_extent_list, &skipped);
                        cond_resched_lock(&root->ordered_extent_lock);
                        continue;
                }

                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != U64_MAX)
                        nr--;
                count++;
        }
        list_splice_tail(&skipped, &root->ordered_extents);
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}
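
/*
 * Usage sketch (illustrative only): waiting on every ordered extent of a
 * root, regardless of where it sits on disk, is done by passing the widest
 * possible count and range:
 *
 *	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 */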

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len)
{
        struct btrfs_root *root;
        struct list_head splice;
        u64 done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_root(root);

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != U64_MAX)
                        nr -= done;
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If @wait is one, this effectively waits on page writeback for all the
 * pages in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = BTRFS_I(entry->inode);
        bool freespace_inode;

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * If this is a free space inode do not take the ordered extents
         * lockdep map.
         */
        freespace_inode = btrfs_is_free_space_inode(inode);

        /*
         * Pages in the range can be dirty, clean or writeback. We start IO
         * on any dirty ones so the wait doesn't stall waiting for the
         * flusher thread to find them.
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
        if (wait) {
                if (!freespace_inode)
                        btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /*
         * Start IO across the range first to instantiate any delalloc
         * extents.
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to
         * make sure no one can dirty the same page ranges and call
         * writepages() before the ordered extents complete - to avoid
         * failures (-EEXIST) when adding the new ordered extents to the
         * ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->num_bytes <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(ordered, 1);
                end = ordered->file_offset;
                /*
                 * If the ordered extent had an error save the error but don't
                 * exit without waiting first for all other ordered extents in
                 * the range to complete.
                 */
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}
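
/*
 * Usage sketch (illustrative only): an fsync-style flush of a whole file
 * waits on the maximal range:
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *	if (ret)
 *		return ret;
 */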

/*
 * Find an ordered extent corresponding to @file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;

        tree = &inode->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                entry = NULL;
        if (entry) {
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_lookup(inode, entry);
        }
out:
        spin_unlock_irqrestore(&tree->lock, flags);
        return entry;
}
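
/*
 * Illustration (hypothetical offsets): with an ordered extent covering
 * [4096, 8192), a lookup at 4096 or 8191 returns it with an extra
 * reference taken, while a lookup at 8192 returns NULL, since in_range()
 * treats the length as a half-open interval.
 */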

/*
 * Since the DIO code tries to lock a wide area we need to look for any
 * ordered extents that exist in the range, rather than just the start of
 * the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry) {
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_lookup_range(inode, entry);
        }
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
                                           struct list_head *list)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *n;

        ASSERT(inode_is_locked(&inode->vfs_inode));

        spin_lock_irq(&tree->lock);
        for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
                struct btrfs_ordered_extent *ordered;

                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;

                ASSERT(list_empty(&ordered->log_list));
                list_add_tail(&ordered->log_list, list);
                refcount_inc(&ordered->refs);
                trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
        }
        spin_unlock_irq(&tree->lock);
}
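
/*
 * Consumption sketch (illustrative only): the tree-log code drains such a
 * list roughly like this, dropping the reference taken above for each entry
 * once it has been processed:
 *
 *	list_for_each_entry_safe(ordered, tmp, &list, log_list) {
 *		list_del_init(&ordered->log_list);
 *		... log the extent ...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */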

/*
 * Lookup and return the ordered extent covering @file_offset, or the closest
 * one before it. NULL is returned if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        refcount_inc(&entry->refs);
        trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the
 * range. And the difference against btrfs_lookup_ordered_extent() is that
 * this function ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct rb_node *cur;
        struct rb_node *prev;
        struct rb_node *next;
        struct btrfs_ordered_extent *entry = NULL;

        spin_lock_irq(&tree->lock);
        node = tree->tree.rb_node;
        /*
         * Here we don't want to use tree_search() which will use tree->last
         * and screw up the search order.
         * And __tree_search() can't return the adjacent ordered extents
         * either, thus here we do our own search.
         */
        while (node) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset) {
                        node = node->rb_left;
                } else if (file_offset >= entry_end(entry)) {
                        node = node->rb_right;
                } else {
                        /*
                         * Direct hit, got an ordered extent that starts at
                         * @file_offset
                         */
                        goto out;
                }
        }
        if (!entry) {
                /* Empty tree */
                goto out;
        }

        cur = &entry->rb_node;
        /* We got an entry around @file_offset, check adjacent entries */
        if (entry->file_offset < file_offset) {
                prev = cur;
                next = rb_next(cur);
        } else {
                prev = rb_prev(cur);
                next = cur;
        }
        if (prev) {
                entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        if (next) {
                entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        /* No ordered extent in the range */
        entry = NULL;
out:
        if (entry) {
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
        }

        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending in it.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;

        if (cached_state)
                cachedp = cached_state;

        while (1) {
                lock_extent(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                        /*
                         * If no external cached_state has been passed then
                         * decrement the extra ref taken for cachedp since we
                         * aren't exposing it outside of this function.
                         */
                        if (!cached_state)
                                refcount_dec(&cache->refs);
                        break;
                }
                unlock_extent(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
}
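
/*
 * Usage sketch (illustrative only): a path that must not race with pending
 * writes can lock and drain the range, then unlock when done:
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	... operate on the clean, locked range ...
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 */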

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked and no pending ordered extents were
 * found, otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end)
{
        struct btrfs_ordered_extent *ordered;

        if (!try_lock_extent(&inode->io_tree, start, end))
                return false;

        ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
        if (!ordered)
                return true;

        btrfs_put_ordered_extent(ordered);
        unlock_extent(&inode->io_tree, start, end, NULL);

        return false;
}
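
/*
 * Usage sketch (illustrative only): a nowait path would try the fast path
 * first and tell the caller to fall back rather than block:
 *
 *	if (!btrfs_try_lock_ordered_range(inode, start, end))
 *		return -EAGAIN;
 *	... nowait work with the range locked ...
 *	unlock_extent(&inode->io_tree, start, end, NULL);
 */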

static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
                                u64 len)
{
        struct inode *inode = ordered->inode;
        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
        u64 file_offset = ordered->file_offset + pos;
        u64 disk_bytenr = ordered->disk_bytenr + pos;
        unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;

        /*
         * The splitting extent is already counted and will be added again in
         * btrfs_add_ordered_extent(). Subtract @len to avoid double counting.
         */
        percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
                                 fs_info->delalloc_batch);
        WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
        return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
                                        disk_bytenr, len, 0, flags,
                                        ordered->compress_type);
}

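/*
 * Split an ordered extent in place:
 *
 *	|<--- pre --->|<---- remaining OE ---->|<--- post --->|
 *	^ original file_offset
 *
 * The original entry is trimmed to the middle part and re-inserted into the
 * tree, then the @pre and @post ranges are re-added as separate ordered
 * extents via clone_ordered_extent().
 */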
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
                               u64 post)
{
        struct inode *inode = ordered->inode;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct rb_node *node;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret = 0;

        trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);

        spin_lock_irq(&tree->lock);
        /* Remove from tree once */
        node = &ordered->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;

        ordered->file_offset += pre;
        ordered->disk_bytenr += pre;
        ordered->num_bytes -= (pre + post);
        ordered->disk_num_bytes -= (pre + post);
        ordered->bytes_left -= (pre + post);

        /* Re-insert the node */
        node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                            "zoned: inconsistency in ordered tree at offset %llu",
                            ordered->file_offset);

        spin_unlock_irq(&tree->lock);

        if (pre)
                ret = clone_ordered_extent(ordered, 0, pre);
        if (ret == 0 && post)
                ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
                                           post);

        return ret;
}

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                        sizeof(struct btrfs_ordered_extent), 0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

void __cold ordered_data_exit(void)
{
        kmem_cache_destroy(btrfs_ordered_extent_cache);
}