1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include "extent_io.h"
14 #include "extent_map.h"
15 #include "compat.h"
16 #include "ctree.h"
17 #include "btrfs_inode.h"
18 
19 static struct kmem_cache *extent_state_cache;
20 static struct kmem_cache *extent_buffer_cache;
21 
22 static LIST_HEAD(buffers);
23 static LIST_HEAD(states);
24 
25 #define LEAK_DEBUG 0
26 #if LEAK_DEBUG
27 static DEFINE_SPINLOCK(leak_lock);
28 #endif
29 
30 #define BUFFER_LRU_MAX 64
31 
32 struct tree_entry {
33 	u64 start;
34 	u64 end;
35 	struct rb_node rb_node;
36 };
37 
38 struct extent_page_data {
39 	struct bio *bio;
40 	struct extent_io_tree *tree;
41 	get_extent_t *get_extent;
42 
43 	/* tells writepage not to lock the state bits for this range
44 	 * it still does the unlocking
45 	 */
46 	unsigned int extent_locked:1;
47 
48 	/* tells the submit_bio code to use a WRITE_SYNC */
49 	unsigned int sync_io:1;
50 };
51 
52 int __init extent_io_init(void)
53 {
54 	extent_state_cache = kmem_cache_create("extent_state",
55 			sizeof(struct extent_state), 0,
56 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
57 	if (!extent_state_cache)
58 		return -ENOMEM;
59 
60 	extent_buffer_cache = kmem_cache_create("extent_buffers",
61 			sizeof(struct extent_buffer), 0,
62 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
63 	if (!extent_buffer_cache)
64 		goto free_state_cache;
65 	return 0;
66 
67 free_state_cache:
68 	kmem_cache_destroy(extent_state_cache);
69 	return -ENOMEM;
70 }
71 
72 void extent_io_exit(void)
73 {
74 	struct extent_state *state;
75 	struct extent_buffer *eb;
76 
77 	while (!list_empty(&states)) {
78 		state = list_entry(states.next, struct extent_state, leak_list);
79 		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
80 		       "state %lu in tree %p refs %d\n",
81 		       (unsigned long long)state->start,
82 		       (unsigned long long)state->end,
83 		       state->state, state->tree, atomic_read(&state->refs));
84 		list_del(&state->leak_list);
85 		kmem_cache_free(extent_state_cache, state);
86 
87 	}
88 
89 	while (!list_empty(&buffers)) {
90 		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
91 		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
92 		       "refs %d\n", (unsigned long long)eb->start,
93 		       eb->len, atomic_read(&eb->refs));
94 		list_del(&eb->leak_list);
95 		kmem_cache_free(extent_buffer_cache, eb);
96 	}
97 	if (extent_state_cache)
98 		kmem_cache_destroy(extent_state_cache);
99 	if (extent_buffer_cache)
100 		kmem_cache_destroy(extent_buffer_cache);
101 }
102 
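/*
 * initialize an extent_io_tree: empty state rb-tree and buffer radix
 * tree, no callback ops, zero dirty byte accounting, and the given
 * address_space as the backing mapping
 */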
103 void extent_io_tree_init(struct extent_io_tree *tree,
104 			  struct address_space *mapping, gfp_t mask)
105 {
106 	tree->state = RB_ROOT;
107 	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
108 	tree->ops = NULL;
109 	tree->dirty_bytes = 0;
110 	spin_lock_init(&tree->lock);
111 	spin_lock_init(&tree->buffer_lock);
112 	tree->mapping = mapping;
113 }
114 
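/*
 * allocate a new extent_state from the slab cache and initialize it
 * with a single reference and no bits set.  Returns NULL if the
 * allocation fails.
 */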
115 static struct extent_state *alloc_extent_state(gfp_t mask)
116 {
117 	struct extent_state *state;
118 #if LEAK_DEBUG
119 	unsigned long flags;
120 #endif
121 
122 	state = kmem_cache_alloc(extent_state_cache, mask);
123 	if (!state)
124 		return state;
125 	state->state = 0;
126 	state->private = 0;
127 	state->tree = NULL;
128 #if LEAK_DEBUG
129 	spin_lock_irqsave(&leak_lock, flags);
130 	list_add(&state->leak_list, &states);
131 	spin_unlock_irqrestore(&leak_lock, flags);
132 #endif
133 	atomic_set(&state->refs, 1);
134 	init_waitqueue_head(&state->wq);
135 	return state;
136 }
137 
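/*
 * drop a reference on an extent_state and free it back to the slab
 * cache once the last reference is gone
 */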
138 void free_extent_state(struct extent_state *state)
139 {
140 	if (!state)
141 		return;
142 	if (atomic_dec_and_test(&state->refs)) {
143 #if LEAK_DEBUG
144 		unsigned long flags;
145 #endif
146 		WARN_ON(state->tree);
147 #if LEAK_DEBUG
148 		spin_lock_irqsave(&leak_lock, flags);
149 		list_del(&state->leak_list);
150 		spin_unlock_irqrestore(&leak_lock, flags);
151 #endif
152 		kmem_cache_free(extent_state_cache, state);
153 	}
154 }
155 
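/*
 * insert 'node' into the rb-tree keyed by end offset.  If an existing
 * entry already covers 'offset', that entry is returned and nothing is
 * inserted; NULL is returned on a successful insert.
 */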
156 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
157 				   struct rb_node *node)
158 {
159 	struct rb_node **p = &root->rb_node;
160 	struct rb_node *parent = NULL;
161 	struct tree_entry *entry;
162 
163 	while (*p) {
164 		parent = *p;
165 		entry = rb_entry(parent, struct tree_entry, rb_node);
166 
167 		if (offset < entry->start)
168 			p = &(*p)->rb_left;
169 		else if (offset > entry->end)
170 			p = &(*p)->rb_right;
171 		else
172 			return parent;
173 	}
174 
175 	entry = rb_entry(node, struct tree_entry, rb_node);
176 	rb_link_node(node, parent, p);
177 	rb_insert_color(node, root);
178 	return NULL;
179 }
180 
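/*
 * search the tree for an entry that contains 'offset' and return it.
 * If there is no exact match, NULL is returned and, when the caller
 * asked for them, *prev_ret and *next_ret are pointed at the entries
 * neighbouring the offset.
 */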
181 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
182 				     struct rb_node **prev_ret,
183 				     struct rb_node **next_ret)
184 {
185 	struct rb_root *root = &tree->state;
186 	struct rb_node *n = root->rb_node;
187 	struct rb_node *prev = NULL;
188 	struct rb_node *orig_prev = NULL;
189 	struct tree_entry *entry;
190 	struct tree_entry *prev_entry = NULL;
191 
192 	while (n) {
193 		entry = rb_entry(n, struct tree_entry, rb_node);
194 		prev = n;
195 		prev_entry = entry;
196 
197 		if (offset < entry->start)
198 			n = n->rb_left;
199 		else if (offset > entry->end)
200 			n = n->rb_right;
201 		else
202 			return n;
203 	}
204 
205 	if (prev_ret) {
206 		orig_prev = prev;
207 		while (prev && offset > prev_entry->end) {
208 			prev = rb_next(prev);
209 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
210 		}
211 		*prev_ret = prev;
212 		prev = orig_prev;
213 	}
214 
215 	if (next_ret) {
216 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
217 		while (prev && offset < prev_entry->start) {
218 			prev = rb_prev(prev);
219 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
220 		}
221 		*next_ret = prev;
222 	}
223 	return NULL;
224 }
225 
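/*
 * return the tree entry that contains 'offset', or the first entry
 * that ends at or after it when there is no exact match
 */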
226 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
227 					  u64 offset)
228 {
229 	struct rb_node *prev = NULL;
230 	struct rb_node *ret;
231 
232 	ret = __etree_search(tree, offset, &prev, NULL);
233 	if (!ret)
234 		return prev;
235 	return ret;
236 }
237 
238 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
239 		     struct extent_state *other)
240 {
241 	if (tree->ops && tree->ops->merge_extent_hook)
242 		tree->ops->merge_extent_hook(tree->mapping->host, new,
243 					     other);
244 }
245 
246 /*
247  * utility function to look for merge candidates inside a given range.
248  * Any extents with matching state are merged together into a single
249  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
250  * are not merged because the end_io handlers need to be able to do
251  * operations on them without sleeping (or doing allocations/splits).
252  *
253  * This should be called with the tree lock held.
254  */
255 static int merge_state(struct extent_io_tree *tree,
256 		       struct extent_state *state)
257 {
258 	struct extent_state *other;
259 	struct rb_node *other_node;
260 
261 	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
262 		return 0;
263 
264 	other_node = rb_prev(&state->rb_node);
265 	if (other_node) {
266 		other = rb_entry(other_node, struct extent_state, rb_node);
267 		if (other->end == state->start - 1 &&
268 		    other->state == state->state) {
269 			merge_cb(tree, state, other);
270 			state->start = other->start;
271 			other->tree = NULL;
272 			rb_erase(&other->rb_node, &tree->state);
273 			free_extent_state(other);
274 		}
275 	}
276 	other_node = rb_next(&state->rb_node);
277 	if (other_node) {
278 		other = rb_entry(other_node, struct extent_state, rb_node);
279 		if (other->start == state->end + 1 &&
280 		    other->state == state->state) {
281 			merge_cb(tree, state, other);
282 			other->start = state->start;
283 			state->tree = NULL;
284 			rb_erase(&state->rb_node, &tree->state);
285 			free_extent_state(state);
286 			state = NULL;
287 		}
288 	}
289 
290 	return 0;
291 }
292 
293 static int set_state_cb(struct extent_io_tree *tree,
294 			 struct extent_state *state, int *bits)
295 {
296 	if (tree->ops && tree->ops->set_bit_hook) {
297 		return tree->ops->set_bit_hook(tree->mapping->host,
298 					       state, bits);
299 	}
300 
301 	return 0;
302 }
303 
304 static void clear_state_cb(struct extent_io_tree *tree,
305 			   struct extent_state *state, int *bits)
306 {
307 	if (tree->ops && tree->ops->clear_bit_hook)
308 		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
309 }
310 
311 /*
312  * insert an extent_state struct into the tree.  'bits' are set on the
313  * struct before it is inserted.
314  *
315  * This may return -EEXIST if the extent is already there, in which case the
316  * state struct is freed.
317  *
318  * The tree lock is not taken internally.  This is a utility function and
319  * probably isn't what you want to call (see set/clear_extent_bit).
320  */
321 static int insert_state(struct extent_io_tree *tree,
322 			struct extent_state *state, u64 start, u64 end,
323 			int *bits)
324 {
325 	struct rb_node *node;
326 	int bits_to_set = *bits & ~EXTENT_CTLBITS;
327 	int ret;
328 
329 	if (end < start) {
330 		printk(KERN_ERR "btrfs end < start %llu %llu\n",
331 		       (unsigned long long)end,
332 		       (unsigned long long)start);
333 		WARN_ON(1);
334 	}
335 	state->start = start;
336 	state->end = end;
337 	ret = set_state_cb(tree, state, bits);
338 	if (ret)
339 		return ret;
340 
341 	if (bits_to_set & EXTENT_DIRTY)
342 		tree->dirty_bytes += end - start + 1;
343 	state->state |= bits_to_set;
344 	node = tree_insert(&tree->state, end, &state->rb_node);
345 	if (node) {
346 		struct extent_state *found;
347 		found = rb_entry(node, struct extent_state, rb_node);
348 		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
349 		       "%llu %llu\n", (unsigned long long)found->start,
350 		       (unsigned long long)found->end,
351 		       (unsigned long long)start, (unsigned long long)end);
352 		free_extent_state(state);
353 		return -EEXIST;
354 	}
355 	state->tree = tree;
356 	merge_state(tree, state);
357 	return 0;
358 }
359 
360 static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
361 		     u64 split)
362 {
363 	if (tree->ops && tree->ops->split_extent_hook)
364 		return tree->ops->split_extent_hook(tree->mapping->host,
365 						    orig, split);
366 	return 0;
367 }
368 
369 /*
370  * split a given extent state struct in two, inserting the preallocated
371  * struct 'prealloc' as the newly created second half.  'split' indicates an
372  * offset inside 'orig' where it should be split.
373  *
374  * Before calling,
375  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
376  * are two extent state structs in the tree:
377  * prealloc: [orig->start, split - 1]
378  * orig: [ split, orig->end ]
379  *
380  * The tree locks are not taken by this function. They need to be held
381  * by the caller.
382  */
383 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
384 		       struct extent_state *prealloc, u64 split)
385 {
386 	struct rb_node *node;
387 
388 	split_cb(tree, orig, split);
389 
390 	prealloc->start = orig->start;
391 	prealloc->end = split - 1;
392 	prealloc->state = orig->state;
393 	orig->start = split;
394 
395 	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
396 	if (node) {
397 		free_extent_state(prealloc);
398 		return -EEXIST;
399 	}
400 	prealloc->tree = tree;
401 	return 0;
402 }
403 
404 /*
405  * utility function to clear some bits in an extent state struct.
406  * it will optionally wake up any one waiting on this state (wake == 1), or
407  * forcibly remove the state from the tree (delete == 1).
408  *
409  * If no bits are set on the state struct after clearing things, the
410  * struct is freed and removed from the tree
411  */
412 static int clear_state_bit(struct extent_io_tree *tree,
413 			    struct extent_state *state,
414 			    int *bits, int wake)
415 {
416 	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
417 	int ret = state->state & bits_to_clear;
418 
419 	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
420 		u64 range = state->end - state->start + 1;
421 		WARN_ON(range > tree->dirty_bytes);
422 		tree->dirty_bytes -= range;
423 	}
424 	clear_state_cb(tree, state, bits);
425 	state->state &= ~bits_to_clear;
426 	if (wake)
427 		wake_up(&state->wq);
428 	if (state->state == 0) {
429 		if (state->tree) {
430 			rb_erase(&state->rb_node, &tree->state);
431 			state->tree = NULL;
432 			free_extent_state(state);
433 		} else {
434 			WARN_ON(1);
435 		}
436 	} else {
437 		merge_state(tree, state);
438 	}
439 	return ret;
440 }
441 
442 /*
443  * clear some bits on a range in the tree.  This may require splitting
444  * or inserting elements in the tree, so the gfp mask is used to
445  * indicate which allocations or sleeping are allowed.
446  *
447  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
448  * the given range from the tree regardless of state (ie for truncate).
449  *
450  * the range [start, end] is inclusive.
451  *
452  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
453  * bits were already set, or zero if none of the bits were already set.
454  */
455 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
456 		     int bits, int wake, int delete,
457 		     struct extent_state **cached_state,
458 		     gfp_t mask)
459 {
460 	struct extent_state *state;
461 	struct extent_state *cached;
462 	struct extent_state *prealloc = NULL;
463 	struct rb_node *next_node;
464 	struct rb_node *node;
465 	u64 last_end;
466 	int err;
467 	int set = 0;
468 	int clear = 0;
469 
470 	if (delete)
471 		bits |= ~EXTENT_CTLBITS;
472 	bits |= EXTENT_FIRST_DELALLOC;
473 
474 	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
475 		clear = 1;
476 again:
477 	if (!prealloc && (mask & __GFP_WAIT)) {
478 		prealloc = alloc_extent_state(mask);
479 		if (!prealloc)
480 			return -ENOMEM;
481 	}
482 
483 	spin_lock(&tree->lock);
484 	if (cached_state) {
485 		cached = *cached_state;
486 
487 		if (clear) {
488 			*cached_state = NULL;
489 			cached_state = NULL;
490 		}
491 
492 		if (cached && cached->tree && cached->start == start) {
493 			if (clear)
494 				atomic_dec(&cached->refs);
495 			state = cached;
496 			goto hit_next;
497 		}
498 		if (clear)
499 			free_extent_state(cached);
500 	}
501 	/*
502 	 * this search will find the extents that end after
503 	 * our range starts
504 	 */
505 	node = tree_search(tree, start);
506 	if (!node)
507 		goto out;
508 	state = rb_entry(node, struct extent_state, rb_node);
509 hit_next:
510 	if (state->start > end)
511 		goto out;
512 	WARN_ON(state->end < start);
513 	last_end = state->end;
514 
515 	/*
516 	 *     | ---- desired range ---- |
517 	 *  | state | or
518 	 *  | ------------- state -------------- |
519 	 *
520 	 * We need to split the extent we found, and may flip
521 	 * bits on second half.
522 	 *
523 	 * If the extent we found extends past our range, we
524 	 * just split and search again.  It'll get split again
525 	 * the next time though.
526 	 *
527 	 * If the extent we found is inside our range, we clear
528 	 * the desired bit on it.
529 	 */
530 
531 	if (state->start < start) {
532 		if (!prealloc)
533 			prealloc = alloc_extent_state(GFP_ATOMIC);
534 		err = split_state(tree, state, prealloc, start);
535 		BUG_ON(err == -EEXIST);
536 		prealloc = NULL;
537 		if (err)
538 			goto out;
539 		if (state->end <= end) {
540 			set |= clear_state_bit(tree, state, &bits, wake);
541 			if (last_end == (u64)-1)
542 				goto out;
543 			start = last_end + 1;
544 		}
545 		goto search_again;
546 	}
547 	/*
548 	 * | ---- desired range ---- |
549 	 *                        | state |
550 	 * We need to split the extent, and clear the bit
551 	 * on the first half
552 	 */
553 	if (state->start <= end && state->end > end) {
554 		if (!prealloc)
555 			prealloc = alloc_extent_state(GFP_ATOMIC);
556 		err = split_state(tree, state, prealloc, end + 1);
557 		BUG_ON(err == -EEXIST);
558 		if (wake)
559 			wake_up(&state->wq);
560 
561 		set |= clear_state_bit(tree, prealloc, &bits, wake);
562 
563 		prealloc = NULL;
564 		goto out;
565 	}
566 
567 	if (state->end < end && prealloc && !need_resched())
568 		next_node = rb_next(&state->rb_node);
569 	else
570 		next_node = NULL;
571 
572 	set |= clear_state_bit(tree, state, &bits, wake);
573 	if (last_end == (u64)-1)
574 		goto out;
575 	start = last_end + 1;
576 	if (start <= end && next_node) {
577 		state = rb_entry(next_node, struct extent_state,
578 				 rb_node);
579 		if (state->start == start)
580 			goto hit_next;
581 	}
582 	goto search_again;
583 
584 out:
585 	spin_unlock(&tree->lock);
586 	if (prealloc)
587 		free_extent_state(prealloc);
588 
589 	return set;
590 
591 search_again:
592 	if (start > end)
593 		goto out;
594 	spin_unlock(&tree->lock);
595 	if (mask & __GFP_WAIT)
596 		cond_resched();
597 	goto again;
598 }
599 
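/*
 * drop the tree lock and sleep until this extent_state's waitqueue is
 * woken, then re-take the lock before returning
 */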
600 static int wait_on_state(struct extent_io_tree *tree,
601 			 struct extent_state *state)
602 		__releases(tree->lock)
603 		__acquires(tree->lock)
604 {
605 	DEFINE_WAIT(wait);
606 	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
607 	spin_unlock(&tree->lock);
608 	schedule();
609 	spin_lock(&tree->lock);
610 	finish_wait(&state->wq, &wait);
611 	return 0;
612 }
613 
614 /*
615  * waits for one or more bits to clear on a range in the state tree.
616  * The range [start, end] is inclusive.
617  * The tree lock is taken by this function
618  */
619 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
620 {
621 	struct extent_state *state;
622 	struct rb_node *node;
623 
624 	spin_lock(&tree->lock);
625 again:
626 	while (1) {
627 		/*
628 		 * this search will find all the extents that end after
629 		 * our range starts
630 		 */
631 		node = tree_search(tree, start);
632 		if (!node)
633 			break;
634 
635 		state = rb_entry(node, struct extent_state, rb_node);
636 
637 		if (state->start > end)
638 			goto out;
639 
640 		if (state->state & bits) {
641 			start = state->start;
642 			atomic_inc(&state->refs);
643 			wait_on_state(tree, state);
644 			free_extent_state(state);
645 			goto again;
646 		}
647 		start = state->end + 1;
648 
649 		if (start > end)
650 			break;
651 
652 		if (need_resched()) {
653 			spin_unlock(&tree->lock);
654 			cond_resched();
655 			spin_lock(&tree->lock);
656 		}
657 	}
658 out:
659 	spin_unlock(&tree->lock);
660 	return 0;
661 }
662 
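/*
 * set some bits on a single extent_state.  The set_bit_hook is called
 * first, and dirty_bytes accounting is updated when EXTENT_DIRTY is
 * newly set.  The tree lock must be held by the caller.
 */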
663 static int set_state_bits(struct extent_io_tree *tree,
664 			   struct extent_state *state,
665 			   int *bits)
666 {
667 	int ret;
668 	int bits_to_set = *bits & ~EXTENT_CTLBITS;
669 
670 	ret = set_state_cb(tree, state, bits);
671 	if (ret)
672 		return ret;
673 	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
674 		u64 range = state->end - state->start + 1;
675 		tree->dirty_bytes += range;
676 	}
677 	state->state |= bits_to_set;
678 
679 	return 0;
680 }
681 
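/*
 * stash a referenced pointer to 'state' in *cached_ptr so later calls
 * can skip the tree search.  Only states carrying EXTENT_IOBITS or
 * EXTENT_BOUNDARY are cached, and only when nothing is cached yet.
 */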
682 static void cache_state(struct extent_state *state,
683 			struct extent_state **cached_ptr)
684 {
685 	if (cached_ptr && !(*cached_ptr)) {
686 		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
687 			*cached_ptr = state;
688 			atomic_inc(&state->refs);
689 		}
690 	}
691 }
692 
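/*
 * drop a cached state reference and clear the caller's pointer
 */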
693 static void uncache_state(struct extent_state **cached_ptr)
694 {
695 	if (cached_ptr && (*cached_ptr)) {
696 		struct extent_state *state = *cached_ptr;
697 		*cached_ptr = NULL;
698 		free_extent_state(state);
699 	}
700 }
701 
702 /*
703  * set some bits on a range in the tree.  This may require allocations or
704  * sleeping, so the gfp mask is used to indicate what is allowed.
705  *
706  * If any of the exclusive bits are set, this will fail with -EEXIST if some
707  * part of the range already has the desired bits set.  The start of the
708  * existing range is returned in failed_start in this case.
709  *
710  * [start, end] is inclusive.  This takes the tree lock.
711  */
712 
713 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
714 		   int bits, int exclusive_bits, u64 *failed_start,
715 		   struct extent_state **cached_state, gfp_t mask)
716 {
717 	struct extent_state *state;
718 	struct extent_state *prealloc = NULL;
719 	struct rb_node *node;
720 	int err = 0;
721 	u64 last_start;
722 	u64 last_end;
723 
724 	bits |= EXTENT_FIRST_DELALLOC;
725 again:
726 	if (!prealloc && (mask & __GFP_WAIT)) {
727 		prealloc = alloc_extent_state(mask);
728 		if (!prealloc)
729 			return -ENOMEM;
730 	}
731 
732 	spin_lock(&tree->lock);
733 	if (cached_state && *cached_state) {
734 		state = *cached_state;
735 		if (state->start == start && state->tree) {
736 			node = &state->rb_node;
737 			goto hit_next;
738 		}
739 	}
740 	/*
741 	 * this search will find all the extents that end after
742 	 * our range starts.
743 	 */
744 	node = tree_search(tree, start);
745 	if (!node) {
746 		err = insert_state(tree, prealloc, start, end, &bits);
747 		prealloc = NULL;
748 		BUG_ON(err == -EEXIST);
749 		goto out;
750 	}
751 	state = rb_entry(node, struct extent_state, rb_node);
752 hit_next:
753 	last_start = state->start;
754 	last_end = state->end;
755 
756 	/*
757 	 * | ---- desired range ---- |
758 	 * | state |
759 	 *
760 	 * Just lock what we found and keep going
761 	 */
762 	if (state->start == start && state->end <= end) {
763 		struct rb_node *next_node;
764 		if (state->state & exclusive_bits) {
765 			*failed_start = state->start;
766 			err = -EEXIST;
767 			goto out;
768 		}
769 
770 		err = set_state_bits(tree, state, &bits);
771 		if (err)
772 			goto out;
773 
774 		cache_state(state, cached_state);
775 		merge_state(tree, state);
776 		if (last_end == (u64)-1)
777 			goto out;
778 
779 		start = last_end + 1;
780 		if (start < end && prealloc && !need_resched()) {
781 			next_node = rb_next(node);
782 			if (next_node) {
783 				state = rb_entry(next_node, struct extent_state,
784 						 rb_node);
785 				if (state->start == start)
786 					goto hit_next;
787 			}
788 		}
789 		goto search_again;
790 	}
791 
792 	/*
793 	 *     | ---- desired range ---- |
794 	 * | state |
795 	 *   or
796 	 * | ------------- state -------------- |
797 	 *
798 	 * We need to split the extent we found, and may flip bits on
799 	 * second half.
800 	 *
801 	 * If the extent we found extends past our
802 	 * range, we just split and search again.  It'll get split
803 	 * again the next time though.
804 	 *
805 	 * If the extent we found is inside our range, we set the
806 	 * desired bit on it.
807 	 */
808 	if (state->start < start) {
809 		if (state->state & exclusive_bits) {
810 			*failed_start = start;
811 			err = -EEXIST;
812 			goto out;
813 		}
814 		err = split_state(tree, state, prealloc, start);
815 		BUG_ON(err == -EEXIST);
816 		prealloc = NULL;
817 		if (err)
818 			goto out;
819 		if (state->end <= end) {
820 			err = set_state_bits(tree, state, &bits);
821 			if (err)
822 				goto out;
823 			cache_state(state, cached_state);
824 			merge_state(tree, state);
825 			if (last_end == (u64)-1)
826 				goto out;
827 			start = last_end + 1;
828 		}
829 		goto search_again;
830 	}
831 	/*
832 	 * | ---- desired range ---- |
833 	 *     | state | or               | state |
834 	 *
835 	 * There's a hole, we need to insert something in it and
836 	 * ignore the extent we found.
837 	 */
838 	if (state->start > start) {
839 		u64 this_end;
840 		if (end < last_start)
841 			this_end = end;
842 		else
843 			this_end = last_start - 1;
844 		err = insert_state(tree, prealloc, start, this_end,
845 				   &bits);
846 		BUG_ON(err == -EEXIST);
847 		if (err) {
848 			prealloc = NULL;
849 			goto out;
850 		}
851 		cache_state(prealloc, cached_state);
852 		prealloc = NULL;
853 		start = this_end + 1;
854 		goto search_again;
855 	}
856 	/*
857 	 * | ---- desired range ---- |
858 	 *                        | state |
859 	 * We need to split the extent, and set the bit
860 	 * on the first half
861 	 */
862 	if (state->start <= end && state->end > end) {
863 		if (state->state & exclusive_bits) {
864 			*failed_start = start;
865 			err = -EEXIST;
866 			goto out;
867 		}
868 		err = split_state(tree, state, prealloc, end + 1);
869 		BUG_ON(err == -EEXIST);
870 
871 		err = set_state_bits(tree, prealloc, &bits);
872 		if (err) {
873 			prealloc = NULL;
874 			goto out;
875 		}
876 		cache_state(prealloc, cached_state);
877 		merge_state(tree, prealloc);
878 		prealloc = NULL;
879 		goto out;
880 	}
881 
882 	goto search_again;
883 
884 out:
885 	spin_unlock(&tree->lock);
886 	if (prealloc)
887 		free_extent_state(prealloc);
888 
889 	return err;
890 
891 search_again:
892 	if (start > end)
893 		goto out;
894 	spin_unlock(&tree->lock);
895 	if (mask & __GFP_WAIT)
896 		cond_resched();
897 	goto again;
898 }
899 
900 /* wrappers around set/clear extent bit */
901 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
902 		     gfp_t mask)
903 {
904 	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
905 			      NULL, mask);
906 }
907 
908 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
909 		    int bits, gfp_t mask)
910 {
911 	return set_extent_bit(tree, start, end, bits, 0, NULL,
912 			      NULL, mask);
913 }
914 
915 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
916 		      int bits, gfp_t mask)
917 {
918 	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
919 }
920 
921 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
922 			struct extent_state **cached_state, gfp_t mask)
923 {
924 	return set_extent_bit(tree, start, end,
925 			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
926 			      0, NULL, cached_state, mask);
927 }
928 
929 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
930 		       gfp_t mask)
931 {
932 	return clear_extent_bit(tree, start, end,
933 				EXTENT_DIRTY | EXTENT_DELALLOC |
934 				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
935 }
936 
937 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
938 		     gfp_t mask)
939 {
940 	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
941 			      NULL, mask);
942 }
943 
944 static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
945 		       gfp_t mask)
946 {
947 	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
948 				NULL, mask);
949 }
950 
951 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
952 			struct extent_state **cached_state, gfp_t mask)
953 {
954 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
955 			      NULL, cached_state, mask);
956 }
957 
958 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
959 				 u64 end, struct extent_state **cached_state,
960 				 gfp_t mask)
961 {
962 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
963 				cached_state, mask);
964 }
965 
966 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
967 {
968 	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
969 }
970 
971 /*
972  * either insert or lock state struct between start and end.  Use mask to tell
973  * us if waiting is desired.
974  */
975 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
976 		     int bits, struct extent_state **cached_state, gfp_t mask)
977 {
978 	int err;
979 	u64 failed_start;
980 	while (1) {
981 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
982 				     EXTENT_LOCKED, &failed_start,
983 				     cached_state, mask);
984 		if (err == -EEXIST && (mask & __GFP_WAIT)) {
985 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
986 			start = failed_start;
987 		} else {
988 			break;
989 		}
990 		WARN_ON(start > end);
991 	}
992 	return err;
993 }
994 
995 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
996 {
997 	return lock_extent_bits(tree, start, end, 0, NULL, mask);
998 }
999 
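/*
 * try to lock the extent range without blocking.  Returns 1 if the
 * range was locked, or 0 if part of it was already locked, in which
 * case whatever was locked here is unlocked again.
 */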
1000 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1001 		    gfp_t mask)
1002 {
1003 	int err;
1004 	u64 failed_start;
1005 
1006 	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1007 			     &failed_start, NULL, mask);
1008 	if (err == -EEXIST) {
1009 		if (failed_start > start)
1010 			clear_extent_bit(tree, start, failed_start - 1,
1011 					 EXTENT_LOCKED, 1, 0, NULL, mask);
1012 		return 0;
1013 	}
1014 	return 1;
1015 }
1016 
1017 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1018 			 struct extent_state **cached, gfp_t mask)
1019 {
1020 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1021 				mask);
1022 }
1023 
1024 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1025 {
1026 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1027 				mask);
1028 }
1029 
1030 /*
1031  * helper function to set pages and extents in the tree dirty
1032  */
1033 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
1034 {
1035 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1036 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1037 	struct page *page;
1038 
1039 	while (index <= end_index) {
1040 		page = find_get_page(tree->mapping, index);
1041 		BUG_ON(!page);
1042 		__set_page_dirty_nobuffers(page);
1043 		page_cache_release(page);
1044 		index++;
1045 	}
1046 	return 0;
1047 }
1048 
1049 /*
1050  * helper function to set both pages and extents in the tree writeback
1051  */
1052 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1053 {
1054 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1055 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1056 	struct page *page;
1057 
1058 	while (index <= end_index) {
1059 		page = find_get_page(tree->mapping, index);
1060 		BUG_ON(!page);
1061 		set_page_writeback(page);
1062 		page_cache_release(page);
1063 		index++;
1064 	}
1065 	return 0;
1066 }
1067 
1068 /*
1069  * find the first offset in the io tree with 'bits' set. zero is
1070  * returned if we find something, and *start_ret and *end_ret are
1071  * set to reflect the state struct that was found.
1072  *
1073  * If nothing was found, 1 is returned, < 0 on error
1074  */
1075 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1076 			  u64 *start_ret, u64 *end_ret, int bits)
1077 {
1078 	struct rb_node *node;
1079 	struct extent_state *state;
1080 	int ret = 1;
1081 
1082 	spin_lock(&tree->lock);
1083 	/*
1084 	 * this search will find all the extents that end after
1085 	 * our range starts.
1086 	 */
1087 	node = tree_search(tree, start);
1088 	if (!node)
1089 		goto out;
1090 
1091 	while (1) {
1092 		state = rb_entry(node, struct extent_state, rb_node);
1093 		if (state->end >= start && (state->state & bits)) {
1094 			*start_ret = state->start;
1095 			*end_ret = state->end;
1096 			ret = 0;
1097 			break;
1098 		}
1099 		node = rb_next(node);
1100 		if (!node)
1101 			break;
1102 	}
1103 out:
1104 	spin_unlock(&tree->lock);
1105 	return ret;
1106 }
1107 
1108 /* find the first state struct with 'bits' set after 'start', and
1109  * return it.  tree->lock must be held.  NULL will be returned if
1110  * nothing was found after 'start'
1111  */
1112 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1113 						 u64 start, int bits)
1114 {
1115 	struct rb_node *node;
1116 	struct extent_state *state;
1117 
1118 	/*
1119 	 * this search will find all the extents that end after
1120 	 * our range starts.
1121 	 */
1122 	node = tree_search(tree, start);
1123 	if (!node)
1124 		goto out;
1125 
1126 	while (1) {
1127 		state = rb_entry(node, struct extent_state, rb_node);
1128 		if (state->end >= start && (state->state & bits))
1129 			return state;
1130 
1131 		node = rb_next(node);
1132 		if (!node)
1133 			break;
1134 	}
1135 out:
1136 	return NULL;
1137 }
1138 
1139 /*
1140  * find a contiguous range of bytes in the file marked as delalloc, not
1141  * more than 'max_bytes'.  start and end are used to return the range.
1142  *
1143  * 1 is returned if we find something, 0 if nothing was in the tree
1144  */
1145 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1146 					u64 *start, u64 *end, u64 max_bytes,
1147 					struct extent_state **cached_state)
1148 {
1149 	struct rb_node *node;
1150 	struct extent_state *state;
1151 	u64 cur_start = *start;
1152 	u64 found = 0;
1153 	u64 total_bytes = 0;
1154 
1155 	spin_lock(&tree->lock);
1156 
1157 	/*
1158 	 * this search will find all the extents that end after
1159 	 * our range starts.
1160 	 */
1161 	node = tree_search(tree, cur_start);
1162 	if (!node) {
1163 		if (!found)
1164 			*end = (u64)-1;
1165 		goto out;
1166 	}
1167 
1168 	while (1) {
1169 		state = rb_entry(node, struct extent_state, rb_node);
1170 		if (found && (state->start != cur_start ||
1171 			      (state->state & EXTENT_BOUNDARY))) {
1172 			goto out;
1173 		}
1174 		if (!(state->state & EXTENT_DELALLOC)) {
1175 			if (!found)
1176 				*end = state->end;
1177 			goto out;
1178 		}
1179 		if (!found) {
1180 			*start = state->start;
1181 			*cached_state = state;
1182 			atomic_inc(&state->refs);
1183 		}
1184 		found++;
1185 		*end = state->end;
1186 		cur_start = state->end + 1;
1187 		node = rb_next(node);
1188 		if (!node)
1189 			break;
1190 		total_bytes += state->end - state->start + 1;
1191 		if (total_bytes >= max_bytes)
1192 			break;
1193 	}
1194 out:
1195 	spin_unlock(&tree->lock);
1196 	return found;
1197 }
1198 
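/*
 * unlock all the pages in [start, end] except locked_page, which the
 * caller keeps locked.  The page references taken by
 * find_get_pages_contig are dropped as we go.
 */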
1199 static noinline int __unlock_for_delalloc(struct inode *inode,
1200 					  struct page *locked_page,
1201 					  u64 start, u64 end)
1202 {
1203 	int ret;
1204 	struct page *pages[16];
1205 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1206 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1207 	unsigned long nr_pages = end_index - index + 1;
1208 	int i;
1209 
1210 	if (index == locked_page->index && end_index == index)
1211 		return 0;
1212 
1213 	while (nr_pages > 0) {
1214 		ret = find_get_pages_contig(inode->i_mapping, index,
1215 				     min_t(unsigned long, nr_pages,
1216 				     ARRAY_SIZE(pages)), pages);
1217 		for (i = 0; i < ret; i++) {
1218 			if (pages[i] != locked_page)
1219 				unlock_page(pages[i]);
1220 			page_cache_release(pages[i]);
1221 		}
1222 		nr_pages -= ret;
1223 		index += ret;
1224 		cond_resched();
1225 	}
1226 	return 0;
1227 }
1228 
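/*
 * lock every page in the delalloc range except the one the caller
 * already holds.  Returns 0 on success, or -EAGAIN if a page has gone
 * missing or is no longer dirty, in which case the pages locked so far
 * are unlocked again.
 */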
1229 static noinline int lock_delalloc_pages(struct inode *inode,
1230 					struct page *locked_page,
1231 					u64 delalloc_start,
1232 					u64 delalloc_end)
1233 {
1234 	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1235 	unsigned long start_index = index;
1236 	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1237 	unsigned long pages_locked = 0;
1238 	struct page *pages[16];
1239 	unsigned long nrpages;
1240 	int ret;
1241 	int i;
1242 
1243 	/* the caller is responsible for locking the start index */
1244 	if (index == locked_page->index && index == end_index)
1245 		return 0;
1246 
1247 	/* skip the page at the start index */
1248 	nrpages = end_index - index + 1;
1249 	while (nrpages > 0) {
1250 		ret = find_get_pages_contig(inode->i_mapping, index,
1251 				     min_t(unsigned long,
1252 				     nrpages, ARRAY_SIZE(pages)), pages);
1253 		if (ret == 0) {
1254 			ret = -EAGAIN;
1255 			goto done;
1256 		}
1257 		/* now we have an array of pages, lock them all */
1258 		for (i = 0; i < ret; i++) {
1259 			/*
1260 			 * the caller is taking responsibility for
1261 			 * locked_page
1262 			 */
1263 			if (pages[i] != locked_page) {
1264 				lock_page(pages[i]);
1265 				if (!PageDirty(pages[i]) ||
1266 				    pages[i]->mapping != inode->i_mapping) {
1267 					ret = -EAGAIN;
1268 					unlock_page(pages[i]);
1269 					page_cache_release(pages[i]);
1270 					goto done;
1271 				}
1272 			}
1273 			page_cache_release(pages[i]);
1274 			pages_locked++;
1275 		}
1276 		nrpages -= ret;
1277 		index += ret;
1278 		cond_resched();
1279 	}
1280 	ret = 0;
1281 done:
1282 	if (ret && pages_locked) {
1283 		__unlock_for_delalloc(inode, locked_page,
1284 			      delalloc_start,
1285 			      ((u64)(start_index + pages_locked - 1)) <<
1286 			      PAGE_CACHE_SHIFT);
1287 	}
1288 	return ret;
1289 }
1290 
1291 /*
1292  * find a contiguous range of bytes in the file marked as delalloc, not
1293  * more than 'max_bytes'.  start and end are used to return the range.
1294  *
1295  * 1 is returned if we find something, 0 if nothing was in the tree
1296  */
1297 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1298 					     struct extent_io_tree *tree,
1299 					     struct page *locked_page,
1300 					     u64 *start, u64 *end,
1301 					     u64 max_bytes)
1302 {
1303 	u64 delalloc_start;
1304 	u64 delalloc_end;
1305 	u64 found;
1306 	struct extent_state *cached_state = NULL;
1307 	int ret;
1308 	int loops = 0;
1309 
1310 again:
1311 	/* step one, find a bunch of delalloc bytes starting at start */
1312 	delalloc_start = *start;
1313 	delalloc_end = 0;
1314 	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1315 				    max_bytes, &cached_state);
1316 	if (!found || delalloc_end <= *start) {
1317 		*start = delalloc_start;
1318 		*end = delalloc_end;
1319 		free_extent_state(cached_state);
1320 		return found;
1321 	}
1322 
1323 	/*
1324 	 * start comes from the offset of locked_page.  We have to lock
1325 	 * pages in order, so we can't process delalloc bytes before
1326 	 * locked_page
1327 	 */
1328 	if (delalloc_start < *start)
1329 		delalloc_start = *start;
1330 
1331 	/*
1332 	 * make sure to limit the number of pages we try to lock down
1333 	 * if we're looping.
1334 	 */
1335 	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1336 		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1337 
1338 	/* step two, lock all the pages after the page that has start */
1339 	ret = lock_delalloc_pages(inode, locked_page,
1340 				  delalloc_start, delalloc_end);
1341 	if (ret == -EAGAIN) {
1342 		/* some of the pages are gone, lets avoid looping by
1343 		 * shortening the size of the delalloc range we're searching
1344 		 */
1345 		free_extent_state(cached_state);
1346 		if (!loops) {
1347 			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1348 			max_bytes = PAGE_CACHE_SIZE - offset;
1349 			loops = 1;
1350 			goto again;
1351 		} else {
1352 			found = 0;
1353 			goto out_failed;
1354 		}
1355 	}
1356 	BUG_ON(ret);
1357 
1358 	/* step three, lock the state bits for the whole range */
1359 	lock_extent_bits(tree, delalloc_start, delalloc_end,
1360 			 0, &cached_state, GFP_NOFS);
1361 
1362 	/* then test to make sure it is all still delalloc */
1363 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1364 			     EXTENT_DELALLOC, 1, cached_state);
1365 	if (!ret) {
1366 		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1367 				     &cached_state, GFP_NOFS);
1368 		__unlock_for_delalloc(inode, locked_page,
1369 			      delalloc_start, delalloc_end);
1370 		cond_resched();
1371 		goto again;
1372 	}
1373 	free_extent_state(cached_state);
1374 	*start = delalloc_start;
1375 	*end = delalloc_end;
1376 out_failed:
1377 	return found;
1378 }
1379 
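/*
 * helper for the delalloc writeback paths: clear the bits requested in
 * 'op' from the io tree, then walk the pages in the range applying the
 * requested page operations (clear dirty, set/end writeback,
 * SetPagePrivate2, unlock).  locked_page is only released here, never
 * unlocked.
 */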
1380 int extent_clear_unlock_delalloc(struct inode *inode,
1381 				struct extent_io_tree *tree,
1382 				u64 start, u64 end, struct page *locked_page,
1383 				unsigned long op)
1384 {
1385 	int ret;
1386 	struct page *pages[16];
1387 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1388 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1389 	unsigned long nr_pages = end_index - index + 1;
1390 	int i;
1391 	int clear_bits = 0;
1392 
1393 	if (op & EXTENT_CLEAR_UNLOCK)
1394 		clear_bits |= EXTENT_LOCKED;
1395 	if (op & EXTENT_CLEAR_DIRTY)
1396 		clear_bits |= EXTENT_DIRTY;
1397 
1398 	if (op & EXTENT_CLEAR_DELALLOC)
1399 		clear_bits |= EXTENT_DELALLOC;
1400 
1401 	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1402 	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1403 		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1404 		    EXTENT_SET_PRIVATE2)))
1405 		return 0;
1406 
1407 	while (nr_pages > 0) {
1408 		ret = find_get_pages_contig(inode->i_mapping, index,
1409 				     min_t(unsigned long,
1410 				     nr_pages, ARRAY_SIZE(pages)), pages);
1411 		for (i = 0; i < ret; i++) {
1412 
1413 			if (op & EXTENT_SET_PRIVATE2)
1414 				SetPagePrivate2(pages[i]);
1415 
1416 			if (pages[i] == locked_page) {
1417 				page_cache_release(pages[i]);
1418 				continue;
1419 			}
1420 			if (op & EXTENT_CLEAR_DIRTY)
1421 				clear_page_dirty_for_io(pages[i]);
1422 			if (op & EXTENT_SET_WRITEBACK)
1423 				set_page_writeback(pages[i]);
1424 			if (op & EXTENT_END_WRITEBACK)
1425 				end_page_writeback(pages[i]);
1426 			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1427 				unlock_page(pages[i]);
1428 			page_cache_release(pages[i]);
1429 		}
1430 		nr_pages -= ret;
1431 		index += ret;
1432 		cond_resched();
1433 	}
1434 	return 0;
1435 }
1436 
1437 /*
1438  * count the number of bytes in the tree that have a given bit(s)
1439  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1440  * cached.  The total number found is returned.
1441  */
1442 u64 count_range_bits(struct extent_io_tree *tree,
1443 		     u64 *start, u64 search_end, u64 max_bytes,
1444 		     unsigned long bits, int contig)
1445 {
1446 	struct rb_node *node;
1447 	struct extent_state *state;
1448 	u64 cur_start = *start;
1449 	u64 total_bytes = 0;
1450 	u64 last = 0;
1451 	int found = 0;
1452 
1453 	if (search_end <= cur_start) {
1454 		WARN_ON(1);
1455 		return 0;
1456 	}
1457 
1458 	spin_lock(&tree->lock);
1459 	if (cur_start == 0 && bits == EXTENT_DIRTY) {
1460 		total_bytes = tree->dirty_bytes;
1461 		goto out;
1462 	}
1463 	/*
1464 	 * this search will find all the extents that end after
1465 	 * our range starts.
1466 	 */
1467 	node = tree_search(tree, cur_start);
1468 	if (!node)
1469 		goto out;
1470 
1471 	while (1) {
1472 		state = rb_entry(node, struct extent_state, rb_node);
1473 		if (state->start > search_end)
1474 			break;
1475 		if (contig && found && state->start > last + 1)
1476 			break;
1477 		if (state->end >= cur_start && (state->state & bits) == bits) {
1478 			total_bytes += min(search_end, state->end) + 1 -
1479 				       max(cur_start, state->start);
1480 			if (total_bytes >= max_bytes)
1481 				break;
1482 			if (!found) {
1483 				*start = state->start;
1484 				found = 1;
1485 			}
1486 			last = state->end;
1487 		} else if (contig && found) {
1488 			break;
1489 		}
1490 		node = rb_next(node);
1491 		if (!node)
1492 			break;
1493 	}
1494 out:
1495 	spin_unlock(&tree->lock);
1496 	return total_bytes;
1497 }
1498 
1499 /*
1500  * set the private field for a given byte offset in the tree.  If there isn't
1501  * an extent_state there already, this does nothing.
1502  */
1503 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1504 {
1505 	struct rb_node *node;
1506 	struct extent_state *state;
1507 	int ret = 0;
1508 
1509 	spin_lock(&tree->lock);
1510 	/*
1511 	 * this search will find all the extents that end after
1512 	 * our range starts.
1513 	 */
1514 	node = tree_search(tree, start);
1515 	if (!node) {
1516 		ret = -ENOENT;
1517 		goto out;
1518 	}
1519 	state = rb_entry(node, struct extent_state, rb_node);
1520 	if (state->start != start) {
1521 		ret = -ENOENT;
1522 		goto out;
1523 	}
1524 	state->private = private;
1525 out:
1526 	spin_unlock(&tree->lock);
1527 	return ret;
1528 }
1529 
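/*
 * read back the private field stored for the extent_state that starts
 * exactly at 'start'.  Returns -ENOENT if there is no such state.
 */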
1530 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1531 {
1532 	struct rb_node *node;
1533 	struct extent_state *state;
1534 	int ret = 0;
1535 
1536 	spin_lock(&tree->lock);
1537 	/*
1538 	 * this search will find all the extents that end after
1539 	 * our range starts.
1540 	 */
1541 	node = tree_search(tree, start);
1542 	if (!node) {
1543 		ret = -ENOENT;
1544 		goto out;
1545 	}
1546 	state = rb_entry(node, struct extent_state, rb_node);
1547 	if (state->start != start) {
1548 		ret = -ENOENT;
1549 		goto out;
1550 	}
1551 	*private = state->private;
1552 out:
1553 	spin_unlock(&tree->lock);
1554 	return ret;
1555 }
1556 
1557 /*
1558  * searches a range in the state tree for a given mask.
1559  * If 'filled' == 1, this returns 1 only if every extent in the range
1560  * has the bits set.  Otherwise, 1 is returned if any bit in the
1561  * range is found set.
1562  */
1563 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1564 		   int bits, int filled, struct extent_state *cached)
1565 {
1566 	struct extent_state *state = NULL;
1567 	struct rb_node *node;
1568 	int bitset = 0;
1569 
1570 	spin_lock(&tree->lock);
1571 	if (cached && cached->tree && cached->start == start)
1572 		node = &cached->rb_node;
1573 	else
1574 		node = tree_search(tree, start);
1575 	while (node && start <= end) {
1576 		state = rb_entry(node, struct extent_state, rb_node);
1577 
1578 		if (filled && state->start > start) {
1579 			bitset = 0;
1580 			break;
1581 		}
1582 
1583 		if (state->start > end)
1584 			break;
1585 
1586 		if (state->state & bits) {
1587 			bitset = 1;
1588 			if (!filled)
1589 				break;
1590 		} else if (filled) {
1591 			bitset = 0;
1592 			break;
1593 		}
1594 
1595 		if (state->end == (u64)-1)
1596 			break;
1597 
1598 		start = state->end + 1;
1599 		if (start > end)
1600 			break;
1601 		node = rb_next(node);
1602 		if (!node) {
1603 			if (filled)
1604 				bitset = 0;
1605 			break;
1606 		}
1607 	}
1608 	spin_unlock(&tree->lock);
1609 	return bitset;
1610 }
1611 
1612 /*
1613  * helper function to set a given page up to date if all the
1614  * extents in the tree for that page are up to date
1615  */
1616 static int check_page_uptodate(struct extent_io_tree *tree,
1617 			       struct page *page)
1618 {
1619 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1620 	u64 end = start + PAGE_CACHE_SIZE - 1;
1621 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1622 		SetPageUptodate(page);
1623 	return 0;
1624 }
1625 
1626 /*
1627  * helper function to unlock a page if all the extents in the tree
1628  * for that page are unlocked
1629  */
1630 static int check_page_locked(struct extent_io_tree *tree,
1631 			     struct page *page)
1632 {
1633 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1634 	u64 end = start + PAGE_CACHE_SIZE - 1;
1635 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1636 		unlock_page(page);
1637 	return 0;
1638 }
1639 
1640 /*
1641  * helper function to end page writeback if all the extents
1642  * in the tree for that page are done with writeback
1643  */
1644 static int check_page_writeback(struct extent_io_tree *tree,
1645 			     struct page *page)
1646 {
1647 	end_page_writeback(page);
1648 	return 0;
1649 }
1650 
1651 /* lots and lots of room for performance fixes in the end_bio funcs */
1652 
1653 /*
1654  * after a writepage IO is done, we need to:
1655  * clear the uptodate bits on error
1656  * clear the writeback bits in the extent tree for this IO
1657  * end_page_writeback if the page has no more pending IO
1658  *
1659  * Scheduling is not allowed, so the extent state tree is expected
1660  * to have one and only one object corresponding to this IO.
1661  */
1662 static void end_bio_extent_writepage(struct bio *bio, int err)
1663 {
1664 	int uptodate = err == 0;
1665 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1666 	struct extent_io_tree *tree;
1667 	u64 start;
1668 	u64 end;
1669 	int whole_page;
1670 	int ret;
1671 
1672 	do {
1673 		struct page *page = bvec->bv_page;
1674 		tree = &BTRFS_I(page->mapping->host)->io_tree;
1675 
1676 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1677 			 bvec->bv_offset;
1678 		end = start + bvec->bv_len - 1;
1679 
1680 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1681 			whole_page = 1;
1682 		else
1683 			whole_page = 0;
1684 
1685 		if (--bvec >= bio->bi_io_vec)
1686 			prefetchw(&bvec->bv_page->flags);
1687 		if (tree->ops && tree->ops->writepage_end_io_hook) {
1688 			ret = tree->ops->writepage_end_io_hook(page, start,
1689 						       end, NULL, uptodate);
1690 			if (ret)
1691 				uptodate = 0;
1692 		}
1693 
1694 		if (!uptodate && tree->ops &&
1695 		    tree->ops->writepage_io_failed_hook) {
1696 			ret = tree->ops->writepage_io_failed_hook(bio, page,
1697 							 start, end, NULL);
1698 			if (ret == 0) {
1699 				uptodate = (err == 0);
1700 				continue;
1701 			}
1702 		}
1703 
1704 		if (!uptodate) {
1705 			clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
1706 			ClearPageUptodate(page);
1707 			SetPageError(page);
1708 		}
1709 
1710 		if (whole_page)
1711 			end_page_writeback(page);
1712 		else
1713 			check_page_writeback(tree, page);
1714 	} while (bvec >= bio->bi_io_vec);
1715 
1716 	bio_put(bio);
1717 }
1718 
1719 /*
1720  * after a readpage IO is done, we need to:
1721  * clear the uptodate bits on error
1722  * set the uptodate bits if things worked
1723  * set the page up to date if all extents in the tree are uptodate
1724  * clear the lock bit in the extent tree
1725  * unlock the page if there are no other extents locked for it
1726  *
1727  * Scheduling is not allowed, so the extent state tree is expected
1728  * to have one and only one object corresponding to this IO.
1729  */
1730 static void end_bio_extent_readpage(struct bio *bio, int err)
1731 {
1732 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1733 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1734 	struct bio_vec *bvec = bio->bi_io_vec;
1735 	struct extent_io_tree *tree;
1736 	u64 start;
1737 	u64 end;
1738 	int whole_page;
1739 	int ret;
1740 
1741 	if (err)
1742 		uptodate = 0;
1743 
1744 	do {
1745 		struct page *page = bvec->bv_page;
1746 		struct extent_state *cached = NULL;
1747 		struct extent_state *state;
1748 
1749 		tree = &BTRFS_I(page->mapping->host)->io_tree;
1750 
1751 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1752 			bvec->bv_offset;
1753 		end = start + bvec->bv_len - 1;
1754 
1755 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1756 			whole_page = 1;
1757 		else
1758 			whole_page = 0;
1759 
1760 		if (++bvec <= bvec_end)
1761 			prefetchw(&bvec->bv_page->flags);
1762 
1763 		spin_lock(&tree->lock);
1764 		state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
1765 		if (state && state->start == start) {
1766 			/*
1767 			 * take a reference on the state, unlock will drop
1768 			 * the ref
1769 			 */
1770 			cache_state(state, &cached);
1771 		}
1772 		spin_unlock(&tree->lock);
1773 
1774 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1775 			ret = tree->ops->readpage_end_io_hook(page, start, end,
1776 							      state);
1777 			if (ret)
1778 				uptodate = 0;
1779 		}
1780 		if (!uptodate && tree->ops &&
1781 		    tree->ops->readpage_io_failed_hook) {
1782 			ret = tree->ops->readpage_io_failed_hook(bio, page,
1783 							 start, end, NULL);
1784 			if (ret == 0) {
1785 				uptodate =
1786 					test_bit(BIO_UPTODATE, &bio->bi_flags);
1787 				if (err)
1788 					uptodate = 0;
1789 				uncache_state(&cached);
1790 				continue;
1791 			}
1792 		}
1793 
1794 		if (uptodate) {
1795 			set_extent_uptodate(tree, start, end, &cached,
1796 					    GFP_ATOMIC);
1797 		}
1798 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1799 
1800 		if (whole_page) {
1801 			if (uptodate) {
1802 				SetPageUptodate(page);
1803 			} else {
1804 				ClearPageUptodate(page);
1805 				SetPageError(page);
1806 			}
1807 			unlock_page(page);
1808 		} else {
1809 			if (uptodate) {
1810 				check_page_uptodate(tree, page);
1811 			} else {
1812 				ClearPageUptodate(page);
1813 				SetPageError(page);
1814 			}
1815 			check_page_locked(tree, page);
1816 		}
1817 	} while (bvec <= bvec_end);
1818 
1819 	bio_put(bio);
1820 }
1821 
1822 /*
1823  * IO done from prepare_write is pretty simple, we just unlock
1824  * the structs in the extent tree when done, and set the uptodate bits
1825  * as appropriate.
1826  */
1827 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1828 {
1829 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1830 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1831 	struct extent_io_tree *tree;
1832 	u64 start;
1833 	u64 end;
1834 
1835 	do {
1836 		struct page *page = bvec->bv_page;
1837 		struct extent_state *cached = NULL;
1838 		tree = &BTRFS_I(page->mapping->host)->io_tree;
1839 
1840 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1841 			bvec->bv_offset;
1842 		end = start + bvec->bv_len - 1;
1843 
1844 		if (--bvec >= bio->bi_io_vec)
1845 			prefetchw(&bvec->bv_page->flags);
1846 
1847 		if (uptodate) {
1848 			set_extent_uptodate(tree, start, end, &cached,
1849 					    GFP_ATOMIC);
1850 		} else {
1851 			ClearPageUptodate(page);
1852 			SetPageError(page);
1853 		}
1854 
1855 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1856 
1857 	} while (bvec >= bio->bi_io_vec);
1858 
1859 	bio_put(bio);
1860 }
1861 
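/*
 * allocate a bio for the given sector and device.  If the allocation fails
 * while we're in the memalloc path, retry with progressively fewer vecs so
 * IO can still make forward progress under memory pressure.
 */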
1862 struct bio *
1863 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1864 		gfp_t gfp_flags)
1865 {
1866 	struct bio *bio;
1867 
1868 	bio = bio_alloc(gfp_flags, nr_vecs);
1869 
1870 	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1871 		while (!bio && (nr_vecs /= 2))
1872 			bio = bio_alloc(gfp_flags, nr_vecs);
1873 	}
1874 
1875 	if (bio) {
1876 		bio->bi_size = 0;
1877 		bio->bi_bdev = bdev;
1878 		bio->bi_sector = first_sector;
1879 	}
1880 	return bio;
1881 }
1882 
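/*
 * hand a completed bio to the block layer, either through the tree's
 * submit_bio_hook or directly via submit_bio when no hook is registered
 */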
1883 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1884 			  unsigned long bio_flags)
1885 {
1886 	int ret = 0;
1887 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1888 	struct page *page = bvec->bv_page;
1889 	struct extent_io_tree *tree = bio->bi_private;
1890 	u64 start;
1891 
1892 	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1893 
1894 	bio->bi_private = NULL;
1895 
1896 	bio_get(bio);
1897 
1898 	if (tree->ops && tree->ops->submit_bio_hook)
1899 		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1900 					   mirror_num, bio_flags, start);
1901 	else
1902 		submit_bio(rw, bio);
1903 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
1904 		ret = -EOPNOTSUPP;
1905 	bio_put(bio);
1906 	return ret;
1907 }
1908 
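/*
 * try to add a page to the bio cached in bio_ret.  If the page isn't
 * contiguous with the pending bio, the bio flags differ, the merge hook
 * rejects it, or bio_add_page can't fit it, the old bio is submitted and a
 * new one is started for this page.
 */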
1909 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1910 			      struct page *page, sector_t sector,
1911 			      size_t size, unsigned long offset,
1912 			      struct block_device *bdev,
1913 			      struct bio **bio_ret,
1914 			      unsigned long max_pages,
1915 			      bio_end_io_t end_io_func,
1916 			      int mirror_num,
1917 			      unsigned long prev_bio_flags,
1918 			      unsigned long bio_flags)
1919 {
1920 	int ret = 0;
1921 	struct bio *bio;
1922 	int nr;
1923 	int contig = 0;
1924 	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1925 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1926 	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1927 
1928 	if (bio_ret && *bio_ret) {
1929 		bio = *bio_ret;
1930 		if (old_compressed)
1931 			contig = bio->bi_sector == sector;
1932 		else
1933 			contig = bio->bi_sector + (bio->bi_size >> 9) ==
1934 				sector;
1935 
1936 		if (prev_bio_flags != bio_flags || !contig ||
1937 		    (tree->ops && tree->ops->merge_bio_hook &&
1938 		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
1939 					       bio_flags)) ||
1940 		    bio_add_page(bio, page, page_size, offset) < page_size) {
1941 			ret = submit_one_bio(rw, bio, mirror_num,
1942 					     prev_bio_flags);
1943 			bio = NULL;
1944 		} else {
1945 			return 0;
1946 		}
1947 	}
1948 	if (this_compressed)
1949 		nr = BIO_MAX_PAGES;
1950 	else
1951 		nr = bio_get_nr_vecs(bdev);
1952 
1953 	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1954 	if (!bio)
1955 		return -ENOMEM;
1956 
1957 	bio_add_page(bio, page, page_size, offset);
1958 	bio->bi_end_io = end_io_func;
1959 	bio->bi_private = tree;
1960 
1961 	if (bio_ret)
1962 		*bio_ret = bio;
1963 	else
1964 		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1965 
1966 	return ret;
1967 }
1968 
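/*
 * mark a page as participating in extent IO: take a page reference and
 * stash a marker in page->private so releasepage knows who owns it
 */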
1969 void set_page_extent_mapped(struct page *page)
1970 {
1971 	if (!PagePrivate(page)) {
1972 		SetPagePrivate(page);
1973 		page_cache_get(page);
1974 		set_page_private(page, EXTENT_PAGE_PRIVATE);
1975 	}
1976 }
1977 
1978 static void set_page_extent_head(struct page *page, unsigned long len)
1979 {
1980 	WARN_ON(!PagePrivate(page));
1981 	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1982 }
1983 
1984 /*
1985  * basic readpage implementation.  Locked extent state structs are inserted
1986  * into the tree; they are removed when the IO is done (by the end_io
1987  * handlers)
1988  */
1989 static int __extent_read_full_page(struct extent_io_tree *tree,
1990 				   struct page *page,
1991 				   get_extent_t *get_extent,
1992 				   struct bio **bio, int mirror_num,
1993 				   unsigned long *bio_flags)
1994 {
1995 	struct inode *inode = page->mapping->host;
1996 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1997 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1998 	u64 end;
1999 	u64 cur = start;
2000 	u64 extent_offset;
2001 	u64 last_byte = i_size_read(inode);
2002 	u64 block_start;
2003 	u64 cur_end;
2004 	sector_t sector;
2005 	struct extent_map *em;
2006 	struct block_device *bdev;
2007 	struct btrfs_ordered_extent *ordered;
2008 	int ret;
2009 	int nr = 0;
2010 	size_t page_offset = 0;
2011 	size_t iosize;
2012 	size_t disk_io_size;
2013 	size_t blocksize = inode->i_sb->s_blocksize;
2014 	unsigned long this_bio_flag = 0;
2015 
2016 	set_page_extent_mapped(page);
2017 
2018 	end = page_end;
2019 	while (1) {
2020 		lock_extent(tree, start, end, GFP_NOFS);
2021 		ordered = btrfs_lookup_ordered_extent(inode, start);
2022 		if (!ordered)
2023 			break;
2024 		unlock_extent(tree, start, end, GFP_NOFS);
2025 		btrfs_start_ordered_extent(inode, ordered, 1);
2026 		btrfs_put_ordered_extent(ordered);
2027 	}
2028 
2029 	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2030 		char *userpage;
2031 		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2032 
2033 		if (zero_offset) {
2034 			iosize = PAGE_CACHE_SIZE - zero_offset;
2035 			userpage = kmap_atomic(page, KM_USER0);
2036 			memset(userpage + zero_offset, 0, iosize);
2037 			flush_dcache_page(page);
2038 			kunmap_atomic(userpage, KM_USER0);
2039 		}
2040 	}
2041 	while (cur <= end) {
2042 		if (cur >= last_byte) {
2043 			char *userpage;
2044 			struct extent_state *cached = NULL;
2045 
2046 			iosize = PAGE_CACHE_SIZE - page_offset;
2047 			userpage = kmap_atomic(page, KM_USER0);
2048 			memset(userpage + page_offset, 0, iosize);
2049 			flush_dcache_page(page);
2050 			kunmap_atomic(userpage, KM_USER0);
2051 			set_extent_uptodate(tree, cur, cur + iosize - 1,
2052 					    &cached, GFP_NOFS);
2053 			unlock_extent_cached(tree, cur, cur + iosize - 1,
2054 					     &cached, GFP_NOFS);
2055 			break;
2056 		}
2057 		em = get_extent(inode, page, page_offset, cur,
2058 				end - cur + 1, 0);
2059 		if (IS_ERR(em) || !em) {
2060 			SetPageError(page);
2061 			unlock_extent(tree, cur, end, GFP_NOFS);
2062 			break;
2063 		}
2064 		extent_offset = cur - em->start;
2065 		BUG_ON(extent_map_end(em) <= cur);
2066 		BUG_ON(end < cur);
2067 
2068 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2069 			this_bio_flag = EXTENT_BIO_COMPRESSED;
2070 			extent_set_compress_type(&this_bio_flag,
2071 						 em->compress_type);
2072 		}
2073 
2074 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2075 		cur_end = min(extent_map_end(em) - 1, end);
2076 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2077 		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2078 			disk_io_size = em->block_len;
2079 			sector = em->block_start >> 9;
2080 		} else {
2081 			sector = (em->block_start + extent_offset) >> 9;
2082 			disk_io_size = iosize;
2083 		}
2084 		bdev = em->bdev;
2085 		block_start = em->block_start;
2086 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2087 			block_start = EXTENT_MAP_HOLE;
2088 		free_extent_map(em);
2089 		em = NULL;
2090 
2091 		/* we've found a hole, just zero and go on */
2092 		if (block_start == EXTENT_MAP_HOLE) {
2093 			char *userpage;
2094 			struct extent_state *cached = NULL;
2095 
2096 			userpage = kmap_atomic(page, KM_USER0);
2097 			memset(userpage + page_offset, 0, iosize);
2098 			flush_dcache_page(page);
2099 			kunmap_atomic(userpage, KM_USER0);
2100 
2101 			set_extent_uptodate(tree, cur, cur + iosize - 1,
2102 					    &cached, GFP_NOFS);
2103 			unlock_extent_cached(tree, cur, cur + iosize - 1,
2104 			                     &cached, GFP_NOFS);
2105 			cur = cur + iosize;
2106 			page_offset += iosize;
2107 			continue;
2108 		}
2109 		/* the get_extent function already copied into the page */
2110 		if (test_range_bit(tree, cur, cur_end,
2111 				   EXTENT_UPTODATE, 1, NULL)) {
2112 			check_page_uptodate(tree, page);
2113 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2114 			cur = cur + iosize;
2115 			page_offset += iosize;
2116 			continue;
2117 		}
2118 		/* we have an inline extent but it didn't get marked up
2119 		 * to date.  Error out
2120 		 */
2121 		if (block_start == EXTENT_MAP_INLINE) {
2122 			SetPageError(page);
2123 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2124 			cur = cur + iosize;
2125 			page_offset += iosize;
2126 			continue;
2127 		}
2128 
2129 		ret = 0;
2130 		if (tree->ops && tree->ops->readpage_io_hook) {
2131 			ret = tree->ops->readpage_io_hook(page, cur,
2132 							  cur + iosize - 1);
2133 		}
2134 		if (!ret) {
2135 			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2136 			pnr -= page->index;
2137 			ret = submit_extent_page(READ, tree, page,
2138 					 sector, disk_io_size, page_offset,
2139 					 bdev, bio, pnr,
2140 					 end_bio_extent_readpage, mirror_num,
2141 					 *bio_flags,
2142 					 this_bio_flag);
2143 			nr++;
2144 			*bio_flags = this_bio_flag;
2145 		}
2146 		if (ret)
2147 			SetPageError(page);
2148 		cur = cur + iosize;
2149 		page_offset += iosize;
2150 	}
2151 	if (!nr) {
2152 		if (!PageError(page))
2153 			SetPageUptodate(page);
2154 		unlock_page(page);
2155 	}
2156 	return 0;
2157 }
2158 
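/*
 * read a single full page, submitting any bio that __extent_read_full_page
 * built up before returning
 */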
2159 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2160 			    get_extent_t *get_extent)
2161 {
2162 	struct bio *bio = NULL;
2163 	unsigned long bio_flags = 0;
2164 	int ret;
2165 
2166 	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2167 				      &bio_flags);
2168 	if (bio)
2169 		ret = submit_one_bio(READ, bio, 0, bio_flags);
2170 	return ret;
2171 }
2172 
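/*
 * writeback bookkeeping helper: charge nr_written pages against
 * wbc->nr_to_write and, for cyclic or whole-file writeback, advance the
 * mapping's writeback_index past the pages just handled
 */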
2173 static noinline void update_nr_written(struct page *page,
2174 				      struct writeback_control *wbc,
2175 				      unsigned long nr_written)
2176 {
2177 	wbc->nr_to_write -= nr_written;
2178 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2179 	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2180 		page->mapping->writeback_index = page->index + nr_written;
2181 }
2182 
2183 /*
2184  * the writepage semantics are similar to regular writepage.  extent
2185  * records are inserted to lock ranges in the tree, and as dirty areas
2186  * are found, they are marked writeback.  Then the lock bits are removed
2187  * and the end_io handler clears the writeback ranges
2188  */
2189 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2190 			      void *data)
2191 {
2192 	struct inode *inode = page->mapping->host;
2193 	struct extent_page_data *epd = data;
2194 	struct extent_io_tree *tree = epd->tree;
2195 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2196 	u64 delalloc_start;
2197 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2198 	u64 end;
2199 	u64 cur = start;
2200 	u64 extent_offset;
2201 	u64 last_byte = i_size_read(inode);
2202 	u64 block_start;
2203 	u64 iosize;
2204 	sector_t sector;
2205 	struct extent_state *cached_state = NULL;
2206 	struct extent_map *em;
2207 	struct block_device *bdev;
2208 	int ret;
2209 	int nr = 0;
2210 	size_t pg_offset = 0;
2211 	size_t blocksize;
2212 	loff_t i_size = i_size_read(inode);
2213 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2214 	u64 nr_delalloc;
2215 	u64 delalloc_end;
2216 	int page_started;
2217 	int compressed;
2218 	int write_flags;
2219 	unsigned long nr_written = 0;
2220 
2221 	if (wbc->sync_mode == WB_SYNC_ALL)
2222 		write_flags = WRITE_SYNC;
2223 	else
2224 		write_flags = WRITE;
2225 
2226 	trace___extent_writepage(page, inode, wbc);
2227 
2228 	WARN_ON(!PageLocked(page));
2229 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2230 	if (page->index > end_index ||
2231 	   (page->index == end_index && !pg_offset)) {
2232 		page->mapping->a_ops->invalidatepage(page, 0);
2233 		unlock_page(page);
2234 		return 0;
2235 	}
2236 
2237 	if (page->index == end_index) {
2238 		char *userpage;
2239 
2240 		userpage = kmap_atomic(page, KM_USER0);
2241 		memset(userpage + pg_offset, 0,
2242 		       PAGE_CACHE_SIZE - pg_offset);
2243 		kunmap_atomic(userpage, KM_USER0);
2244 		flush_dcache_page(page);
2245 	}
2246 	pg_offset = 0;
2247 
2248 	set_page_extent_mapped(page);
2249 
2250 	delalloc_start = start;
2251 	delalloc_end = 0;
2252 	page_started = 0;
2253 	if (!epd->extent_locked) {
2254 		u64 delalloc_to_write = 0;
2255 		/*
2256 		 * make sure the wbc mapping index is at least updated
2257 		 * to this page.
2258 		 */
2259 		update_nr_written(page, wbc, 0);
2260 
2261 		while (delalloc_end < page_end) {
2262 			nr_delalloc = find_lock_delalloc_range(inode, tree,
2263 						       page,
2264 						       &delalloc_start,
2265 						       &delalloc_end,
2266 						       128 * 1024 * 1024);
2267 			if (nr_delalloc == 0) {
2268 				delalloc_start = delalloc_end + 1;
2269 				continue;
2270 			}
2271 			tree->ops->fill_delalloc(inode, page, delalloc_start,
2272 						 delalloc_end, &page_started,
2273 						 &nr_written);
2274 			/*
2275 			 * delalloc_end is already one less than the total
2276 			 * length, so we don't subtract one from
2277 			 * PAGE_CACHE_SIZE
2278 			 */
2279 			delalloc_to_write += (delalloc_end - delalloc_start +
2280 					      PAGE_CACHE_SIZE) >>
2281 					      PAGE_CACHE_SHIFT;
2282 			delalloc_start = delalloc_end + 1;
2283 		}
2284 		if (wbc->nr_to_write < delalloc_to_write) {
2285 			int thresh = 8192;
2286 
2287 			if (delalloc_to_write < thresh * 2)
2288 				thresh = delalloc_to_write;
2289 			wbc->nr_to_write = min_t(u64, delalloc_to_write,
2290 						 thresh);
2291 		}
2292 
2293 		/* did the fill delalloc function already unlock and start
2294 		 * the IO?
2295 		 */
2296 		if (page_started) {
2297 			ret = 0;
2298 			/*
2299 			 * we've unlocked the page, so we can't update
2300 			 * the mapping's writeback index, just update
2301 			 * nr_to_write.
2302 			 */
2303 			wbc->nr_to_write -= nr_written;
2304 			goto done_unlocked;
2305 		}
2306 	}
2307 	if (tree->ops && tree->ops->writepage_start_hook) {
2308 		ret = tree->ops->writepage_start_hook(page, start,
2309 						      page_end);
2310 		if (ret == -EAGAIN) {
2311 			redirty_page_for_writepage(wbc, page);
2312 			update_nr_written(page, wbc, nr_written);
2313 			unlock_page(page);
2314 			ret = 0;
2315 			goto done_unlocked;
2316 		}
2317 	}
2318 
2319 	/*
2320 	 * we don't want to touch the inode after unlocking the page,
2321 	 * so we update the mapping writeback index now
2322 	 */
2323 	update_nr_written(page, wbc, nr_written + 1);
2324 
2325 	end = page_end;
2326 	if (last_byte <= start) {
2327 		if (tree->ops && tree->ops->writepage_end_io_hook)
2328 			tree->ops->writepage_end_io_hook(page, start,
2329 							 page_end, NULL, 1);
2330 		goto done;
2331 	}
2332 
2333 	blocksize = inode->i_sb->s_blocksize;
2334 
2335 	while (cur <= end) {
2336 		if (cur >= last_byte) {
2337 			if (tree->ops && tree->ops->writepage_end_io_hook)
2338 				tree->ops->writepage_end_io_hook(page, cur,
2339 							 page_end, NULL, 1);
2340 			break;
2341 		}
2342 		em = epd->get_extent(inode, page, pg_offset, cur,
2343 				     end - cur + 1, 1);
2344 		if (IS_ERR(em) || !em) {
2345 			SetPageError(page);
2346 			break;
2347 		}
2348 
2349 		extent_offset = cur - em->start;
2350 		BUG_ON(extent_map_end(em) <= cur);
2351 		BUG_ON(end < cur);
2352 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2353 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2354 		sector = (em->block_start + extent_offset) >> 9;
2355 		bdev = em->bdev;
2356 		block_start = em->block_start;
2357 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2358 		free_extent_map(em);
2359 		em = NULL;
2360 
2361 		/*
2362 		 * compressed and inline extents are written through other
2363 		 * paths in the FS
2364 		 */
2365 		if (compressed || block_start == EXTENT_MAP_HOLE ||
2366 		    block_start == EXTENT_MAP_INLINE) {
2367 			/*
2368 			 * end_io notification does not happen here for
2369 			 * compressed extents
2370 			 */
2371 			if (!compressed && tree->ops &&
2372 			    tree->ops->writepage_end_io_hook)
2373 				tree->ops->writepage_end_io_hook(page, cur,
2374 							 cur + iosize - 1,
2375 							 NULL, 1);
2376 			else if (compressed) {
2377 				/* we don't want to end_page_writeback on
2378 				 * a compressed extent.  this happens
2379 				 * elsewhere
2380 				 */
2381 				nr++;
2382 			}
2383 
2384 			cur += iosize;
2385 			pg_offset += iosize;
2386 			continue;
2387 		}
2388 		/* leave this out until we have a page_mkwrite call */
2389 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2390 				   EXTENT_DIRTY, 0, NULL)) {
2391 			cur = cur + iosize;
2392 			pg_offset += iosize;
2393 			continue;
2394 		}
2395 
2396 		if (tree->ops && tree->ops->writepage_io_hook) {
2397 			ret = tree->ops->writepage_io_hook(page, cur,
2398 						cur + iosize - 1);
2399 		} else {
2400 			ret = 0;
2401 		}
2402 		if (ret) {
2403 			SetPageError(page);
2404 		} else {
2405 			unsigned long max_nr = end_index + 1;
2406 
2407 			set_range_writeback(tree, cur, cur + iosize - 1);
2408 			if (!PageWriteback(page)) {
2409 				printk(KERN_ERR "btrfs warning page %lu not "
2410 				       "writeback, cur %llu end %llu\n",
2411 				       page->index, (unsigned long long)cur,
2412 				       (unsigned long long)end);
2413 			}
2414 
2415 			ret = submit_extent_page(write_flags, tree, page,
2416 						 sector, iosize, pg_offset,
2417 						 bdev, &epd->bio, max_nr,
2418 						 end_bio_extent_writepage,
2419 						 0, 0, 0);
2420 			if (ret)
2421 				SetPageError(page);
2422 		}
2423 		cur = cur + iosize;
2424 		pg_offset += iosize;
2425 		nr++;
2426 	}
2427 done:
2428 	if (nr == 0) {
2429 		/* make sure the mapping tag for page dirty gets cleared */
2430 		set_page_writeback(page);
2431 		end_page_writeback(page);
2432 	}
2433 	unlock_page(page);
2434 
2435 done_unlocked:
2436 
2437 	/* drop our reference on any cached states */
2438 	free_extent_state(cached_state);
2439 	return 0;
2440 }
2441 
2442 /**
2443  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2444  * @mapping: address space structure to write
2445  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2446  * @writepage: function called for each page
2447  * @data: data passed to writepage function
2448  *
2449  * If a page is already under I/O, write_cache_pages() skips it, even
2450  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2451  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2452  * and msync() need to guarantee that all the data which was dirty at the time
2453  * the call was made get new I/O started against them.  If wbc->sync_mode is
2454  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2455  * existing IO to complete.
2456  */
2457 static int extent_write_cache_pages(struct extent_io_tree *tree,
2458 			     struct address_space *mapping,
2459 			     struct writeback_control *wbc,
2460 			     writepage_t writepage, void *data,
2461 			     void (*flush_fn)(void *))
2462 {
2463 	int ret = 0;
2464 	int done = 0;
2465 	int nr_to_write_done = 0;
2466 	struct pagevec pvec;
2467 	int nr_pages;
2468 	pgoff_t index;
2469 	pgoff_t end;		/* Inclusive */
2470 	int scanned = 0;
2471 
2472 	pagevec_init(&pvec, 0);
2473 	if (wbc->range_cyclic) {
2474 		index = mapping->writeback_index; /* Start from prev offset */
2475 		end = -1;
2476 	} else {
2477 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2478 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2479 		scanned = 1;
2480 	}
2481 retry:
2482 	while (!done && !nr_to_write_done && (index <= end) &&
2483 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2484 			      PAGECACHE_TAG_DIRTY, min(end - index,
2485 				  (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2486 		unsigned i;
2487 
2488 		scanned = 1;
2489 		for (i = 0; i < nr_pages; i++) {
2490 			struct page *page = pvec.pages[i];
2491 
2492 			/*
2493 			 * At this point we hold neither mapping->tree_lock nor
2494 			 * lock on the page itself: the page may be truncated or
2495 			 * invalidated (changing page->mapping to NULL), or even
2496 			 * swizzled back from swapper_space to tmpfs file
2497 			 * mapping
2498 			 */
2499 			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2500 				tree->ops->write_cache_pages_lock_hook(page);
2501 			else
2502 				lock_page(page);
2503 
2504 			if (unlikely(page->mapping != mapping)) {
2505 				unlock_page(page);
2506 				continue;
2507 			}
2508 
2509 			if (!wbc->range_cyclic && page->index > end) {
2510 				done = 1;
2511 				unlock_page(page);
2512 				continue;
2513 			}
2514 
2515 			if (wbc->sync_mode != WB_SYNC_NONE) {
2516 				if (PageWriteback(page))
2517 					flush_fn(data);
2518 				wait_on_page_writeback(page);
2519 			}
2520 
2521 			if (PageWriteback(page) ||
2522 			    !clear_page_dirty_for_io(page)) {
2523 				unlock_page(page);
2524 				continue;
2525 			}
2526 
2527 			ret = (*writepage)(page, wbc, data);
2528 
2529 			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2530 				unlock_page(page);
2531 				ret = 0;
2532 			}
2533 			if (ret)
2534 				done = 1;
2535 
2536 			/*
2537 			 * the filesystem may choose to bump up nr_to_write.
2538 			 * We have to make sure to honor the new nr_to_write
2539 			 * at any time
2540 			 */
2541 			nr_to_write_done = wbc->nr_to_write <= 0;
2542 		}
2543 		pagevec_release(&pvec);
2544 		cond_resched();
2545 	}
2546 	if (!scanned && !done) {
2547 		/*
2548 		 * We hit the last page and there is more work to be done: wrap
2549 		 * back to the start of the file
2550 		 */
2551 		scanned = 1;
2552 		index = 0;
2553 		goto retry;
2554 	}
2555 	return ret;
2556 }
2557 
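/* submit any bio still pending in the extent_page_data, honoring sync_io */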
2558 static void flush_epd_write_bio(struct extent_page_data *epd)
2559 {
2560 	if (epd->bio) {
2561 		if (epd->sync_io)
2562 			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2563 		else
2564 			submit_one_bio(WRITE, epd->bio, 0, 0);
2565 		epd->bio = NULL;
2566 	}
2567 }
2568 
2569 static noinline void flush_write_bio(void *data)
2570 {
2571 	struct extent_page_data *epd = data;
2572 	flush_epd_write_bio(epd);
2573 }
2574 
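/*
 * write one dirty page, then opportunistically push up to 64 of the dirty
 * pages that follow it in the file so a single writepage call still does a
 * reasonable amount of clustered IO
 */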
2575 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2576 			  get_extent_t *get_extent,
2577 			  struct writeback_control *wbc)
2578 {
2579 	int ret;
2580 	struct address_space *mapping = page->mapping;
2581 	struct extent_page_data epd = {
2582 		.bio = NULL,
2583 		.tree = tree,
2584 		.get_extent = get_extent,
2585 		.extent_locked = 0,
2586 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
2587 	};
2588 	struct writeback_control wbc_writepages = {
2589 		.sync_mode	= wbc->sync_mode,
2590 		.older_than_this = NULL,
2591 		.nr_to_write	= 64,
2592 		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
2593 		.range_end	= (loff_t)-1,
2594 	};
2595 
2596 	ret = __extent_writepage(page, wbc, &epd);
2597 
2598 	extent_write_cache_pages(tree, mapping, &wbc_writepages,
2599 				 __extent_writepage, &epd, flush_write_bio);
2600 	flush_epd_write_bio(&epd);
2601 	return ret;
2602 }
2603 
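/*
 * write a range whose extent state the caller already locked
 * (epd.extent_locked is set).  Pages that aren't dirty just get the
 * writepage_end_io_hook called and are unlocked.
 */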
2604 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2605 			      u64 start, u64 end, get_extent_t *get_extent,
2606 			      int mode)
2607 {
2608 	int ret = 0;
2609 	struct address_space *mapping = inode->i_mapping;
2610 	struct page *page;
2611 	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2612 		PAGE_CACHE_SHIFT;
2613 
2614 	struct extent_page_data epd = {
2615 		.bio = NULL,
2616 		.tree = tree,
2617 		.get_extent = get_extent,
2618 		.extent_locked = 1,
2619 		.sync_io = mode == WB_SYNC_ALL,
2620 	};
2621 	struct writeback_control wbc_writepages = {
2622 		.sync_mode	= mode,
2623 		.older_than_this = NULL,
2624 		.nr_to_write	= nr_pages * 2,
2625 		.range_start	= start,
2626 		.range_end	= end + 1,
2627 	};
2628 
2629 	while (start <= end) {
2630 		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2631 		if (clear_page_dirty_for_io(page))
2632 			ret = __extent_writepage(page, &wbc_writepages, &epd);
2633 		else {
2634 			if (tree->ops && tree->ops->writepage_end_io_hook)
2635 				tree->ops->writepage_end_io_hook(page, start,
2636 						 start + PAGE_CACHE_SIZE - 1,
2637 						 NULL, 1);
2638 			unlock_page(page);
2639 		}
2640 		page_cache_release(page);
2641 		start += PAGE_CACHE_SIZE;
2642 	}
2643 
2644 	flush_epd_write_bio(&epd);
2645 	return ret;
2646 }
2647 
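/*
 * writepages entry point: push the mapping's dirty pages through
 * __extent_writepage and flush whatever bio is left over
 */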
2648 int extent_writepages(struct extent_io_tree *tree,
2649 		      struct address_space *mapping,
2650 		      get_extent_t *get_extent,
2651 		      struct writeback_control *wbc)
2652 {
2653 	int ret = 0;
2654 	struct extent_page_data epd = {
2655 		.bio = NULL,
2656 		.tree = tree,
2657 		.get_extent = get_extent,
2658 		.extent_locked = 0,
2659 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
2660 	};
2661 
2662 	ret = extent_write_cache_pages(tree, mapping, wbc,
2663 				       __extent_writepage, &epd,
2664 				       flush_write_bio);
2665 	flush_epd_write_bio(&epd);
2666 	return ret;
2667 }
2668 
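/*
 * readahead entry point: add each page to the page cache and read it with
 * __extent_read_full_page, batching everything into as few bios as possible
 */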
2669 int extent_readpages(struct extent_io_tree *tree,
2670 		     struct address_space *mapping,
2671 		     struct list_head *pages, unsigned nr_pages,
2672 		     get_extent_t get_extent)
2673 {
2674 	struct bio *bio = NULL;
2675 	unsigned page_idx;
2676 	unsigned long bio_flags = 0;
2677 
2678 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2679 		struct page *page = list_entry(pages->prev, struct page, lru);
2680 
2681 		prefetchw(&page->flags);
2682 		list_del(&page->lru);
2683 		if (!add_to_page_cache_lru(page, mapping,
2684 					page->index, GFP_NOFS)) {
2685 			__extent_read_full_page(tree, page, get_extent,
2686 						&bio, 0, &bio_flags);
2687 		}
2688 		page_cache_release(page);
2689 	}
2690 	BUG_ON(!list_empty(pages));
2691 	if (bio)
2692 		submit_one_bio(READ, bio, 0, bio_flags);
2693 	return 0;
2694 }
2695 
2696 /*
2697  * basic invalidatepage code, this waits on any locked or writeback
2698  * ranges corresponding to the page, and then deletes any extent state
2699  * records from the tree
2700  */
2701 int extent_invalidatepage(struct extent_io_tree *tree,
2702 			  struct page *page, unsigned long offset)
2703 {
2704 	struct extent_state *cached_state = NULL;
2705 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2706 	u64 end = start + PAGE_CACHE_SIZE - 1;
2707 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2708 
2709 	start += (offset + blocksize - 1) & ~(blocksize - 1);
2710 	if (start > end)
2711 		return 0;
2712 
2713 	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2714 	wait_on_page_writeback(page);
2715 	clear_extent_bit(tree, start, end,
2716 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2717 			 EXTENT_DO_ACCOUNTING,
2718 			 1, 1, &cached_state, GFP_NOFS);
2719 	return 0;
2720 }
2721 
2722 /*
2723  * simple commit_write call, set_range_dirty is used to mark both
2724  * the pages and the extent records as dirty
2725  */
2726 int extent_commit_write(struct extent_io_tree *tree,
2727 			struct inode *inode, struct page *page,
2728 			unsigned from, unsigned to)
2729 {
2730 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2731 
2732 	set_page_extent_mapped(page);
2733 	set_page_dirty(page);
2734 
2735 	if (pos > inode->i_size) {
2736 		i_size_write(inode, pos);
2737 		mark_inode_dirty(inode);
2738 	}
2739 	return 0;
2740 }
2741 
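/*
 * prepare a page for writing in the from..to range.  Newly allocated blocks
 * have the bytes outside from..to zeroed; existing blocks that are only
 * partially covered and not yet uptodate are read in, and we wait for that
 * IO before returning.  Everything else is simply marked uptodate.
 */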
2742 int extent_prepare_write(struct extent_io_tree *tree,
2743 			 struct inode *inode, struct page *page,
2744 			 unsigned from, unsigned to, get_extent_t *get_extent)
2745 {
2746 	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2747 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2748 	u64 block_start;
2749 	u64 orig_block_start;
2750 	u64 block_end;
2751 	u64 cur_end;
2752 	struct extent_map *em;
2753 	unsigned blocksize = 1 << inode->i_blkbits;
2754 	size_t page_offset = 0;
2755 	size_t block_off_start;
2756 	size_t block_off_end;
2757 	int err = 0;
2758 	int iocount = 0;
2759 	int ret = 0;
2760 	int isnew;
2761 
2762 	set_page_extent_mapped(page);
2763 
2764 	block_start = (page_start + from) & ~((u64)blocksize - 1);
2765 	block_end = (page_start + to - 1) | (blocksize - 1);
2766 	orig_block_start = block_start;
2767 
2768 	lock_extent(tree, page_start, page_end, GFP_NOFS);
2769 	while (block_start <= block_end) {
2770 		em = get_extent(inode, page, page_offset, block_start,
2771 				block_end - block_start + 1, 1);
2772 		if (IS_ERR(em) || !em)
2773 			goto err;
2774 
2775 		cur_end = min(block_end, extent_map_end(em) - 1);
2776 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2777 		block_off_end = block_off_start + blocksize;
2778 		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2779 
2780 		if (!PageUptodate(page) && isnew &&
2781 		    (block_off_end > to || block_off_start < from)) {
2782 			void *kaddr;
2783 
2784 			kaddr = kmap_atomic(page, KM_USER0);
2785 			if (block_off_end > to)
2786 				memset(kaddr + to, 0, block_off_end - to);
2787 			if (block_off_start < from)
2788 				memset(kaddr + block_off_start, 0,
2789 				       from - block_off_start);
2790 			flush_dcache_page(page);
2791 			kunmap_atomic(kaddr, KM_USER0);
2792 		}
2793 		if ((em->block_start != EXTENT_MAP_HOLE &&
2794 		     em->block_start != EXTENT_MAP_INLINE) &&
2795 		    !isnew && !PageUptodate(page) &&
2796 		    (block_off_end > to || block_off_start < from) &&
2797 		    !test_range_bit(tree, block_start, cur_end,
2798 				    EXTENT_UPTODATE, 1, NULL)) {
2799 			u64 sector;
2800 			u64 extent_offset = block_start - em->start;
2801 			size_t iosize;
2802 			sector = (em->block_start + extent_offset) >> 9;
2803 			iosize = (cur_end - block_start + blocksize) &
2804 				~((u64)blocksize - 1);
2805 			/*
2806 			 * we've already got the extent locked, but we
2807 			 * need to split the state such that our end_bio
2808 			 * handler can clear the lock.
2809 			 */
2810 			set_extent_bit(tree, block_start,
2811 				       block_start + iosize - 1,
2812 				       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
2813 			ret = submit_extent_page(READ, tree, page,
2814 					 sector, iosize, page_offset, em->bdev,
2815 					 NULL, 1,
2816 					 end_bio_extent_preparewrite, 0,
2817 					 0, 0);
2818 			if (ret && !err)
2819 				err = ret;
2820 			iocount++;
2821 			block_start = block_start + iosize;
2822 		} else {
2823 			struct extent_state *cached = NULL;
2824 
2825 			set_extent_uptodate(tree, block_start, cur_end, &cached,
2826 					    GFP_NOFS);
2827 			unlock_extent_cached(tree, block_start, cur_end,
2828 					     &cached, GFP_NOFS);
2829 			block_start = cur_end + 1;
2830 		}
2831 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2832 		free_extent_map(em);
2833 	}
2834 	if (iocount) {
2835 		wait_extent_bit(tree, orig_block_start,
2836 				block_end, EXTENT_LOCKED);
2837 	}
2838 	check_page_uptodate(tree, page);
2839 err:
2840 	/* FIXME, zero out newly allocated blocks on error */
2841 	return err;
2842 }
2843 
2844 /*
2845  * a helper for releasepage, this tests for areas of the page that
2846  * are locked or under IO and drops the related state bits if it is safe
2847  * to drop the page.
2848  */
2849 int try_release_extent_state(struct extent_map_tree *map,
2850 			     struct extent_io_tree *tree, struct page *page,
2851 			     gfp_t mask)
2852 {
2853 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2854 	u64 end = start + PAGE_CACHE_SIZE - 1;
2855 	int ret = 1;
2856 
2857 	if (test_range_bit(tree, start, end,
2858 			   EXTENT_IOBITS, 0, NULL))
2859 		ret = 0;
2860 	else {
2861 		if ((mask & GFP_NOFS) == GFP_NOFS)
2862 			mask = GFP_NOFS;
2863 		/*
2864 		 * at this point we can safely clear everything except the
2865 		 * locked bit and the nodatasum bit
2866 		 */
2867 		ret = clear_extent_bit(tree, start, end,
2868 				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2869 				 0, 0, NULL, mask);
2870 
2871 		/* if clear_extent_bit failed for enomem reasons,
2872 		 * we can't allow the release to continue.
2873 		 */
2874 		if (ret < 0)
2875 			ret = 0;
2876 		else
2877 			ret = 1;
2878 	}
2879 	return ret;
2880 }
2881 
2882 /*
2883  * a helper for releasepage.  As long as there are no locked extents
2884  * in the range corresponding to the page, both state records and extent
2885  * map records are removed
2886  */
2887 int try_release_extent_mapping(struct extent_map_tree *map,
2888 			       struct extent_io_tree *tree, struct page *page,
2889 			       gfp_t mask)
2890 {
2891 	struct extent_map *em;
2892 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2893 	u64 end = start + PAGE_CACHE_SIZE - 1;
2894 
2895 	if ((mask & __GFP_WAIT) &&
2896 	    page->mapping->host->i_size > 16 * 1024 * 1024) {
2897 		u64 len;
2898 		while (start <= end) {
2899 			len = end - start + 1;
2900 			write_lock(&map->lock);
2901 			em = lookup_extent_mapping(map, start, len);
2902 			if (!em || IS_ERR(em)) {
2903 				write_unlock(&map->lock);
2904 				break;
2905 			}
2906 			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2907 			    em->start != start) {
2908 				write_unlock(&map->lock);
2909 				free_extent_map(em);
2910 				break;
2911 			}
2912 			if (!test_range_bit(tree, em->start,
2913 					    extent_map_end(em) - 1,
2914 					    EXTENT_LOCKED | EXTENT_WRITEBACK,
2915 					    0, NULL)) {
2916 				remove_extent_mapping(map, em);
2917 				/* once for the rb tree */
2918 				free_extent_map(em);
2919 			}
2920 			start = extent_map_end(em);
2921 			write_unlock(&map->lock);
2922 
2923 			/* once for us */
2924 			free_extent_map(em);
2925 		}
2926 	}
2927 	return try_release_extent_state(map, tree, page, mask);
2928 }
2929 
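/*
 * bmap implementation: map a logical file block to a disk sector using
 * get_extent, returning 0 for holes, inline extents and delalloc ranges
 */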
2930 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2931 		get_extent_t *get_extent)
2932 {
2933 	struct inode *inode = mapping->host;
2934 	struct extent_state *cached_state = NULL;
2935 	u64 start = iblock << inode->i_blkbits;
2936 	sector_t sector = 0;
2937 	size_t blksize = (1 << inode->i_blkbits);
2938 	struct extent_map *em;
2939 
2940 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2941 			 0, &cached_state, GFP_NOFS);
2942 	em = get_extent(inode, NULL, 0, start, blksize, 0);
2943 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
2944 			     start + blksize - 1, &cached_state, GFP_NOFS);
2945 	if (!em || IS_ERR(em))
2946 		return 0;
2947 
2948 	if (em->block_start > EXTENT_MAP_LAST_BYTE)
2949 		goto out;
2950 
2951 	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2952 out:
2953 	free_extent_map(em);
2954 	return sector;
2955 }
2956 
2957 /*
2958  * helper function for fiemap, which doesn't want to see any holes.
2959  * This maps until we find something past 'last'
2960  */
2961 static struct extent_map *get_extent_skip_holes(struct inode *inode,
2962 						u64 offset,
2963 						u64 last,
2964 						get_extent_t *get_extent)
2965 {
2966 	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2967 	struct extent_map *em;
2968 	u64 len;
2969 
2970 	if (offset >= last)
2971 		return NULL;
2972 
2973 	while (1) {
2974 		len = last - offset;
2975 		if (len == 0)
2976 			break;
2977 		len = (len + sectorsize - 1) & ~(sectorsize - 1);
2978 		em = get_extent(inode, NULL, 0, offset, len, 0);
2979 		if (!em || IS_ERR(em))
2980 			return em;
2981 
2982 		/* if this isn't a hole return it */
2983 		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2984 		    em->block_start != EXTENT_MAP_HOLE) {
2985 			return em;
2986 		}
2987 
2988 		/* this is a hole, advance to the next extent */
2989 		offset = extent_map_end(em);
2990 		free_extent_map(em);
2991 		if (offset >= last)
2992 			break;
2993 	}
2994 	return NULL;
2995 }
2996 
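/*
 * fiemap implementation.  The last file extent item on disk is consulted so
 * preallocation past i_size is still reported, then the extent maps covering
 * the requested range are walked and handed to fiemap_fill_next_extent with
 * the matching FIEMAP flags.
 */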
2997 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2998 		__u64 start, __u64 len, get_extent_t *get_extent)
2999 {
3000 	int ret = 0;
3001 	u64 off = start;
3002 	u64 max = start + len;
3003 	u32 flags = 0;
3004 	u32 found_type;
3005 	u64 last;
3006 	u64 last_for_get_extent = 0;
3007 	u64 disko = 0;
3008 	u64 isize = i_size_read(inode);
3009 	struct btrfs_key found_key;
3010 	struct extent_map *em = NULL;
3011 	struct extent_state *cached_state = NULL;
3012 	struct btrfs_path *path;
3013 	struct btrfs_file_extent_item *item;
3014 	int end = 0;
3015 	u64 em_start = 0;
3016 	u64 em_len = 0;
3017 	u64 em_end = 0;
3018 	unsigned long emflags;
3019 
3020 	if (len == 0)
3021 		return -EINVAL;
3022 
3023 	path = btrfs_alloc_path();
3024 	if (!path)
3025 		return -ENOMEM;
3026 	path->leave_spinning = 1;
3027 
3028 	/*
3029 	 * lookup the last file extent.  We're not using i_size here
3030 	 * because there might be preallocation past i_size
3031 	 */
3032 	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3033 				       path, inode->i_ino, -1, 0);
3034 	if (ret < 0) {
3035 		btrfs_free_path(path);
3036 		return ret;
3037 	}
3038 	WARN_ON(!ret);
3039 	path->slots[0]--;
3040 	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3041 			      struct btrfs_file_extent_item);
3042 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3043 	found_type = btrfs_key_type(&found_key);
3044 
3045 	/* No extents, but there might be delalloc bits */
3046 	if (found_key.objectid != inode->i_ino ||
3047 	    found_type != BTRFS_EXTENT_DATA_KEY) {
3048 		/* have to trust i_size as the end */
3049 		last = (u64)-1;
3050 		last_for_get_extent = isize;
3051 	} else {
3052 		/*
3053 		 * remember the start of the last extent.  There are a
3054 		 * bunch of different factors that go into the length of the
3055 		 * extent, so it's much less complex to remember where it started
3056 		 */
3057 		last = found_key.offset;
3058 		last_for_get_extent = last + 1;
3059 	}
3060 	btrfs_free_path(path);
3061 
3062 	/*
3063 	 * we might have some extents allocated but more delalloc past those
3064 	 * extents.  so, we trust isize unless the start of the last extent is
3065 	 * beyond isize
3066 	 */
3067 	if (last < isize) {
3068 		last = (u64)-1;
3069 		last_for_get_extent = isize;
3070 	}
3071 
3072 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3073 			 &cached_state, GFP_NOFS);
3074 
3075 	em = get_extent_skip_holes(inode, off, last_for_get_extent,
3076 				   get_extent);
3077 	if (!em)
3078 		goto out;
3079 	if (IS_ERR(em)) {
3080 		ret = PTR_ERR(em);
3081 		goto out;
3082 	}
3083 
3084 	while (!end) {
3085 		u64 offset_in_extent;
3086 
3087 		/* break if the extent we found is outside the range */
3088 		if (em->start >= max || extent_map_end(em) < off)
3089 			break;
3090 
3091 		/*
3092 		 * get_extent may return an extent that starts before our
3093 		 * requested range.  We have to make sure the ranges
3094 		 * we return to fiemap always move forward and don't
3095 		 * overlap, so adjust the offsets here
3096 		 */
3097 		em_start = max(em->start, off);
3098 
3099 		/*
3100 		 * record the offset from the start of the extent
3101 		 * for adjusting the disk offset below
3102 		 */
3103 		offset_in_extent = em_start - em->start;
3104 		em_end = extent_map_end(em);
3105 		em_len = em_end - em_start;
3106 		emflags = em->flags;
3107 		disko = 0;
3108 		flags = 0;
3109 
3110 		/*
3111 		 * bump off for our next call to get_extent
3112 		 */
3113 		off = extent_map_end(em);
3114 		if (off >= max)
3115 			end = 1;
3116 
3117 		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3118 			end = 1;
3119 			flags |= FIEMAP_EXTENT_LAST;
3120 		} else if (em->block_start == EXTENT_MAP_INLINE) {
3121 			flags |= (FIEMAP_EXTENT_DATA_INLINE |
3122 				  FIEMAP_EXTENT_NOT_ALIGNED);
3123 		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
3124 			flags |= (FIEMAP_EXTENT_DELALLOC |
3125 				  FIEMAP_EXTENT_UNKNOWN);
3126 		} else {
3127 			disko = em->block_start + offset_in_extent;
3128 		}
3129 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3130 			flags |= FIEMAP_EXTENT_ENCODED;
3131 
3132 		free_extent_map(em);
3133 		em = NULL;
3134 		if ((em_start >= last) || em_len == (u64)-1 ||
3135 		   (last == (u64)-1 && isize <= em_end)) {
3136 			flags |= FIEMAP_EXTENT_LAST;
3137 			end = 1;
3138 		}
3139 
3140 		/* now scan forward to see if this is really the last extent. */
3141 		em = get_extent_skip_holes(inode, off, last_for_get_extent,
3142 					   get_extent);
3143 		if (IS_ERR(em)) {
3144 			ret = PTR_ERR(em);
3145 			goto out;
3146 		}
3147 		if (!em) {
3148 			flags |= FIEMAP_EXTENT_LAST;
3149 			end = 1;
3150 		}
3151 		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3152 					      em_len, flags);
3153 		if (ret)
3154 			goto out_free;
3155 	}
3156 out_free:
3157 	free_extent_map(em);
3158 out:
3159 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3160 			     &cached_state, GFP_NOFS);
3161 	return ret;
3162 }
3163 
3164 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
3165 					      unsigned long i)
3166 {
3167 	struct page *p;
3168 	struct address_space *mapping;
3169 
3170 	if (i == 0)
3171 		return eb->first_page;
3172 	i += eb->start >> PAGE_CACHE_SHIFT;
3173 	mapping = eb->first_page->mapping;
3174 	if (!mapping)
3175 		return NULL;
3176 
3177 	/*
3178 	 * extent_buffer_page is only called after pinning the page
3179 	 * by increasing the reference count.  So we know the page must
3180 	 * be in the radix tree.
3181 	 */
3182 	rcu_read_lock();
3183 	p = radix_tree_lookup(&mapping->page_tree, i);
3184 	rcu_read_unlock();
3185 
3186 	return p;
3187 }
3188 
3189 static inline unsigned long num_extent_pages(u64 start, u64 len)
3190 {
3191 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3192 		(start >> PAGE_CACHE_SHIFT);
3193 }
3194 
3195 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3196 						   u64 start,
3197 						   unsigned long len,
3198 						   gfp_t mask)
3199 {
3200 	struct extent_buffer *eb = NULL;
3201 #if LEAK_DEBUG
3202 	unsigned long flags;
3203 #endif
3204 
3205 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3206 	if (eb == NULL)
3207 		return NULL;
3208 	eb->start = start;
3209 	eb->len = len;
3210 	spin_lock_init(&eb->lock);
3211 	init_waitqueue_head(&eb->lock_wq);
3212 
3213 #if LEAK_DEBUG
3214 	spin_lock_irqsave(&leak_lock, flags);
3215 	list_add(&eb->leak_list, &buffers);
3216 	spin_unlock_irqrestore(&leak_lock, flags);
3217 #endif
3218 	atomic_set(&eb->refs, 1);
3219 
3220 	return eb;
3221 }
3222 
3223 static void __free_extent_buffer(struct extent_buffer *eb)
3224 {
3225 #if LEAK_DEBUG
3226 	unsigned long flags;
3227 	spin_lock_irqsave(&leak_lock, flags);
3228 	list_del(&eb->leak_list);
3229 	spin_unlock_irqrestore(&leak_lock, flags);
3230 #endif
3231 	kmem_cache_free(extent_buffer_cache, eb);
3232 }
3233 
3234 /*
3235  * Helper for releasing extent buffer page.
3236  */
3237 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3238 						unsigned long start_idx)
3239 {
3240 	unsigned long index;
3241 	struct page *page;
3242 
3243 	if (!eb->first_page)
3244 		return;
3245 
3246 	index = num_extent_pages(eb->start, eb->len);
3247 	if (start_idx >= index)
3248 		return;
3249 
3250 	do {
3251 		index--;
3252 		page = extent_buffer_page(eb, index);
3253 		if (page)
3254 			page_cache_release(page);
3255 	} while (index != start_idx);
3256 }
3257 
3258 /*
3259  * Helper for releasing the extent buffer.
3260  */
3261 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3262 {
3263 	btrfs_release_extent_buffer_page(eb, 0);
3264 	__free_extent_buffer(eb);
3265 }
3266 
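/*
 * find or create the extent buffer covering start/len.  An existing buffer
 * is returned from the radix tree when possible; otherwise a new one is
 * allocated, its pages are found or created in the page cache, and it is
 * inserted into the tree (falling back to the winner of a racing insert).
 */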
3267 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3268 					  u64 start, unsigned long len,
3269 					  struct page *page0,
3270 					  gfp_t mask)
3271 {
3272 	unsigned long num_pages = num_extent_pages(start, len);
3273 	unsigned long i;
3274 	unsigned long index = start >> PAGE_CACHE_SHIFT;
3275 	struct extent_buffer *eb;
3276 	struct extent_buffer *exists = NULL;
3277 	struct page *p;
3278 	struct address_space *mapping = tree->mapping;
3279 	int uptodate = 1;
3280 	int ret;
3281 
3282 	rcu_read_lock();
3283 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3284 	if (eb && atomic_inc_not_zero(&eb->refs)) {
3285 		rcu_read_unlock();
3286 		mark_page_accessed(eb->first_page);
3287 		return eb;
3288 	}
3289 	rcu_read_unlock();
3290 
3291 	eb = __alloc_extent_buffer(tree, start, len, mask);
3292 	if (!eb)
3293 		return NULL;
3294 
3295 	if (page0) {
3296 		eb->first_page = page0;
3297 		i = 1;
3298 		index++;
3299 		page_cache_get(page0);
3300 		mark_page_accessed(page0);
3301 		set_page_extent_mapped(page0);
3302 		set_page_extent_head(page0, len);
3303 		uptodate = PageUptodate(page0);
3304 	} else {
3305 		i = 0;
3306 	}
3307 	for (; i < num_pages; i++, index++) {
3308 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
3309 		if (!p) {
3310 			WARN_ON(1);
3311 			goto free_eb;
3312 		}
3313 		set_page_extent_mapped(p);
3314 		mark_page_accessed(p);
3315 		if (i == 0) {
3316 			eb->first_page = p;
3317 			set_page_extent_head(p, len);
3318 		} else {
3319 			set_page_private(p, EXTENT_PAGE_PRIVATE);
3320 		}
3321 		if (!PageUptodate(p))
3322 			uptodate = 0;
3323 
3324 		/*
3325 		 * see below about how we avoid a nasty race with release page
3326 		 * and why we unlock later
3327 		 */
3328 		if (i != 0)
3329 			unlock_page(p);
3330 	}
3331 	if (uptodate)
3332 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3333 
3334 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3335 	if (ret)
3336 		goto free_eb;
3337 
3338 	spin_lock(&tree->buffer_lock);
3339 	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3340 	if (ret == -EEXIST) {
3341 		exists = radix_tree_lookup(&tree->buffer,
3342 						start >> PAGE_CACHE_SHIFT);
3343 		/* add one reference for the caller */
3344 		atomic_inc(&exists->refs);
3345 		spin_unlock(&tree->buffer_lock);
3346 		radix_tree_preload_end();
3347 		goto free_eb;
3348 	}
3349 	/* add one reference for the tree */
3350 	atomic_inc(&eb->refs);
3351 	spin_unlock(&tree->buffer_lock);
3352 	radix_tree_preload_end();
3353 
3354 	/*
3355 	 * there is a race where release page may have
3356 	 * tried to find this extent buffer in the radix
3357 	 * but failed.  It will tell the VM it is safe to
3358 	 * reclaim the page, and it will clear the page private bit.
3359 	 * We must make sure to set the page private bit properly
3360 	 * after the extent buffer is in the radix tree so
3361 	 * it doesn't get lost
3362 	 */
3363 	set_page_extent_mapped(eb->first_page);
3364 	set_page_extent_head(eb->first_page, eb->len);
3365 	if (!page0)
3366 		unlock_page(eb->first_page);
3367 	return eb;
3368 
3369 free_eb:
3370 	if (eb->first_page && !page0)
3371 		unlock_page(eb->first_page);
3372 
3373 	if (!atomic_dec_and_test(&eb->refs))
3374 		return exists;
3375 	btrfs_release_extent_buffer(eb);
3376 	return exists;
3377 }
3378 
3379 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3380 					 u64 start, unsigned long len,
3381 					  gfp_t mask)
3382 {
3383 	struct extent_buffer *eb;
3384 
3385 	rcu_read_lock();
3386 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3387 	if (eb && atomic_inc_not_zero(&eb->refs)) {
3388 		rcu_read_unlock();
3389 		mark_page_accessed(eb->first_page);
3390 		return eb;
3391 	}
3392 	rcu_read_unlock();
3393 
3394 	return NULL;
3395 }
3396 
3397 void free_extent_buffer(struct extent_buffer *eb)
3398 {
3399 	if (!eb)
3400 		return;
3401 
3402 	if (!atomic_dec_and_test(&eb->refs))
3403 		return;
3404 
3405 	WARN_ON(1);
3406 }
3407 
3408 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3409 			      struct extent_buffer *eb)
3410 {
3411 	unsigned long i;
3412 	unsigned long num_pages;
3413 	struct page *page;
3414 
3415 	num_pages = num_extent_pages(eb->start, eb->len);
3416 
3417 	for (i = 0; i < num_pages; i++) {
3418 		page = extent_buffer_page(eb, i);
3419 		if (!PageDirty(page))
3420 			continue;
3421 
3422 		lock_page(page);
3423 		WARN_ON(!PagePrivate(page));
3424 
3425 		set_page_extent_mapped(page);
3426 		if (i == 0)
3427 			set_page_extent_head(page, eb->len);
3428 
3429 		clear_page_dirty_for_io(page);
3430 		spin_lock_irq(&page->mapping->tree_lock);
3431 		if (!PageDirty(page)) {
3432 			radix_tree_tag_clear(&page->mapping->page_tree,
3433 						page_index(page),
3434 						PAGECACHE_TAG_DIRTY);
3435 		}
3436 		spin_unlock_irq(&page->mapping->tree_lock);
3437 		unlock_page(page);
3438 	}
3439 	return 0;
3440 }
3441 
3442 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3443 				    struct extent_buffer *eb)
3444 {
3445 	return wait_on_extent_writeback(tree, eb->start,
3446 					eb->start + eb->len - 1);
3447 }
3448 
3449 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3450 			     struct extent_buffer *eb)
3451 {
3452 	unsigned long i;
3453 	unsigned long num_pages;
3454 	int was_dirty = 0;
3455 
3456 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3457 	num_pages = num_extent_pages(eb->start, eb->len);
3458 	for (i = 0; i < num_pages; i++)
3459 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3460 	return was_dirty;
3461 }
3462 
3463 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3464 				struct extent_buffer *eb,
3465 				struct extent_state **cached_state)
3466 {
3467 	unsigned long i;
3468 	struct page *page;
3469 	unsigned long num_pages;
3470 
3471 	num_pages = num_extent_pages(eb->start, eb->len);
3472 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3473 
3474 	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3475 			      cached_state, GFP_NOFS);
3476 	for (i = 0; i < num_pages; i++) {
3477 		page = extent_buffer_page(eb, i);
3478 		if (page)
3479 			ClearPageUptodate(page);
3480 	}
3481 	return 0;
3482 }
3483 
3484 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3485 				struct extent_buffer *eb)
3486 {
3487 	unsigned long i;
3488 	struct page *page;
3489 	unsigned long num_pages;
3490 
3491 	num_pages = num_extent_pages(eb->start, eb->len);
3492 
3493 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3494 			    NULL, GFP_NOFS);
3495 	for (i = 0; i < num_pages; i++) {
3496 		page = extent_buffer_page(eb, i);
3497 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3498 		    ((i == num_pages - 1) &&
3499 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3500 			check_page_uptodate(tree, page);
3501 			continue;
3502 		}
3503 		SetPageUptodate(page);
3504 	}
3505 	return 0;
3506 }
3507 
3508 int extent_range_uptodate(struct extent_io_tree *tree,
3509 			  u64 start, u64 end)
3510 {
3511 	struct page *page;
3512 	int ret;
3513 	int pg_uptodate = 1;
3514 	int uptodate;
3515 	unsigned long index;
3516 
3517 	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
3518 	if (ret)
3519 		return 1;
3520 	while (start <= end) {
3521 		index = start >> PAGE_CACHE_SHIFT;
3522 		page = find_get_page(tree->mapping, index);
3523 		uptodate = PageUptodate(page);
3524 		page_cache_release(page);
3525 		if (!uptodate) {
3526 			pg_uptodate = 0;
3527 			break;
3528 		}
3529 		start += PAGE_CACHE_SIZE;
3530 	}
3531 	return pg_uptodate;
3532 }
3533 
3534 int extent_buffer_uptodate(struct extent_io_tree *tree,
3535 			   struct extent_buffer *eb,
3536 			   struct extent_state *cached_state)
3537 {
3538 	int ret = 0;
3539 	unsigned long num_pages;
3540 	unsigned long i;
3541 	struct page *page;
3542 	int pg_uptodate = 1;
3543 
3544 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3545 		return 1;
3546 
3547 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3548 			   EXTENT_UPTODATE, 1, cached_state);
3549 	if (ret)
3550 		return ret;
3551 
3552 	num_pages = num_extent_pages(eb->start, eb->len);
3553 	for (i = 0; i < num_pages; i++) {
3554 		page = extent_buffer_page(eb, i);
3555 		if (!PageUptodate(page)) {
3556 			pg_uptodate = 0;
3557 			break;
3558 		}
3559 	}
3560 	return pg_uptodate;
3561 }
3562 
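/*
 * Read the pages backing an extent buffer from disk unless they are already
 * uptodate.  Pages needing IO are submitted through __extent_read_full_page();
 * with @wait set, block until the reads complete and return -EIO if any page
 * failed.  A nonzero @start begins the read at that offset within the buffer.
 */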
3563 int read_extent_buffer_pages(struct extent_io_tree *tree,
3564 			     struct extent_buffer *eb,
3565 			     u64 start, int wait,
3566 			     get_extent_t *get_extent, int mirror_num)
3567 {
3568 	unsigned long i;
3569 	unsigned long start_i;
3570 	struct page *page;
3571 	int err;
3572 	int ret = 0;
3573 	int locked_pages = 0;
3574 	int all_uptodate = 1;
3575 	int inc_all_pages = 0;
3576 	unsigned long num_pages;
3577 	struct bio *bio = NULL;
3578 	unsigned long bio_flags = 0;
3579 
3580 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3581 		return 0;
3582 
3583 	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3584 			   EXTENT_UPTODATE, 1, NULL)) {
3585 		return 0;
3586 	}
3587 
3588 	if (start) {
3589 		WARN_ON(start < eb->start);
3590 		start_i = (start >> PAGE_CACHE_SHIFT) -
3591 			(eb->start >> PAGE_CACHE_SHIFT);
3592 	} else {
3593 		start_i = 0;
3594 	}
3595 
3596 	num_pages = num_extent_pages(eb->start, eb->len);
3597 	for (i = start_i; i < num_pages; i++) {
3598 		page = extent_buffer_page(eb, i);
3599 		if (!wait) {
3600 			if (!trylock_page(page))
3601 				goto unlock_exit;
3602 		} else {
3603 			lock_page(page);
3604 		}
3605 		locked_pages++;
3606 		if (!PageUptodate(page))
3607 			all_uptodate = 0;
3608 	}
3609 	if (all_uptodate) {
3610 		if (start_i == 0)
3611 			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3612 		goto unlock_exit;
3613 	}
3614 
3615 	for (i = start_i; i < num_pages; i++) {
3616 		page = extent_buffer_page(eb, i);
3617 
3618 		WARN_ON(!PagePrivate(page));
3619 
3620 		set_page_extent_mapped(page);
3621 		if (i == 0)
3622 			set_page_extent_head(page, eb->len);
3623 
3624 		if (inc_all_pages)
3625 			page_cache_get(page);
3626 		if (!PageUptodate(page)) {
3627 			if (start_i == 0)
3628 				inc_all_pages = 1;
3629 			ClearPageError(page);
3630 			err = __extent_read_full_page(tree, page,
3631 						      get_extent, &bio,
3632 						      mirror_num, &bio_flags);
3633 			if (err)
3634 				ret = err;
3635 		} else {
3636 			unlock_page(page);
3637 		}
3638 	}
3639 
3640 	if (bio)
3641 		submit_one_bio(READ, bio, mirror_num, bio_flags);
3642 
3643 	if (ret || !wait)
3644 		return ret;
3645 
3646 	for (i = start_i; i < num_pages; i++) {
3647 		page = extent_buffer_page(eb, i);
3648 		wait_on_page_locked(page);
3649 		if (!PageUptodate(page))
3650 			ret = -EIO;
3651 	}
3652 
3653 	if (!ret)
3654 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3655 	return ret;
3656 
3657 unlock_exit:
3658 	i = start_i;
3659 	while (locked_pages > 0) {
3660 		page = extent_buffer_page(eb, i);
3661 		i++;
3662 		unlock_page(page);
3663 		locked_pages--;
3664 	}
3665 	return ret;
3666 }
3667 
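/*
 * Copy @len bytes starting at offset @start within the extent buffer into
 * @dstv, walking the backing pages with kmap_atomic().
 */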
3668 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3669 			unsigned long start,
3670 			unsigned long len)
3671 {
3672 	size_t cur;
3673 	size_t offset;
3674 	struct page *page;
3675 	char *kaddr;
3676 	char *dst = (char *)dstv;
3677 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3678 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3679 
3680 	WARN_ON(start > eb->len);
3681 	WARN_ON(start + len > eb->start + eb->len);
3682 
3683 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3684 
3685 	while (len > 0) {
3686 		page = extent_buffer_page(eb, i);
3687 
3688 		cur = min(len, (PAGE_CACHE_SIZE - offset));
3689 		kaddr = kmap_atomic(page, KM_USER1);
3690 		memcpy(dst, kaddr + offset, cur);
3691 		kunmap_atomic(kaddr, KM_USER1);
3692 
3693 		dst += cur;
3694 		len -= cur;
3695 		offset = 0;
3696 		i++;
3697 	}
3698 }
3699 
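/*
 * Map a contiguous chunk of the extent buffer so the caller can access at
 * least @min_len bytes directly.  Fails with -EINVAL if the requested range
 * crosses a page boundary or runs past the end of the buffer.
 */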
3700 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3701 			       unsigned long min_len, char **token, char **map,
3702 			       unsigned long *map_start,
3703 			       unsigned long *map_len, int km)
3704 {
3705 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
3706 	char *kaddr;
3707 	struct page *p;
3708 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3709 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3710 	unsigned long end_i = (start_offset + start + min_len - 1) >>
3711 		PAGE_CACHE_SHIFT;
3712 
3713 	if (i != end_i)
3714 		return -EINVAL;
3715 
3716 	if (i == 0) {
3717 		offset = start_offset;
3718 		*map_start = 0;
3719 	} else {
3720 		offset = 0;
3721 		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3722 	}
3723 
3724 	if (start + min_len > eb->len) {
3725 		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3726 		       "wanted %lu %lu\n", (unsigned long long)eb->start,
3727 		       eb->len, start, min_len);
3728 		WARN_ON(1);
3729 		return -EINVAL;
3730 	}
3731 
3732 	p = extent_buffer_page(eb, i);
3733 	kaddr = kmap_atomic(p, km);
3734 	*token = kaddr;
3735 	*map = kaddr + offset;
3736 	*map_len = PAGE_CACHE_SIZE - offset;
3737 	return 0;
3738 }
3739 
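/*
 * Like map_private_extent_buffer(), but any cached mapping (eb->map_token)
 * is dropped first and, on success, replaced with the new one.
 */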
3740 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3741 		      unsigned long min_len,
3742 		      char **token, char **map,
3743 		      unsigned long *map_start,
3744 		      unsigned long *map_len, int km)
3745 {
3746 	int err;
3747 	int save = 0;
3748 	if (eb->map_token) {
3749 		unmap_extent_buffer(eb, eb->map_token, km);
3750 		eb->map_token = NULL;
3751 		save = 1;
3752 	}
3753 	err = map_private_extent_buffer(eb, start, min_len, token, map,
3754 				       map_start, map_len, km);
3755 	if (!err && save) {
3756 		eb->map_token = *token;
3757 		eb->kaddr = *map;
3758 		eb->map_start = *map_start;
3759 		eb->map_len = *map_len;
3760 	}
3761 	return err;
3762 }
3763 
3764 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3765 {
3766 	kunmap_atomic(token, km);
3767 }
3768 
3769 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3770 			  unsigned long start,
3771 			  unsigned long len)
3772 {
3773 	size_t cur;
3774 	size_t offset;
3775 	struct page *page;
3776 	char *kaddr;
3777 	char *ptr = (char *)ptrv;
3778 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3779 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3780 	int ret = 0;
3781 
3782 	WARN_ON(start > eb->len);
3783 	WARN_ON(start + len > eb->start + eb->len);
3784 
3785 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3786 
3787 	while (len > 0) {
3788 		page = extent_buffer_page(eb, i);
3789 
3790 		cur = min(len, (PAGE_CACHE_SIZE - offset));
3791 
3792 		kaddr = kmap_atomic(page, KM_USER0);
3793 		ret = memcmp(ptr, kaddr + offset, cur);
3794 		kunmap_atomic(kaddr, KM_USER0);
3795 		if (ret)
3796 			break;
3797 
3798 		ptr += cur;
3799 		len -= cur;
3800 		offset = 0;
3801 		i++;
3802 	}
3803 	return ret;
3804 }
3805 
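/*
 * Copy @len bytes from @srcv into the extent buffer starting at @start,
 * page by page.  The backing pages must already be uptodate.
 */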
3806 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3807 			 unsigned long start, unsigned long len)
3808 {
3809 	size_t cur;
3810 	size_t offset;
3811 	struct page *page;
3812 	char *kaddr;
3813 	char *src = (char *)srcv;
3814 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3815 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3816 
3817 	WARN_ON(start > eb->len);
3818 	WARN_ON(start + len > eb->start + eb->len);
3819 
3820 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3821 
3822 	while (len > 0) {
3823 		page = extent_buffer_page(eb, i);
3824 		WARN_ON(!PageUptodate(page));
3825 
3826 		cur = min(len, PAGE_CACHE_SIZE - offset);
3827 		kaddr = kmap_atomic(page, KM_USER1);
3828 		memcpy(kaddr + offset, src, cur);
3829 		kunmap_atomic(kaddr, KM_USER1);
3830 
3831 		src += cur;
3832 		len -= cur;
3833 		offset = 0;
3834 		i++;
3835 	}
3836 }
3837 
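/*
 * Fill @len bytes of the extent buffer starting at @start with the byte @c.
 */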
3838 void memset_extent_buffer(struct extent_buffer *eb, char c,
3839 			  unsigned long start, unsigned long len)
3840 {
3841 	size_t cur;
3842 	size_t offset;
3843 	struct page *page;
3844 	char *kaddr;
3845 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3846 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3847 
3848 	WARN_ON(start > eb->len);
3849 	WARN_ON(start + len > eb->start + eb->len);
3850 
3851 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3852 
3853 	while (len > 0) {
3854 		page = extent_buffer_page(eb, i);
3855 		WARN_ON(!PageUptodate(page));
3856 
3857 		cur = min(len, PAGE_CACHE_SIZE - offset);
3858 		kaddr = kmap_atomic(page, KM_USER0);
3859 		memset(kaddr + offset, c, cur);
3860 		kunmap_atomic(kaddr, KM_USER0);
3861 
3862 		len -= cur;
3863 		offset = 0;
3864 		i++;
3865 	}
3866 }
3867 
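/*
 * Copy @len bytes from @src_offset in @src into @dst at @dst_offset, using
 * read_extent_buffer() to pull the source data into each destination page.
 */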
3868 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3869 			unsigned long dst_offset, unsigned long src_offset,
3870 			unsigned long len)
3871 {
3872 	u64 dst_len = dst->len;
3873 	size_t cur;
3874 	size_t offset;
3875 	struct page *page;
3876 	char *kaddr;
3877 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3878 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3879 
3880 	WARN_ON(src->len != dst_len);
3881 
3882 	offset = (start_offset + dst_offset) &
3883 		((unsigned long)PAGE_CACHE_SIZE - 1);
3884 
3885 	while (len > 0) {
3886 		page = extent_buffer_page(dst, i);
3887 		WARN_ON(!PageUptodate(page));
3888 
3889 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3890 
3891 		kaddr = kmap_atomic(page, KM_USER0);
3892 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
3893 		kunmap_atomic(kaddr, KM_USER0);
3894 
3895 		src_offset += cur;
3896 		len -= cur;
3897 		offset = 0;
3898 		i++;
3899 	}
3900 }
3901 
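/*
 * Move @len bytes between two pages.  When src and dst are the same page a
 * plain memmove() is used; otherwise the bytes are copied back to front so
 * the caller can walk an overlapping range from its end.
 */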
3902 static void move_pages(struct page *dst_page, struct page *src_page,
3903 		       unsigned long dst_off, unsigned long src_off,
3904 		       unsigned long len)
3905 {
3906 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3907 	if (dst_page == src_page) {
3908 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3909 	} else {
3910 		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3911 		char *p = dst_kaddr + dst_off + len;
3912 		char *s = src_kaddr + src_off + len;
3913 
3914 		while (len--)
3915 			*--p = *--s;
3916 
3917 		kunmap_atomic(src_kaddr, KM_USER1);
3918 	}
3919 	kunmap_atomic(dst_kaddr, KM_USER0);
3920 }
3921 
3922 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3923 {
3924 	unsigned long distance = (src > dst) ? src - dst : dst - src;
3925 	return distance < len;
3926 }
3927 
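/*
 * Straight memcpy() between two page mappings; overlapping ranges within a
 * single page trip the BUG_ON().
 */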
3928 static void copy_pages(struct page *dst_page, struct page *src_page,
3929 		       unsigned long dst_off, unsigned long src_off,
3930 		       unsigned long len)
3931 {
3932 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3933 	char *src_kaddr;
3934 
3935 	if (dst_page != src_page) {
3936 		src_kaddr = kmap_atomic(src_page, KM_USER1);
3937 	} else {
3938 		src_kaddr = dst_kaddr;
3939 		BUG_ON(areas_overlap(src_off, dst_off, len));
3940 	}
3941 
3942 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3943 	kunmap_atomic(dst_kaddr, KM_USER0);
3944 	if (dst_page != src_page)
3945 		kunmap_atomic(src_kaddr, KM_USER1);
3946 }
3947 
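/*
 * Copy @len bytes within @dst from @src_offset to @dst_offset, walking
 * forward one page-sized chunk at a time.  Out-of-range offsets are a BUG;
 * overlapping ranges should go through memmove_extent_buffer().
 */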
3948 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3949 			   unsigned long src_offset, unsigned long len)
3950 {
3951 	size_t cur;
3952 	size_t dst_off_in_page;
3953 	size_t src_off_in_page;
3954 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3955 	unsigned long dst_i;
3956 	unsigned long src_i;
3957 
3958 	if (src_offset + len > dst->len) {
3959 		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3960 		       "len %lu dst len %lu\n", src_offset, len, dst->len);
3961 		BUG_ON(1);
3962 	}
3963 	if (dst_offset + len > dst->len) {
3964 		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3965 		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
3966 		BUG_ON(1);
3967 	}
3968 
3969 	while (len > 0) {
3970 		dst_off_in_page = (start_offset + dst_offset) &
3971 			((unsigned long)PAGE_CACHE_SIZE - 1);
3972 		src_off_in_page = (start_offset + src_offset) &
3973 			((unsigned long)PAGE_CACHE_SIZE - 1);
3974 
3975 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3976 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3977 
3978 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3979 					       src_off_in_page));
3980 		cur = min_t(unsigned long, cur,
3981 			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3982 
3983 		copy_pages(extent_buffer_page(dst, dst_i),
3984 			   extent_buffer_page(dst, src_i),
3985 			   dst_off_in_page, src_off_in_page, cur);
3986 
3987 		src_offset += cur;
3988 		dst_offset += cur;
3989 		len -= cur;
3990 	}
3991 }
3992 
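/*
 * Overlap-safe copy within a single extent buffer.  Non-overlapping ranges
 * fall back to memcpy_extent_buffer(); overlapping ones are copied from the
 * end of the range backwards via move_pages().
 */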
3993 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3994 			   unsigned long src_offset, unsigned long len)
3995 {
3996 	size_t cur;
3997 	size_t dst_off_in_page;
3998 	size_t src_off_in_page;
3999 	unsigned long dst_end = dst_offset + len - 1;
4000 	unsigned long src_end = src_offset + len - 1;
4001 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4002 	unsigned long dst_i;
4003 	unsigned long src_i;
4004 
4005 	if (src_offset + len > dst->len) {
4006 		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4007 		       "len %lu dst len %lu\n", src_offset, len, dst->len);
4008 		BUG_ON(1);
4009 	}
4010 	if (dst_offset + len > dst->len) {
4011 		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4012 		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
4013 		BUG_ON(1);
4014 	}
4015 	if (!areas_overlap(src_offset, dst_offset, len)) {
4016 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4017 		return;
4018 	}
4019 	while (len > 0) {
4020 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4021 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4022 
4023 		dst_off_in_page = (start_offset + dst_end) &
4024 			((unsigned long)PAGE_CACHE_SIZE - 1);
4025 		src_off_in_page = (start_offset + src_end) &
4026 			((unsigned long)PAGE_CACHE_SIZE - 1);
4027 
4028 		cur = min_t(unsigned long, len, src_off_in_page + 1);
4029 		cur = min(cur, dst_off_in_page + 1);
4030 		move_pages(extent_buffer_page(dst, dst_i),
4031 			   extent_buffer_page(dst, src_i),
4032 			   dst_off_in_page - cur + 1,
4033 			   src_off_in_page - cur + 1, cur);
4034 
4035 		dst_end -= cur;
4036 		src_end -= cur;
4037 		len -= cur;
4038 	}
4039 }
4040 
4041 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4042 {
4043 	struct extent_buffer *eb =
4044 			container_of(head, struct extent_buffer, rcu_head);
4045 
4046 	btrfs_release_extent_buffer(eb);
4047 }
4048 
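/*
 * Try to free the extent buffer attached to @page.  Returns 1 if the buffer
 * was released (or none was attached), 0 if it is dirty or still referenced.
 */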
4049 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
4050 {
4051 	u64 start = page_offset(page);
4052 	struct extent_buffer *eb;
4053 	int ret = 1;
4054 
4055 	spin_lock(&tree->buffer_lock);
4056 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4057 	if (!eb) {
4058 		spin_unlock(&tree->buffer_lock);
4059 		return ret;
4060 	}
4061 
4062 	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4063 		ret = 0;
4064 		goto out;
4065 	}
4066 
4067 	/*
4068 	 * Drop the last reference: set eb->refs to 0 only if it is currently 1,
4069 	 * then release the buffer.  If anyone else still holds a reference, back off.
4070 	 */
4071 	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
4072 		ret = 0;
4073 		goto out;
4074 	}
4075 
4076 	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4077 out:
4078 	spin_unlock(&tree->buffer_lock);
4079 
4080 	/* at this point we can safely release the extent buffer */
4081 	if (atomic_read(&eb->refs) == 0)
4082 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4083 	return ret;
4084 }
4085