/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
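
/*
 * Example (illustrative): the table maps the file-type bits of i_mode to
 * the on-disk directory item type, so for a directory:
 *
 *	btrfs_type_by_mode[(S_IFDIR & S_IFMT) >> S_SHIFT] == BTRFS_FT_DIR
 */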

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}
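
/*
 * Note (an assumption based on the callers elsewhere in this file): this
 * runs with the transaction from the inode-creation paths (create, mkdir,
 * mknod, symlink) still open, so the inherited ACLs and security xattrs
 * commit in the same transaction as the new inode itself.
 */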

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}
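
/*
 * Layout sketch (illustrative): the item inserted above is a
 * btrfs_file_extent_item whose header is followed directly by the file
 * data inside the leaf, so the leaf space consumed is
 *
 *	datasize = btrfs_file_extent_calc_inline_size(cur_size)
 *
 * where cur_size is the (possibly compressed) byte count actually written
 * at btrfs_file_extent_inline_start(ei).
 */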


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	} else if (ret == -ENOSPC) {
		return 1;
	}

	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
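
/*
 * Return convention (from the code above): 0 means the inline extent was
 * created, 1 means the range does not qualify (or the tree ran out of
 * space) and the caller should fall back to regular extents, and a
 * negative value is a hard error after the transaction was aborted.
 */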

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that the amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		if (redirty)
			extent_range_redirty_for_io(inode, start, end);
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}
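
/*
 * Worked example (illustrative, assuming compression succeeds on each
 * chunk): a 300K dirty range is handled 128K at a time because
 * max_uncompressed caps the input size.  Each trip through the "again"
 * loop compresses one chunk, queues it with add_async_extent() and
 * advances start by num_bytes, so the range becomes three async extents
 * (128K + 128K + 44K).
 */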

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1);
			if (ret == -ENOSPC)
				goto retry;
			goto out_free; /* JDM: Requeue? */
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret); /* -ENOMEM */

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret); /* -ENOMEM */
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free:
	kfree(async_extent);
	goto out;
}
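
/*
 * Note on the -ENOSPC retry above: the compressed pages are freed and
 * async_extent->pages is set to NULL before jumping back to "retry", so
 * the second pass takes the !async_extent->pages branch and writes the
 * range uncompressed through cow_file_range() instead.
 */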

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
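
/*
 * The hint returned here is fed to btrfs_reserve_extent() as alloc_hint,
 * so the allocator starts its search near blocks the file already owns,
 * which tends to keep a file's extents physically close together.
 */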

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(root, inode));
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);

	return ret;
out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, NULL,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}
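
/*
 * Note on the allocation loop above: btrfs_reserve_extent() may return a
 * smaller extent than requested (ins.offset < disk_num_bytes), so each
 * pass maps and unlocks only cur_alloc_size bytes, bumps alloc_hint past
 * the new extent and keeps going until the whole delalloc range is
 * covered.
 */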

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
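
/*
 * Throttling sketch: each 512K chunk (or the whole range, for nocompress
 * inodes) adds its page count to async_delalloc_pages before it is queued,
 * and async_cow_submit() subtracts it again, so the wait_event() above
 * bounds how much delalloc work can sit in flight at once.
 */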

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
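
/*
 * Returns 1 (and frees the looked-up sums) when any checksums exist in the
 * byte range, which run_delalloc_nocow() below uses as a signal to force
 * COW rather than leave an extent with a mix of valid and missing csums.
 */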

/*
 * callback for the nocow writeback path.  This checks for snapshots or COW
 * copies of the extents that exist in the file, and COWs the file range
 * as required.
 *
 * If no COW copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nolock = btrfs_is_free_space_inode(root, inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	if (nolock) {
		err = btrfs_end_transaction_nolock(trans, root);
	} else {
		err = btrfs_end_transaction(trans, root);
	}
	if (!ret)
		ret = err;

	btrfs_free_path(path);
	return ret;
}
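
/*
 * Bookkeeping sketch: while walking the extent items, any stretch that
 * fails the nocow checks is only remembered by setting cow_start; the
 * accumulated COW run is flushed through cow_file_range() either when a
 * nocow-able extent is reached or at the end of the range, so adjacent
 * COW regions are handled in one call instead of many.
 */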

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress) &&
		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}
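
/*
 * The split and merge hooks mirror each other: splitting one delalloc
 * extent state into two bumps outstanding_extents, while merging two back
 * into one drops it, keeping the per-inode count in step with how many
 * extent items the delalloc range may eventually become.
 */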

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 or 1 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}
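
/*
 * Example (illustrative): bi_sector counts 512-byte sectors, so "<< 9"
 * converts it to a byte offset.  If the chunk mapping says only map_length
 * bytes remain in the current stripe and the bio already holds length
 * bytes, adding size more would cross the stripe boundary, so the hook
 * returns 1 to refuse the merge.
 */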

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(root, inode))
		metadata = 2;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
	if (ret)
		return ret;

	if (!(rw & REQ_WRITE)) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums, record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}
1647 
1648 /* see btrfs_writepage_start_hook for details on why this is required */
1649 struct btrfs_writepage_fixup {
1650 	struct page *page;
1651 	struct btrfs_work work;
1652 };
1653 
1654 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1655 {
1656 	struct btrfs_writepage_fixup *fixup;
1657 	struct btrfs_ordered_extent *ordered;
1658 	struct extent_state *cached_state = NULL;
1659 	struct page *page;
1660 	struct inode *inode;
1661 	u64 page_start;
1662 	u64 page_end;
1663 	int ret;
1664 
1665 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1666 	page = fixup->page;
1667 again:
1668 	lock_page(page);
1669 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1670 		ClearPageChecked(page);
1671 		goto out_page;
1672 	}
1673 
1674 	inode = page->mapping->host;
1675 	page_start = page_offset(page);
1676 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1677 
1678 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1679 			 &cached_state);
1680 
1681 	/* already ordered? We're done */
1682 	if (PagePrivate2(page))
1683 		goto out;
1684 
1685 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1686 	if (ordered) {
1687 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1688 				     page_end, &cached_state, GFP_NOFS);
1689 		unlock_page(page);
1690 		btrfs_start_ordered_extent(inode, ordered, 1);
1691 		btrfs_put_ordered_extent(ordered);
1692 		goto again;
1693 	}
1694 
1695 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1696 	if (ret) {
1697 		mapping_set_error(page->mapping, ret);
1698 		end_extent_writepage(page, ret, page_start, page_end);
1699 		ClearPageChecked(page);
1700 		goto out;
1701 	}
1702 
1703 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1704 	ClearPageChecked(page);
1705 	set_page_dirty(page);
1706 out:
1707 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1708 			     &cached_state, GFP_NOFS);
1709 out_page:
1710 	unlock_page(page);
1711 	page_cache_release(page);
1712 	kfree(fixup);
1713 }
1714 
1715 /*
1716  * There are a few paths in the higher layers of the kernel that directly
1717  * set the page dirty bit without asking the filesystem if it is a
1718  * good idea.  This causes problems because we want to make sure COW
1719  * properly happens and the data=ordered rules are followed.
1720  *
1721  * In our case any range that doesn't have the ORDERED bit set
1722  * hasn't been properly set up for IO.  We kick off an async process
1723  * to fix it up.  The async helper will wait for ordered extents, set
1724  * the delalloc bit and make it safe to write the page.
1725  */
1726 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1727 {
1728 	struct inode *inode = page->mapping->host;
1729 	struct btrfs_writepage_fixup *fixup;
1730 	struct btrfs_root *root = BTRFS_I(inode)->root;
1731 
1732 	/* this page is properly in the ordered list */
1733 	if (TestClearPagePrivate2(page))
1734 		return 0;
1735 
1736 	if (PageChecked(page))
1737 		return -EAGAIN;
1738 
1739 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1740 	if (!fixup)
1741 		return -EAGAIN;
1742 
1743 	SetPageChecked(page);
1744 	page_cache_get(page);
1745 	fixup->work.func = btrfs_writepage_fixup_worker;
1746 	fixup->page = page;
1747 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1748 	return -EBUSY;
1749 }
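/*
 * Return contract for btrfs_writepage_start_hook, as implemented above:
 * 0 when the page already carries the ORDERED bit (Private2), -EAGAIN
 * when a fixup is already pending or the fixup struct can't be allocated,
 * and -EBUSY once a fixup worker has been queued and owns the extra page
 * reference.
 */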
1750 
1751 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1752 				       struct inode *inode, u64 file_pos,
1753 				       u64 disk_bytenr, u64 disk_num_bytes,
1754 				       u64 num_bytes, u64 ram_bytes,
1755 				       u8 compression, u8 encryption,
1756 				       u16 other_encoding, int extent_type)
1757 {
1758 	struct btrfs_root *root = BTRFS_I(inode)->root;
1759 	struct btrfs_file_extent_item *fi;
1760 	struct btrfs_path *path;
1761 	struct extent_buffer *leaf;
1762 	struct btrfs_key ins;
1763 	u64 hint;
1764 	int ret;
1765 
1766 	path = btrfs_alloc_path();
1767 	if (!path)
1768 		return -ENOMEM;
1769 
1770 	path->leave_spinning = 1;
1771 
1772 	/*
1773 	 * we may be replacing one extent in the tree with another.
1774 	 * The new extent is pinned in the extent map, and we don't want
1775 	 * to drop it from the cache until it is completely in the btree.
1776 	 *
1777 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1778 	 * the caller is expected to unpin it and allow it to be merged
1779 	 * with the others.
1780 	 */
1781 	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1782 				 &hint, 0);
1783 	if (ret)
1784 		goto out;
1785 
1786 	ins.objectid = btrfs_ino(inode);
1787 	ins.offset = file_pos;
1788 	ins.type = BTRFS_EXTENT_DATA_KEY;
1789 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1790 	if (ret)
1791 		goto out;
1792 	leaf = path->nodes[0];
1793 	fi = btrfs_item_ptr(leaf, path->slots[0],
1794 			    struct btrfs_file_extent_item);
1795 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1796 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1797 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1798 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1799 	btrfs_set_file_extent_offset(leaf, fi, 0);
1800 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1801 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1802 	btrfs_set_file_extent_compression(leaf, fi, compression);
1803 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1804 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1805 
1806 	btrfs_unlock_up_safe(path, 1);
1807 	btrfs_set_lock_blocking(leaf);
1808 
1809 	btrfs_mark_buffer_dirty(leaf);
1810 
1811 	inode_add_bytes(inode, num_bytes);
1812 
1813 	ins.objectid = disk_bytenr;
1814 	ins.offset = disk_num_bytes;
1815 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1816 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1817 					root->root_key.objectid,
1818 					btrfs_ino(inode), file_pos, &ins);
1819 out:
1820 	btrfs_free_path(path);
1821 
1822 	return ret;
1823 }
1824 
1825 /*
1826  * as ordered data IO finishes, this gets called so we can finish
1827  * an ordered extent if the range of bytes in the file it covers is
1828  * fully written.
1829  */
1835 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1836 {
1837 	struct btrfs_root *root = BTRFS_I(inode)->root;
1838 	struct btrfs_trans_handle *trans = NULL;
1839 	struct btrfs_ordered_extent *ordered_extent = NULL;
1840 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1841 	struct extent_state *cached_state = NULL;
1842 	int compress_type = 0;
1843 	int ret;
1844 	bool nolock;
1845 
1846 	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1847 					     end - start + 1);
1848 	if (!ret)
1849 		return 0;
1850 	BUG_ON(!ordered_extent); /* Logic error */
1851 
1852 	nolock = btrfs_is_free_space_inode(root, inode);
1853 
1854 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1855 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
1856 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1857 		if (!ret) {
1858 			if (nolock)
1859 				trans = btrfs_join_transaction_nolock(root);
1860 			else
1861 				trans = btrfs_join_transaction(root);
1862 			if (IS_ERR(trans))
1863 				return PTR_ERR(trans);
1864 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1865 			ret = btrfs_update_inode_fallback(trans, root, inode);
1866 			if (ret) /* -ENOMEM or corruption */
1867 				btrfs_abort_transaction(trans, root, ret);
1868 		}
1869 		goto out;
1870 	}
1871 
1872 	lock_extent_bits(io_tree, ordered_extent->file_offset,
1873 			 ordered_extent->file_offset + ordered_extent->len - 1,
1874 			 0, &cached_state);
1875 
1876 	if (nolock)
1877 		trans = btrfs_join_transaction_nolock(root);
1878 	else
1879 		trans = btrfs_join_transaction(root);
1880 	if (IS_ERR(trans)) {
1881 		ret = PTR_ERR(trans);
1882 		trans = NULL;
1883 		goto out_unlock;
1884 	}
1885 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1886 
1887 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1888 		compress_type = ordered_extent->compress_type;
1889 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1890 		BUG_ON(compress_type);
1891 		ret = btrfs_mark_extent_written(trans, inode,
1892 						ordered_extent->file_offset,
1893 						ordered_extent->file_offset +
1894 						ordered_extent->len);
1895 	} else {
1896 		BUG_ON(root == root->fs_info->tree_root);
1897 		ret = insert_reserved_file_extent(trans, inode,
1898 						ordered_extent->file_offset,
1899 						ordered_extent->start,
1900 						ordered_extent->disk_len,
1901 						ordered_extent->len,
1902 						ordered_extent->len,
1903 						compress_type, 0, 0,
1904 						BTRFS_FILE_EXTENT_REG);
1905 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1906 				   ordered_extent->file_offset,
1907 				   ordered_extent->len);
1908 	}
1909 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1910 			     ordered_extent->file_offset +
1911 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1912 	if (ret < 0) {
1913 		btrfs_abort_transaction(trans, root, ret);
1914 		goto out;
1915 	}
1916 
1917 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1918 			  &ordered_extent->list);
1919 
1920 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1921 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1922 		ret = btrfs_update_inode_fallback(trans, root, inode);
1923 		if (ret) { /* -ENOMEM or corruption */
1924 			btrfs_abort_transaction(trans, root, ret);
1925 			goto out;
1926 		}
1927 	}
1928 	ret = 0;
1929 out:
1930 	if (root != root->fs_info->tree_root)
1931 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1932 	if (trans) {
1933 		if (nolock)
1934 			btrfs_end_transaction_nolock(trans, root);
1935 		else
1936 			btrfs_end_transaction(trans, root);
1937 	}
1938 
1939 	/* once for us */
1940 	btrfs_put_ordered_extent(ordered_extent);
1941 	/* once for the tree */
1942 	btrfs_put_ordered_extent(ordered_extent);
1943 
1944 	return 0;
1945 out_unlock:
1946 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1947 			     ordered_extent->file_offset +
1948 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1949 	goto out;
1950 }
1951 
1952 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1953 				struct extent_state *state, int uptodate)
1954 {
1955 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
1956 
1957 	ClearPagePrivate2(page);
1958 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
1959 }
1960 
1961 /*
1962  * when reads are done, we need to check csums to verify the data is correct
1963  * if there's a match, we allow the bio to finish.  If not, the code in
1964  * extent_io.c will try to find good copies for us.
1965  */
1966 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1967 			       struct extent_state *state, int mirror)
1968 {
1969 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1970 	struct inode *inode = page->mapping->host;
1971 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1972 	char *kaddr;
1973 	u64 private = ~(u32)0;
1974 	int ret;
1975 	struct btrfs_root *root = BTRFS_I(inode)->root;
1976 	u32 csum = ~(u32)0;
1977 
1978 	if (PageChecked(page)) {
1979 		ClearPageChecked(page);
1980 		goto good;
1981 	}
1982 
1983 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1984 		goto good;
1985 
1986 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1987 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1988 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1989 				  GFP_NOFS);
1990 		return 0;
1991 	}
1992 
1993 	if (state && state->start == start) {
1994 		private = state->private;
1995 		ret = 0;
1996 	} else {
1997 		ret = get_state_private(io_tree, start, &private);
1998 	}
1999 	kaddr = kmap_atomic(page);
2000 	if (ret)
2001 		goto zeroit;
2002 
2003 	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
2004 	btrfs_csum_final(csum, (char *)&csum);
2005 	if (csum != private)
2006 		goto zeroit;
2007 
2008 	kunmap_atomic(kaddr);
2009 good:
2010 	return 0;
2011 
2012 zeroit:
2013 	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2014 		       "private %llu\n",
2015 		       (unsigned long long)btrfs_ino(page->mapping->host),
2016 		       (unsigned long long)start, csum,
2017 		       (unsigned long long)private);
2018 	memset(kaddr + offset, 1, end - start + 1);
2019 	flush_dcache_page(page);
2020 	kunmap_atomic(kaddr);
2021 	if (private == 0)
2022 		return 0;
2023 	return -EIO;
2024 }
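/*
 * Note on the zeroit path above: the page contents are overwritten with
 * 0x01 bytes (presumably to poison stale data) before -EIO is returned;
 * a stored csum of 0 is treated as "no checksum" and tolerated.
 */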
2025 
2026 struct delayed_iput {
2027 	struct list_head list;
2028 	struct inode *inode;
2029 };
2030 
2031 /* JDM: If this is fs-wide, why can't we add a pointer to
2032  * btrfs_inode instead and avoid the allocation? */
2033 void btrfs_add_delayed_iput(struct inode *inode)
2034 {
2035 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2036 	struct delayed_iput *delayed;
2037 
2038 	if (atomic_add_unless(&inode->i_count, -1, 1))
2039 		return;
2040 
2041 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2042 	delayed->inode = inode;
2043 
2044 	spin_lock(&fs_info->delayed_iput_lock);
2045 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2046 	spin_unlock(&fs_info->delayed_iput_lock);
2047 }
2048 
2049 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2050 {
2051 	LIST_HEAD(list);
2052 	struct btrfs_fs_info *fs_info = root->fs_info;
2053 	struct delayed_iput *delayed;
2054 	int empty;
2055 
2056 	spin_lock(&fs_info->delayed_iput_lock);
2057 	empty = list_empty(&fs_info->delayed_iputs);
2058 	spin_unlock(&fs_info->delayed_iput_lock);
2059 	if (empty)
2060 		return;
2061 
2062 	down_read(&root->fs_info->cleanup_work_sem);
2063 	spin_lock(&fs_info->delayed_iput_lock);
2064 	list_splice_init(&fs_info->delayed_iputs, &list);
2065 	spin_unlock(&fs_info->delayed_iput_lock);
2066 
2067 	while (!list_empty(&list)) {
2068 		delayed = list_entry(list.next, struct delayed_iput, list);
2069 		list_del(&delayed->list);
2070 		iput(delayed->inode);
2071 		kfree(delayed);
2072 	}
2073 	up_read(&root->fs_info->cleanup_work_sem);
2074 }
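/*
 * Sketch of the alternative the JDM comment above asks about; this is an
 * assumption for illustration, not code from this file.  Embedding the
 * list_head in btrfs_inode would avoid the per-iput allocation:
 */
#if 0
	/* in struct btrfs_inode: */
	struct list_head delayed_iput;	/* links fs_info->delayed_iputs */

void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&BTRFS_I(inode)->delayed_iput,
		      &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}
#endif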
2075 
2076 enum btrfs_orphan_cleanup_state {
2077 	ORPHAN_CLEANUP_STARTED	= 1,
2078 	ORPHAN_CLEANUP_DONE	= 2,
2079 };
2080 
2081 /*
2082  * This is called at transaction commit time. If there are no orphan
2083  * files in the subvolume, it removes the orphan item and frees the
2084  * block_rsv structure.
2085  */
2086 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2087 			      struct btrfs_root *root)
2088 {
2089 	struct btrfs_block_rsv *block_rsv;
2090 	int ret;
2091 
2092 	if (!list_empty(&root->orphan_list) ||
2093 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2094 		return;
2095 
2096 	spin_lock(&root->orphan_lock);
2097 	if (!list_empty(&root->orphan_list)) {
2098 		spin_unlock(&root->orphan_lock);
2099 		return;
2100 	}
2101 
2102 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2103 		spin_unlock(&root->orphan_lock);
2104 		return;
2105 	}
2106 
2107 	block_rsv = root->orphan_block_rsv;
2108 	root->orphan_block_rsv = NULL;
2109 	spin_unlock(&root->orphan_lock);
2110 
2111 	if (root->orphan_item_inserted &&
2112 	    btrfs_root_refs(&root->root_item) > 0) {
2113 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2114 					    root->root_key.objectid);
2115 		BUG_ON(ret);
2116 		root->orphan_item_inserted = 0;
2117 	}
2118 
2119 	if (block_rsv) {
2120 		WARN_ON(block_rsv->size > 0);
2121 		btrfs_free_block_rsv(root, block_rsv);
2122 	}
2123 }
2124 
2125 /*
2126  * This creates an orphan entry for the given inode in case something goes
2127  * wrong in the middle of an unlink/truncate.
2128  *
2129  * NOTE: caller of this function should reserve 5 units of metadata for
2130  *	 this function.
2131  */
2132 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2133 {
2134 	struct btrfs_root *root = BTRFS_I(inode)->root;
2135 	struct btrfs_block_rsv *block_rsv = NULL;
2136 	int reserve = 0;
2137 	int insert = 0;
2138 	int ret;
2139 
2140 	if (!root->orphan_block_rsv) {
2141 		block_rsv = btrfs_alloc_block_rsv(root);
2142 		if (!block_rsv)
2143 			return -ENOMEM;
2144 	}
2145 
2146 	spin_lock(&root->orphan_lock);
2147 	if (!root->orphan_block_rsv) {
2148 		root->orphan_block_rsv = block_rsv;
2149 	} else if (block_rsv) {
2150 		btrfs_free_block_rsv(root, block_rsv);
2151 		block_rsv = NULL;
2152 	}
2153 
2154 	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2155 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2156 #if 0
2157 		/*
2158 		 * For proper ENOSPC handling, we should do orphan
2159 		 * cleanup when mounting. But this introduces backward
2160 		 * compatibility issue.
2161 		 */
2162 		if (!xchg(&root->orphan_item_inserted, 1))
2163 			insert = 2;
2164 		else
2165 			insert = 1;
2166 #endif
2167 		insert = 1;
2168 	}
2169 
2170 	if (!BTRFS_I(inode)->orphan_meta_reserved) {
2171 		BTRFS_I(inode)->orphan_meta_reserved = 1;
2172 		reserve = 1;
2173 	}
2174 	spin_unlock(&root->orphan_lock);
2175 
2176 	/* grab metadata reservation from transaction handle */
2177 	if (reserve) {
2178 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2179 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2180 	}
2181 
2182 	/* insert an orphan item to track this unlinked/truncated file */
2183 	if (insert >= 1) {
2184 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2185 		if (ret && ret != -EEXIST) {
2186 			btrfs_abort_transaction(trans, root, ret);
2187 			return ret;
2188 		}
2189 		ret = 0;
2190 	}
2191 
2192 	/* insert an orphan item to track that the subvolume contains orphan files */
2193 	if (insert >= 2) {
2194 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2195 					       root->root_key.objectid);
2196 		if (ret && ret != -EEXIST) {
2197 			btrfs_abort_transaction(trans, root, ret);
2198 			return ret;
2199 		}
2200 	}
2201 	return 0;
2202 }
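/*
 * Note on the insert values above: insert == 1 adds the per-inode orphan
 * item in this root; insert == 2 (currently disabled by the #if 0 block)
 * would also add an orphan item in the tree root to mark that the whole
 * subvolume contains orphans.
 */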
2203 
2204 /*
2205  * We have done the truncate/delete so we can go ahead and remove the orphan
2206  * item for this particular inode.
2207  */
2208 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2209 {
2210 	struct btrfs_root *root = BTRFS_I(inode)->root;
2211 	int delete_item = 0;
2212 	int release_rsv = 0;
2213 	int ret = 0;
2214 
2215 	spin_lock(&root->orphan_lock);
2216 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2217 		list_del_init(&BTRFS_I(inode)->i_orphan);
2218 		delete_item = 1;
2219 	}
2220 
2221 	if (BTRFS_I(inode)->orphan_meta_reserved) {
2222 		BTRFS_I(inode)->orphan_meta_reserved = 0;
2223 		release_rsv = 1;
2224 	}
2225 	spin_unlock(&root->orphan_lock);
2226 
2227 	if (trans && delete_item) {
2228 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2229 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2230 	}
2231 
2232 	if (release_rsv)
2233 		btrfs_orphan_release_metadata(inode);
2234 
2235 	return 0;
2236 }
2237 
2238 /*
2239  * this cleans up any orphans that may be left on the list from the last use
2240  * of this root.
2241  */
2242 int btrfs_orphan_cleanup(struct btrfs_root *root)
2243 {
2244 	struct btrfs_path *path;
2245 	struct extent_buffer *leaf;
2246 	struct btrfs_key key, found_key;
2247 	struct btrfs_trans_handle *trans;
2248 	struct inode *inode;
2249 	u64 last_objectid = 0;
2250 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2251 
2252 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2253 		return 0;
2254 
2255 	path = btrfs_alloc_path();
2256 	if (!path) {
2257 		ret = -ENOMEM;
2258 		goto out;
2259 	}
2260 	path->reada = -1;
2261 
2262 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2263 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2264 	key.offset = (u64)-1;
2265 
2266 	while (1) {
2267 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2268 		if (ret < 0)
2269 			goto out;
2270 
2271 		/*
2272 		 * ret == 0 means we found what we were searching for, which
2273 		 * is weird, but possible, so only mess with the path if we
2274 		 * didn't find the key, and then see if we have stuff that matches
2275 		 */
2276 		if (ret > 0) {
2277 			ret = 0;
2278 			if (path->slots[0] == 0)
2279 				break;
2280 			path->slots[0]--;
2281 		}
2282 
2283 		/* pull out the item */
2284 		leaf = path->nodes[0];
2285 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2286 
2287 		/* make sure the item matches what we want */
2288 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2289 			break;
2290 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2291 			break;
2292 
2293 		/* release the path since we're done with it */
2294 		btrfs_release_path(path);
2295 
2296 		/*
2297 		 * this is where we basically do a btrfs_lookup, without the
2298 		 * crossing-root thing.  we store the inode number in the
2299 		 * offset of the orphan item.
2300 		 */
2301 
2302 		if (found_key.offset == last_objectid) {
2303 			printk(KERN_ERR "btrfs: Error removing orphan entry, "
2304 			       "stopping orphan cleanup\n");
2305 			ret = -EINVAL;
2306 			goto out;
2307 		}
2308 
2309 		last_objectid = found_key.offset;
2310 
2311 		found_key.objectid = found_key.offset;
2312 		found_key.type = BTRFS_INODE_ITEM_KEY;
2313 		found_key.offset = 0;
2314 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2315 		ret = PTR_RET(inode);
2316 		if (ret && ret != -ESTALE)
2317 			goto out;
2318 
2319 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
2320 			struct btrfs_root *dead_root;
2321 			struct btrfs_fs_info *fs_info = root->fs_info;
2322 			int is_dead_root = 0;
2323 
2324 			/*
2325 			 * this is an orphan in the tree root. Currently these
2326 			 * could come from 2 sources:
2327 			 *  a) a snapshot deletion in progress
2328 			 *  b) a free space cache inode
2329 			 * We need to distinguish those two, as the snapshot
2330 			 * orphan must not get deleted.
2331 			 * find_dead_roots already ran before us, so if this
2332 			 * is a snapshot deletion, we should find the root
2333 			 * in the dead_roots list
2334 			 */
2335 			spin_lock(&fs_info->trans_lock);
2336 			list_for_each_entry(dead_root, &fs_info->dead_roots,
2337 					    root_list) {
2338 				if (dead_root->root_key.objectid ==
2339 				    found_key.objectid) {
2340 					is_dead_root = 1;
2341 					break;
2342 				}
2343 			}
2344 			spin_unlock(&fs_info->trans_lock);
2345 			if (is_dead_root) {
2346 				/* prevent this orphan from being found again */
2347 				key.offset = found_key.objectid - 1;
2348 				continue;
2349 			}
2350 		}
2351 		/*
2352 		 * Inode is already gone but the orphan item is still there,
2353 		 * kill the orphan item.
2354 		 */
2355 		if (ret == -ESTALE) {
2356 			trans = btrfs_start_transaction(root, 1);
2357 			if (IS_ERR(trans)) {
2358 				ret = PTR_ERR(trans);
2359 				goto out;
2360 			}
2361 			ret = btrfs_del_orphan_item(trans, root,
2362 						    found_key.objectid);
2363 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2364 			btrfs_end_transaction(trans, root);
2365 			continue;
2366 		}
2367 
2368 		/*
2369 		 * add this inode to the orphan list so btrfs_orphan_del does
2370 		 * the proper thing when we hit it
2371 		 */
2372 		spin_lock(&root->orphan_lock);
2373 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2374 		spin_unlock(&root->orphan_lock);
2375 
2376 		/* if we have links, this was a truncate, let's do that */
2377 		if (inode->i_nlink) {
2378 			if (!S_ISREG(inode->i_mode)) {
2379 				WARN_ON(1);
2380 				iput(inode);
2381 				continue;
2382 			}
2383 			nr_truncate++;
2384 			ret = btrfs_truncate(inode);
2385 		} else {
2386 			nr_unlink++;
2387 		}
2388 
2389 		/* this will do delete_inode and everything for us */
2390 		iput(inode);
2391 		if (ret)
2392 			goto out;
2393 	}
2394 	/* release the path since we're done with it */
2395 	btrfs_release_path(path);
2396 
2397 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2398 
2399 	if (root->orphan_block_rsv)
2400 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
2401 					(u64)-1);
2402 
2403 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
2404 		trans = btrfs_join_transaction(root);
2405 		if (!IS_ERR(trans))
2406 			btrfs_end_transaction(trans, root);
2407 	}
2408 
2409 	if (nr_unlink)
2410 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2411 	if (nr_truncate)
2412 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2413 
2414 out:
2415 	if (ret)
2416 		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2417 	btrfs_free_path(path);
2418 	return ret;
2419 }
2420 
2421 /*
2422  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2423  * don't find any xattrs, we know there can't be any acls.
2424  *
2425  * slot is the slot the inode is in, objectid is the objectid of the inode
2426  */
2427 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2428 					  int slot, u64 objectid)
2429 {
2430 	u32 nritems = btrfs_header_nritems(leaf);
2431 	struct btrfs_key found_key;
2432 	int scanned = 0;
2433 
2434 	slot++;
2435 	while (slot < nritems) {
2436 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2437 
2438 		/* we found a different objectid, there must not be acls */
2439 		if (found_key.objectid != objectid)
2440 			return 0;
2441 
2442 		/* we found an xattr, assume we've got an acl */
2443 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2444 			return 1;
2445 
2446 		/*
2447 		 * we found a key greater than an xattr key, there can't
2448 		 * be any acls later on
2449 		 */
2450 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2451 			return 0;
2452 
2453 		slot++;
2454 		scanned++;
2455 
2456 		/*
2457 		 * it goes inode, inode backrefs, xattrs, extents,
2458 		 * so if there are a ton of hard links to an inode there can
2459 		 * be a lot of backrefs.  Don't waste time searching too hard,
2460 		 * this is just an optimization
2461 		 */
2462 		if (scanned >= 8)
2463 			break;
2464 	}
2465 	/* we hit the end of the leaf before we found an xattr or
2466 	 * something larger than an xattr.  We have to assume the inode
2467 	 * has acls
2468 	 */
2469 	return 1;
2470 }
2471 
2472 /*
2473  * read an inode from the btree into the in-memory inode
2474  */
2475 static void btrfs_read_locked_inode(struct inode *inode)
2476 {
2477 	struct btrfs_path *path;
2478 	struct extent_buffer *leaf;
2479 	struct btrfs_inode_item *inode_item;
2480 	struct btrfs_timespec *tspec;
2481 	struct btrfs_root *root = BTRFS_I(inode)->root;
2482 	struct btrfs_key location;
2483 	int maybe_acls;
2484 	u32 rdev;
2485 	int ret;
2486 	bool filled = false;
2487 
2488 	ret = btrfs_fill_inode(inode, &rdev);
2489 	if (!ret)
2490 		filled = true;
2491 
2492 	path = btrfs_alloc_path();
2493 	if (!path)
2494 		goto make_bad;
2495 
2496 	path->leave_spinning = 1;
2497 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2498 
2499 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2500 	if (ret)
2501 		goto make_bad;
2502 
2503 	leaf = path->nodes[0];
2504 
2505 	if (filled)
2506 		goto cache_acl;
2507 
2508 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2509 				    struct btrfs_inode_item);
2510 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2511 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
2512 	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2513 	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2514 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2515 
2516 	tspec = btrfs_inode_atime(inode_item);
2517 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2518 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2519 
2520 	tspec = btrfs_inode_mtime(inode_item);
2521 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2522 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2523 
2524 	tspec = btrfs_inode_ctime(inode_item);
2525 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2526 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2527 
2528 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2529 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2530 	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2531 	inode->i_generation = BTRFS_I(inode)->generation;
2532 	inode->i_rdev = 0;
2533 	rdev = btrfs_inode_rdev(leaf, inode_item);
2534 
2535 	BTRFS_I(inode)->index_cnt = (u64)-1;
2536 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2537 cache_acl:
2538 	/*
2539 	 * try to precache a NULL acl entry for files that don't have
2540 	 * any xattrs or acls
2541 	 */
2542 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2543 					   btrfs_ino(inode));
2544 	if (!maybe_acls)
2545 		cache_no_acl(inode);
2546 
2547 	btrfs_free_path(path);
2548 
2549 	switch (inode->i_mode & S_IFMT) {
2550 	case S_IFREG:
2551 		inode->i_mapping->a_ops = &btrfs_aops;
2552 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2553 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2554 		inode->i_fop = &btrfs_file_operations;
2555 		inode->i_op = &btrfs_file_inode_operations;
2556 		break;
2557 	case S_IFDIR:
2558 		inode->i_fop = &btrfs_dir_file_operations;
2559 		if (root == root->fs_info->tree_root)
2560 			inode->i_op = &btrfs_dir_ro_inode_operations;
2561 		else
2562 			inode->i_op = &btrfs_dir_inode_operations;
2563 		break;
2564 	case S_IFLNK:
2565 		inode->i_op = &btrfs_symlink_inode_operations;
2566 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2567 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2568 		break;
2569 	default:
2570 		inode->i_op = &btrfs_special_inode_operations;
2571 		init_special_inode(inode, inode->i_mode, rdev);
2572 		break;
2573 	}
2574 
2575 	btrfs_update_iflags(inode);
2576 	return;
2577 
2578 make_bad:
2579 	btrfs_free_path(path);
2580 	make_bad_inode(inode);
2581 }
2582 
2583 /*
2584  * given a leaf and an inode, copy the inode fields into the leaf
2585  */
2586 static void fill_inode_item(struct btrfs_trans_handle *trans,
2587 			    struct extent_buffer *leaf,
2588 			    struct btrfs_inode_item *item,
2589 			    struct inode *inode)
2590 {
2591 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
2592 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2593 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2594 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2595 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2596 
2597 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2598 			       inode->i_atime.tv_sec);
2599 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2600 				inode->i_atime.tv_nsec);
2601 
2602 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2603 			       inode->i_mtime.tv_sec);
2604 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2605 				inode->i_mtime.tv_nsec);
2606 
2607 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2608 			       inode->i_ctime.tv_sec);
2609 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2610 				inode->i_ctime.tv_nsec);
2611 
2612 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2613 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2614 	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2615 	btrfs_set_inode_transid(leaf, item, trans->transid);
2616 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2617 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2618 	btrfs_set_inode_block_group(leaf, item, 0);
2619 }
2620 
2621 /*
2622  * copy everything in the in-memory inode into the btree.
2623  */
2624 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2625 				struct btrfs_root *root, struct inode *inode)
2626 {
2627 	struct btrfs_inode_item *inode_item;
2628 	struct btrfs_path *path;
2629 	struct extent_buffer *leaf;
2630 	int ret;
2631 
2632 	path = btrfs_alloc_path();
2633 	if (!path)
2634 		return -ENOMEM;
2635 
2636 	path->leave_spinning = 1;
2637 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
2638 				 1);
2639 	if (ret) {
2640 		if (ret > 0)
2641 			ret = -ENOENT;
2642 		goto failed;
2643 	}
2644 
2645 	btrfs_unlock_up_safe(path, 1);
2646 	leaf = path->nodes[0];
2647 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2648 				    struct btrfs_inode_item);
2649 
2650 	fill_inode_item(trans, leaf, inode_item, inode);
2651 	btrfs_mark_buffer_dirty(leaf);
2652 	btrfs_set_inode_last_trans(trans, inode);
2653 	ret = 0;
2654 failed:
2655 	btrfs_free_path(path);
2656 	return ret;
2657 }
2658 
2659 /*
2660  * copy everything in the in-memory inode into the btree.
2661  */
2662 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2663 				struct btrfs_root *root, struct inode *inode)
2664 {
2665 	int ret;
2666 
2667 	/*
2668 	 * If the inode is a free space inode, we can deadlock during commit
2669 	 * if we put it into the delayed code.
2670 	 *
2671 	 * The data relocation inode should also be directly updated
2672 	 * without delay
2673 	 */
2674 	if (!btrfs_is_free_space_inode(root, inode)
2675 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2676 		ret = btrfs_delayed_update_inode(trans, root, inode);
2677 		if (!ret)
2678 			btrfs_set_inode_last_trans(trans, inode);
2679 		return ret;
2680 	}
2681 
2682 	return btrfs_update_inode_item(trans, root, inode);
2683 }
2684 
2685 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2686 				struct btrfs_root *root, struct inode *inode)
2687 {
2688 	int ret;
2689 
2690 	ret = btrfs_update_inode(trans, root, inode);
2691 	if (ret == -ENOSPC)
2692 		return btrfs_update_inode_item(trans, root, inode);
2693 	return ret;
2694 }
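/*
 * The fallback above exists because the delayed-inode path taken by
 * btrfs_update_inode() has to reserve its own metadata space and can
 * therefore fail with -ENOSPC, while a direct btrfs_update_inode_item()
 * cows the inode item using the transaction's existing reservation.
 * (The reservation detail is inferred, not spelled out in this file.)
 */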
2695 
2696 /*
2697  * unlink helper that gets used here in inode.c and in the tree logging
2698  * recovery code.  It removes a link in a directory with a given name, and
2699  * also drops the back refs in the inode to the directory
2700  */
2701 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2702 				struct btrfs_root *root,
2703 				struct inode *dir, struct inode *inode,
2704 				const char *name, int name_len)
2705 {
2706 	struct btrfs_path *path;
2707 	int ret = 0;
2708 	struct extent_buffer *leaf;
2709 	struct btrfs_dir_item *di;
2710 	struct btrfs_key key;
2711 	u64 index;
2712 	u64 ino = btrfs_ino(inode);
2713 	u64 dir_ino = btrfs_ino(dir);
2714 
2715 	path = btrfs_alloc_path();
2716 	if (!path) {
2717 		ret = -ENOMEM;
2718 		goto out;
2719 	}
2720 
2721 	path->leave_spinning = 1;
2722 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2723 				    name, name_len, -1);
2724 	if (IS_ERR(di)) {
2725 		ret = PTR_ERR(di);
2726 		goto err;
2727 	}
2728 	if (!di) {
2729 		ret = -ENOENT;
2730 		goto err;
2731 	}
2732 	leaf = path->nodes[0];
2733 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2734 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2735 	if (ret)
2736 		goto err;
2737 	btrfs_release_path(path);
2738 
2739 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2740 				  dir_ino, &index);
2741 	if (ret) {
2742 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2743 		       "inode %llu parent %llu\n", name_len, name,
2744 		       (unsigned long long)ino, (unsigned long long)dir_ino);
2745 		btrfs_abort_transaction(trans, root, ret);
2746 		goto err;
2747 	}
2748 
2749 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2750 	if (ret) {
2751 		btrfs_abort_transaction(trans, root, ret);
2752 		goto err;
2753 	}
2754 
2755 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2756 					 inode, dir_ino);
2757 	if (ret != 0 && ret != -ENOENT) {
2758 		btrfs_abort_transaction(trans, root, ret);
2759 		goto err;
2760 	}
2761 
2762 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2763 					   dir, index);
2764 	if (ret == -ENOENT)
2765 		ret = 0;
2766 err:
2767 	btrfs_free_path(path);
2768 	if (ret)
2769 		goto out;
2770 
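	/*
	 * directory i_size counts each name twice, once for the dir item
	 * and once for the dir index item, hence name_len * 2
	 */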
2771 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2772 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2773 	btrfs_update_inode(trans, root, dir);
2774 out:
2775 	return ret;
2776 }
2777 
2778 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2779 		       struct btrfs_root *root,
2780 		       struct inode *dir, struct inode *inode,
2781 		       const char *name, int name_len)
2782 {
2783 	int ret;
2784 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
2785 	if (!ret) {
2786 		btrfs_drop_nlink(inode);
2787 		ret = btrfs_update_inode(trans, root, inode);
2788 	}
2789 	return ret;
2790 }
2791 
2792 
2793 /* helper to check if there is any shared block in the path */
2794 static int check_path_shared(struct btrfs_root *root,
2795 			     struct btrfs_path *path)
2796 {
2797 	struct extent_buffer *eb;
2798 	int level;
2799 	u64 refs = 1;
2800 
2801 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2802 		int ret;
2803 
2804 		if (!path->nodes[level])
2805 			break;
2806 		eb = path->nodes[level];
2807 		if (!btrfs_block_can_be_shared(root, eb))
2808 			continue;
2809 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2810 					       &refs, NULL);
2811 		if (refs > 1)
2812 			return 1;
2813 	}
2814 	return 0;
2815 }
2816 
2817 /*
2818  * helper to start transaction for unlink and rmdir.
2819  *
2820  * unlink and rmdir are special in btrfs: they do not always free space,
2821  * so in the enospc case we should make sure they will free space before
2822  * allowing them to use the global metadata reservation.
2823  */
2824 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2825 						       struct dentry *dentry)
2826 {
2827 	struct btrfs_trans_handle *trans;
2828 	struct btrfs_root *root = BTRFS_I(dir)->root;
2829 	struct btrfs_path *path;
2830 	struct btrfs_inode_ref *ref;
2831 	struct btrfs_dir_item *di;
2832 	struct inode *inode = dentry->d_inode;
2833 	u64 index;
2834 	int check_link = 1;
2835 	int err = -ENOSPC;
2836 	int ret;
2837 	u64 ino = btrfs_ino(inode);
2838 	u64 dir_ino = btrfs_ino(dir);
2839 
2840 	/*
2841 	 * 1 for the possible orphan item
2842 	 * 1 for the dir item
2843 	 * 1 for the dir index
2844 	 * 1 for the inode ref
2845 	 * 1 for the inode ref in the tree log
2846 	 * 2 for the dir entries in the log
2847 	 * 1 for the inode
2848 	 */
2849 	trans = btrfs_start_transaction(root, 8);
2850 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2851 		return trans;
2852 
2853 	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2854 		return ERR_PTR(-ENOSPC);
2855 
2856 	/* check if someone else holds a reference */
2857 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2858 		return ERR_PTR(-ENOSPC);
2859 
2860 	if (atomic_read(&inode->i_count) > 2)
2861 		return ERR_PTR(-ENOSPC);
2862 
2863 	if (xchg(&root->fs_info->enospc_unlink, 1))
2864 		return ERR_PTR(-ENOSPC);
2865 
2866 	path = btrfs_alloc_path();
2867 	if (!path) {
2868 		root->fs_info->enospc_unlink = 0;
2869 		return ERR_PTR(-ENOMEM);
2870 	}
2871 
2872 	/* 1 for the orphan item */
2873 	trans = btrfs_start_transaction(root, 1);
2874 	if (IS_ERR(trans)) {
2875 		btrfs_free_path(path);
2876 		root->fs_info->enospc_unlink = 0;
2877 		return trans;
2878 	}
2879 
2880 	path->skip_locking = 1;
2881 	path->search_commit_root = 1;
2882 
2883 	ret = btrfs_lookup_inode(trans, root, path,
2884 				&BTRFS_I(dir)->location, 0);
2885 	if (ret < 0) {
2886 		err = ret;
2887 		goto out;
2888 	}
2889 	if (ret == 0) {
2890 		if (check_path_shared(root, path))
2891 			goto out;
2892 	} else {
2893 		check_link = 0;
2894 	}
2895 	btrfs_release_path(path);
2896 
2897 	ret = btrfs_lookup_inode(trans, root, path,
2898 				&BTRFS_I(inode)->location, 0);
2899 	if (ret < 0) {
2900 		err = ret;
2901 		goto out;
2902 	}
2903 	if (ret == 0) {
2904 		if (check_path_shared(root, path))
2905 			goto out;
2906 	} else {
2907 		check_link = 0;
2908 	}
2909 	btrfs_release_path(path);
2910 
2911 	if (ret == 0 && S_ISREG(inode->i_mode)) {
2912 		ret = btrfs_lookup_file_extent(trans, root, path,
2913 					       ino, (u64)-1, 0);
2914 		if (ret < 0) {
2915 			err = ret;
2916 			goto out;
2917 		}
2918 		BUG_ON(ret == 0); /* Corruption */
2919 		if (check_path_shared(root, path))
2920 			goto out;
2921 		btrfs_release_path(path);
2922 	}
2923 
2924 	if (!check_link) {
2925 		err = 0;
2926 		goto out;
2927 	}
2928 
2929 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2930 				dentry->d_name.name, dentry->d_name.len, 0);
2931 	if (IS_ERR(di)) {
2932 		err = PTR_ERR(di);
2933 		goto out;
2934 	}
2935 	if (di) {
2936 		if (check_path_shared(root, path))
2937 			goto out;
2938 	} else {
2939 		err = 0;
2940 		goto out;
2941 	}
2942 	btrfs_release_path(path);
2943 
2944 	ref = btrfs_lookup_inode_ref(trans, root, path,
2945 				dentry->d_name.name, dentry->d_name.len,
2946 				ino, dir_ino, 0);
2947 	if (IS_ERR(ref)) {
2948 		err = PTR_ERR(ref);
2949 		goto out;
2950 	}
2951 	BUG_ON(!ref); /* Logic error */
2952 	if (check_path_shared(root, path))
2953 		goto out;
2954 	index = btrfs_inode_ref_index(path->nodes[0], ref);
2955 	btrfs_release_path(path);
2956 
2957 	/*
2958 	 * This is a commit root search; if we can look up the inode item and
2959 	 * other related items in the commit root, it means the transaction of
2960 	 * dir/file creation has been committed, and the dir index item that we
2961 	 * delay to insert has also been inserted into the commit root. So
2962 	 * we needn't worry about the delayed insertion of the dir index item
2963 	 * here.
2964 	 */
2965 	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
2966 				dentry->d_name.name, dentry->d_name.len, 0);
2967 	if (IS_ERR(di)) {
2968 		err = PTR_ERR(di);
2969 		goto out;
2970 	}
2971 	BUG_ON(!di); /* Logic error: ret is stale here, check di instead */
2972 	if (check_path_shared(root, path))
2973 		goto out;
2974 
2975 	err = 0;
2976 out:
2977 	btrfs_free_path(path);
2978 	/* Migrate the orphan reservation over */
2979 	if (!err)
2980 		err = btrfs_block_rsv_migrate(trans->block_rsv,
2981 				&root->fs_info->global_block_rsv,
2982 				trans->bytes_reserved);
2983 
2984 	if (err) {
2985 		btrfs_end_transaction(trans, root);
2986 		root->fs_info->enospc_unlink = 0;
2987 		return ERR_PTR(err);
2988 	}
2989 
2990 	trans->block_rsv = &root->fs_info->global_block_rsv;
2991 	return trans;
2992 }
2993 
2994 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
2995 			       struct btrfs_root *root)
2996 {
2997 	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
2998 		btrfs_block_rsv_release(root, trans->block_rsv,
2999 					trans->bytes_reserved);
3000 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3001 		BUG_ON(!root->fs_info->enospc_unlink);
3002 		root->fs_info->enospc_unlink = 0;
3003 	}
3004 	btrfs_end_transaction(trans, root);
3005 }
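/*
 * __unlink_start_trans() and __unlink_end_trans() pair up: when the start
 * helper falls back to the global block reservation it sets
 * fs_info->enospc_unlink (serializing the fallback path), and the end
 * helper releases the migrated reservation and clears the flag.
 */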
3006 
3007 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3008 {
3009 	struct btrfs_root *root = BTRFS_I(dir)->root;
3010 	struct btrfs_trans_handle *trans;
3011 	struct inode *inode = dentry->d_inode;
3012 	int ret;
3013 	unsigned long nr = 0;
3014 
3015 	trans = __unlink_start_trans(dir, dentry);
3016 	if (IS_ERR(trans))
3017 		return PTR_ERR(trans);
3018 
3019 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3020 
3021 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3022 				 dentry->d_name.name, dentry->d_name.len);
3023 	if (ret)
3024 		goto out;
3025 
3026 	if (inode->i_nlink == 0) {
3027 		ret = btrfs_orphan_add(trans, inode);
3028 		if (ret)
3029 			goto out;
3030 	}
3031 
3032 out:
3033 	nr = trans->blocks_used;
3034 	__unlink_end_trans(trans, root);
3035 	btrfs_btree_balance_dirty(root, nr);
3036 	return ret;
3037 }
3038 
3039 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3040 			struct btrfs_root *root,
3041 			struct inode *dir, u64 objectid,
3042 			const char *name, int name_len)
3043 {
3044 	struct btrfs_path *path;
3045 	struct extent_buffer *leaf;
3046 	struct btrfs_dir_item *di;
3047 	struct btrfs_key key;
3048 	u64 index;
3049 	int ret;
3050 	u64 dir_ino = btrfs_ino(dir);
3051 
3052 	path = btrfs_alloc_path();
3053 	if (!path)
3054 		return -ENOMEM;
3055 
3056 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3057 				   name, name_len, -1);
3058 	if (IS_ERR_OR_NULL(di)) {
3059 		if (!di)
3060 			ret = -ENOENT;
3061 		else
3062 			ret = PTR_ERR(di);
3063 		goto out;
3064 	}
3065 
3066 	leaf = path->nodes[0];
3067 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3068 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3069 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3070 	if (ret) {
3071 		btrfs_abort_transaction(trans, root, ret);
3072 		goto out;
3073 	}
3074 	btrfs_release_path(path);
3075 
3076 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3077 				 objectid, root->root_key.objectid,
3078 				 dir_ino, &index, name, name_len);
3079 	if (ret < 0) {
3080 		if (ret != -ENOENT) {
3081 			btrfs_abort_transaction(trans, root, ret);
3082 			goto out;
3083 		}
3084 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3085 						 name, name_len);
3086 		if (IS_ERR_OR_NULL(di)) {
3087 			if (!di)
3088 				ret = -ENOENT;
3089 			else
3090 				ret = PTR_ERR(di);
3091 			btrfs_abort_transaction(trans, root, ret);
3092 			goto out;
3093 		}
3094 
3095 		leaf = path->nodes[0];
3096 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3097 		btrfs_release_path(path);
3098 		index = key.offset;
3099 	}
3100 	btrfs_release_path(path);
3101 
3102 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3103 	if (ret) {
3104 		btrfs_abort_transaction(trans, root, ret);
3105 		goto out;
3106 	}
3107 
3108 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3109 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3110 	ret = btrfs_update_inode(trans, root, dir);
3111 	if (ret)
3112 		btrfs_abort_transaction(trans, root, ret);
3113 out:
3114 	btrfs_free_path(path);
3115 	return ret;
3116 }
3117 
3118 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3119 {
3120 	struct inode *inode = dentry->d_inode;
3121 	int err = 0;
3122 	struct btrfs_root *root = BTRFS_I(dir)->root;
3123 	struct btrfs_trans_handle *trans;
3124 	unsigned long nr = 0;
3125 
3126 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
3127 	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3128 		return -ENOTEMPTY;
3129 
3130 	trans = __unlink_start_trans(dir, dentry);
3131 	if (IS_ERR(trans))
3132 		return PTR_ERR(trans);
3133 
3134 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3135 		err = btrfs_unlink_subvol(trans, root, dir,
3136 					  BTRFS_I(inode)->location.objectid,
3137 					  dentry->d_name.name,
3138 					  dentry->d_name.len);
3139 		goto out;
3140 	}
3141 
3142 	err = btrfs_orphan_add(trans, inode);
3143 	if (err)
3144 		goto out;
3145 
3146 	/* now the directory is empty */
3147 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3148 				 dentry->d_name.name, dentry->d_name.len);
3149 	if (!err)
3150 		btrfs_i_size_write(inode, 0);
3151 out:
3152 	nr = trans->blocks_used;
3153 	__unlink_end_trans(trans, root);
3154 	btrfs_btree_balance_dirty(root, nr);
3155 
3156 	return err;
3157 }
3158 
3159 /*
3160  * this can truncate away extent items, csum items and directory items.
3161  * It starts at a high offset and removes keys until it can't find
3162  * any higher than new_size
3163  *
3164  * csum items that cross the new i_size are truncated to the new size
3165  * as well.
3166  *
3167  * min_type is the minimum key type to truncate down to.  If set to 0, this
3168  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3169  */
3170 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3171 			       struct btrfs_root *root,
3172 			       struct inode *inode,
3173 			       u64 new_size, u32 min_type)
3174 {
3175 	struct btrfs_path *path;
3176 	struct extent_buffer *leaf;
3177 	struct btrfs_file_extent_item *fi;
3178 	struct btrfs_key key;
3179 	struct btrfs_key found_key;
3180 	u64 extent_start = 0;
3181 	u64 extent_num_bytes = 0;
3182 	u64 extent_offset = 0;
3183 	u64 item_end = 0;
3184 	u64 mask = root->sectorsize - 1;
3185 	u32 found_type = (u8)-1;
3186 	int found_extent;
3187 	int del_item;
3188 	int pending_del_nr = 0;
3189 	int pending_del_slot = 0;
3190 	int extent_type = -1;
3191 	int ret;
3192 	int err = 0;
3193 	u64 ino = btrfs_ino(inode);
3194 
3195 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3196 
3197 	path = btrfs_alloc_path();
3198 	if (!path)
3199 		return -ENOMEM;
3200 	path->reada = -1;
3201 
3202 	if (root->ref_cows || root == root->fs_info->tree_root)
3203 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3204 
3205 	/*
3206 	 * This function is also used to drop the items in the log tree before
3207 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3208 	 * it is used to drop the logged items. So we shouldn't kill the delayed
3209 	 * items.
3210 	 */
3211 	if (min_type == 0 && root == BTRFS_I(inode)->root)
3212 		btrfs_kill_delayed_inode_items(inode);
3213 
3214 	key.objectid = ino;
3215 	key.offset = (u64)-1;
3216 	key.type = (u8)-1;
3217 
3218 search_again:
3219 	path->leave_spinning = 1;
3220 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3221 	if (ret < 0) {
3222 		err = ret;
3223 		goto out;
3224 	}
3225 
3226 	if (ret > 0) {
3227 		/* there are no items in the tree for us to truncate, we're
3228 		 * done
3229 		 */
3230 		if (path->slots[0] == 0)
3231 			goto out;
3232 		path->slots[0]--;
3233 	}
3234 
3235 	while (1) {
3236 		fi = NULL;
3237 		leaf = path->nodes[0];
3238 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3239 		found_type = btrfs_key_type(&found_key);
3240 
3241 		if (found_key.objectid != ino)
3242 			break;
3243 
3244 		if (found_type < min_type)
3245 			break;
3246 
3247 		item_end = found_key.offset;
3248 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3249 			fi = btrfs_item_ptr(leaf, path->slots[0],
3250 					    struct btrfs_file_extent_item);
3251 			extent_type = btrfs_file_extent_type(leaf, fi);
3252 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3253 				item_end +=
3254 				    btrfs_file_extent_num_bytes(leaf, fi);
3255 			} else {
3256 				item_end += btrfs_file_extent_inline_len(leaf,
3257 									 fi);
3258 			}
3259 			item_end--;
3260 		}
3261 		if (found_type > min_type) {
3262 			del_item = 1;
3263 		} else {
3264 			if (item_end < new_size)
3265 				break;
3266 			if (found_key.offset >= new_size)
3267 				del_item = 1;
3268 			else
3269 				del_item = 0;
3270 		}
3271 		found_extent = 0;
3272 		/* FIXME, shrink the extent if the ref count is only 1 */
3273 		if (found_type != BTRFS_EXTENT_DATA_KEY)
3274 			goto delete;
3275 
3276 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3277 			u64 num_dec;
3278 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3279 			if (!del_item) {
3280 				u64 orig_num_bytes =
3281 					btrfs_file_extent_num_bytes(leaf, fi);
3282 				extent_num_bytes = new_size -
3283 					found_key.offset + root->sectorsize - 1;
3284 				extent_num_bytes = extent_num_bytes &
3285 					~((u64)root->sectorsize - 1);
3286 				btrfs_set_file_extent_num_bytes(leaf, fi,
3287 							 extent_num_bytes);
3288 				num_dec = (orig_num_bytes -
3289 					   extent_num_bytes);
3290 				if (root->ref_cows && extent_start != 0)
3291 					inode_sub_bytes(inode, num_dec);
3292 				btrfs_mark_buffer_dirty(leaf);
3293 			} else {
3294 				extent_num_bytes =
3295 					btrfs_file_extent_disk_num_bytes(leaf,
3296 									 fi);
3297 				extent_offset = found_key.offset -
3298 					btrfs_file_extent_offset(leaf, fi);
3299 
3300 				/* FIXME blocksize != 4096 */
3301 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
3302 				if (extent_start != 0) {
3303 					found_extent = 1;
3304 					if (root->ref_cows)
3305 						inode_sub_bytes(inode, num_dec);
3306 				}
3307 			}
3308 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3309 			/*
3310 			 * we can't truncate inline items that have had
3311 			 * special encodings
3312 			 */
3313 			if (!del_item &&
3314 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
3315 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
3316 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3317 				u32 size = new_size - found_key.offset;
3318 
3319 				if (root->ref_cows) {
3320 					inode_sub_bytes(inode, item_end + 1 -
3321 							new_size);
3322 				}
3323 				size =
3324 				    btrfs_file_extent_calc_inline_size(size);
3325 				btrfs_truncate_item(trans, root, path,
3326 						    size, 1);
3327 			} else if (root->ref_cows) {
3328 				inode_sub_bytes(inode, item_end + 1 -
3329 						found_key.offset);
3330 			}
3331 		}
3332 delete:
3333 		if (del_item) {
3334 			if (!pending_del_nr) {
3335 				/* no pending yet, add ourselves */
3336 				pending_del_slot = path->slots[0];
3337 				pending_del_nr = 1;
3338 			} else if (pending_del_nr &&
3339 				   path->slots[0] + 1 == pending_del_slot) {
3340 				/* hop on the pending chunk */
3341 				pending_del_nr++;
3342 				pending_del_slot = path->slots[0];
3343 			} else {
3344 				BUG();
3345 			}
3346 		} else {
3347 			break;
3348 		}
3349 		if (found_extent && (root->ref_cows ||
3350 				     root == root->fs_info->tree_root)) {
3351 			btrfs_set_path_blocking(path);
3352 			ret = btrfs_free_extent(trans, root, extent_start,
3353 						extent_num_bytes, 0,
3354 						btrfs_header_owner(leaf),
3355 						ino, extent_offset, 0);
3356 			BUG_ON(ret);
3357 		}
3358 
3359 		if (found_type == BTRFS_INODE_ITEM_KEY)
3360 			break;
3361 
3362 		if (path->slots[0] == 0 ||
3363 		    path->slots[0] != pending_del_slot) {
3364 			if (root->ref_cows &&
3365 			    BTRFS_I(inode)->location.objectid !=
3366 						BTRFS_FREE_INO_OBJECTID) {
3367 				err = -EAGAIN;
3368 				goto out;
3369 			}
3370 			if (pending_del_nr) {
3371 				ret = btrfs_del_items(trans, root, path,
3372 						pending_del_slot,
3373 						pending_del_nr);
3374 				if (ret) {
3375 					btrfs_abort_transaction(trans,
3376 								root, ret);
3377 					goto error;
3378 				}
3379 				pending_del_nr = 0;
3380 			}
3381 			btrfs_release_path(path);
3382 			goto search_again;
3383 		} else {
3384 			path->slots[0]--;
3385 		}
3386 	}
3387 out:
3388 	if (pending_del_nr) {
3389 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3390 				      pending_del_nr);
3391 		if (ret)
3392 			btrfs_abort_transaction(trans, root, ret);
3393 	}
3394 error:
3395 	btrfs_free_path(path);
3396 	return err;
3397 }
3398 
3399 /*
3400  * taken from block_truncate_page, but does cow as it zeros out
3401  * any bytes left in the last page in the file.
3402  */
3403 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3404 {
3405 	struct inode *inode = mapping->host;
3406 	struct btrfs_root *root = BTRFS_I(inode)->root;
3407 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3408 	struct btrfs_ordered_extent *ordered;
3409 	struct extent_state *cached_state = NULL;
3410 	char *kaddr;
3411 	u32 blocksize = root->sectorsize;
3412 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3413 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3414 	struct page *page;
3415 	gfp_t mask = btrfs_alloc_write_mask(mapping);
3416 	int ret = 0;
3417 	u64 page_start;
3418 	u64 page_end;
3419 
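	/*
	 * If the truncation point is already block aligned there are no
	 * partial-block bytes to zero out.
	 */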
3420 	if ((offset & (blocksize - 1)) == 0)
3421 		goto out;
3422 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
3423 	if (ret)
3424 		goto out;
3425 
3426 	ret = -ENOMEM;
3427 again:
3428 	page = find_or_create_page(mapping, index, mask);
3429 	if (!page) {
3430 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3431 		goto out;
3432 	}
3433 
3434 	page_start = page_offset(page);
3435 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3436 
3437 	if (!PageUptodate(page)) {
3438 		ret = btrfs_readpage(NULL, page);
3439 		lock_page(page);
3440 		if (page->mapping != mapping) {
3441 			unlock_page(page);
3442 			page_cache_release(page);
3443 			goto again;
3444 		}
3445 		if (!PageUptodate(page)) {
3446 			ret = -EIO;
3447 			goto out_unlock;
3448 		}
3449 	}
3450 	wait_on_page_writeback(page);
3451 
3452 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
3453 	set_page_extent_mapped(page);
3454 
3455 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3456 	if (ordered) {
3457 		unlock_extent_cached(io_tree, page_start, page_end,
3458 				     &cached_state, GFP_NOFS);
3459 		unlock_page(page);
3460 		page_cache_release(page);
3461 		btrfs_start_ordered_extent(inode, ordered, 1);
3462 		btrfs_put_ordered_extent(ordered);
3463 		goto again;
3464 	}
3465 
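	/*
	 * Drop any stale dirty/delalloc accounting on this page before
	 * marking the range delalloc again for the zeroing write below.
	 */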
3466 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3467 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3468 			  0, 0, &cached_state, GFP_NOFS);
3469 
3470 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3471 					&cached_state);
3472 	if (ret) {
3473 		unlock_extent_cached(io_tree, page_start, page_end,
3474 				     &cached_state, GFP_NOFS);
3475 		goto out_unlock;
3476 	}
3477 
3478 	ret = 0;
3479 	if (offset != PAGE_CACHE_SIZE) {
3480 		kaddr = kmap(page);
3481 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3482 		flush_dcache_page(page);
3483 		kunmap(page);
3484 	}
3485 	ClearPageChecked(page);
3486 	set_page_dirty(page);
3487 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3488 			     GFP_NOFS);
3489 
3490 out_unlock:
3491 	if (ret)
3492 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3493 	unlock_page(page);
3494 	page_cache_release(page);
3495 out:
3496 	return ret;
3497 }
3498 
3499 /*
3500  * This function puts in dummy file extents for the area we're creating a hole
3501  * for.  So if we are truncating this file to a larger size we need to insert
3502  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
3503  * for the range between oldsize and size.
3504  */
3505 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3506 {
3507 	struct btrfs_trans_handle *trans;
3508 	struct btrfs_root *root = BTRFS_I(inode)->root;
3509 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3510 	struct extent_map *em = NULL;
3511 	struct extent_state *cached_state = NULL;
3512 	u64 mask = root->sectorsize - 1;
3513 	u64 hole_start = (oldsize + mask) & ~mask;
3514 	u64 block_end = (size + mask) & ~mask;
3515 	u64 last_byte;
3516 	u64 cur_offset;
3517 	u64 hole_size;
3518 	int err = 0;
3519 
3520 	if (size <= hole_start)
3521 		return 0;
3522 
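	/*
	 * Wait for any in-flight ordered IO over the hole range to finish
	 * before locking it; retry if a new ordered extent shows up after
	 * the flush.
	 */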
3523 	while (1) {
3524 		struct btrfs_ordered_extent *ordered;
3525 		btrfs_wait_ordered_range(inode, hole_start,
3526 					 block_end - hole_start);
3527 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3528 				 &cached_state);
3529 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3530 		if (!ordered)
3531 			break;
3532 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
3533 				     &cached_state, GFP_NOFS);
3534 		btrfs_put_ordered_extent(ordered);
3535 	}
3536 
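	/*
	 * Walk the range one extent map at a time.  Anything that isn't
	 * already preallocated gets an explicit hole: drop whatever is
	 * there and insert a file extent with a disk_bytenr of 0.
	 */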
3537 	cur_offset = hole_start;
3538 	while (1) {
3539 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3540 				block_end - cur_offset, 0);
3541 		if (IS_ERR(em)) {
3542 			err = PTR_ERR(em);
3543 			break;
3544 		}
3545 		last_byte = min(extent_map_end(em), block_end);
3546 		last_byte = (last_byte + mask) & ~mask;
3547 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3548 			u64 hint_byte = 0;
3549 			hole_size = last_byte - cur_offset;
3550 
3551 			trans = btrfs_start_transaction(root, 3);
3552 			if (IS_ERR(trans)) {
3553 				err = PTR_ERR(trans);
3554 				break;
3555 			}
3556 
3557 			err = btrfs_drop_extents(trans, inode, cur_offset,
3558 						 cur_offset + hole_size,
3559 						 &hint_byte, 1);
3560 			if (err) {
3561 				btrfs_abort_transaction(trans, root, err);
3562 				btrfs_end_transaction(trans, root);
3563 				break;
3564 			}
3565 
3566 			err = btrfs_insert_file_extent(trans, root,
3567 					btrfs_ino(inode), cur_offset, 0,
3568 					0, hole_size, 0, hole_size,
3569 					0, 0, 0);
3570 			if (err) {
3571 				btrfs_abort_transaction(trans, root, err);
3572 				btrfs_end_transaction(trans, root);
3573 				break;
3574 			}
3575 
3576 			btrfs_drop_extent_cache(inode, hole_start,
3577 					last_byte - 1, 0);
3578 
3579 			btrfs_update_inode(trans, root, inode);
3580 			btrfs_end_transaction(trans, root);
3581 		}
3582 		free_extent_map(em);
3583 		em = NULL;
3584 		cur_offset = last_byte;
3585 		if (cur_offset >= block_end)
3586 			break;
3587 	}
3588 
3589 	free_extent_map(em);
3590 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3591 			     GFP_NOFS);
3592 	return err;
3593 }
3594 
3595 static int btrfs_setsize(struct inode *inode, loff_t newsize)
3596 {
3597 	struct btrfs_root *root = BTRFS_I(inode)->root;
3598 	struct btrfs_trans_handle *trans;
3599 	loff_t oldsize = i_size_read(inode);
3600 	int ret;
3601 
3602 	if (newsize == oldsize)
3603 		return 0;
3604 
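	/*
	 * Growing the file: fill the new range with explicit hole extents
	 * and then bump i_size.  Shrinking is handed off to btrfs_truncate.
	 */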
3605 	if (newsize > oldsize) {
3606 		truncate_pagecache(inode, oldsize, newsize);
3607 		ret = btrfs_cont_expand(inode, oldsize, newsize);
3608 		if (ret)
3609 			return ret;
3610 
3611 		trans = btrfs_start_transaction(root, 1);
3612 		if (IS_ERR(trans))
3613 			return PTR_ERR(trans);
3614 
3615 		i_size_write(inode, newsize);
3616 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3617 		ret = btrfs_update_inode(trans, root, inode);
3618 		btrfs_end_transaction(trans, root);
3619 	} else {
3620 
3621 		/*
3622 		 * We're truncating a file that used to have good data down to
3623 		 * zero. Make sure it gets into the ordered flush list so that
3624 		 * any new writes get down to disk quickly.
3625 		 */
3626 		if (newsize == 0)
3627 			BTRFS_I(inode)->ordered_data_close = 1;
3628 
3629 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
3630 		truncate_setsize(inode, newsize);
3631 		ret = btrfs_truncate(inode);
3632 	}
3633 
3634 	return ret;
3635 }
3636 
3637 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3638 {
3639 	struct inode *inode = dentry->d_inode;
3640 	struct btrfs_root *root = BTRFS_I(inode)->root;
3641 	int err;
3642 
3643 	if (btrfs_root_readonly(root))
3644 		return -EROFS;
3645 
3646 	err = inode_change_ok(inode, attr);
3647 	if (err)
3648 		return err;
3649 
3650 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3651 		err = btrfs_setsize(inode, attr->ia_size);
3652 		if (err)
3653 			return err;
3654 	}
3655 
3656 	if (attr->ia_valid) {
3657 		setattr_copy(inode, attr);
3658 		err = btrfs_dirty_inode(inode);
3659 
3660 		if (!err && attr->ia_valid & ATTR_MODE)
3661 			err = btrfs_acl_chmod(inode);
3662 	}
3663 
3664 	return err;
3665 }
3666 
3667 void btrfs_evict_inode(struct inode *inode)
3668 {
3669 	struct btrfs_trans_handle *trans;
3670 	struct btrfs_root *root = BTRFS_I(inode)->root;
3671 	struct btrfs_block_rsv *rsv, *global_rsv;
3672 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
3673 	unsigned long nr;
3674 	int ret;
3675 
3676 	trace_btrfs_inode_evict(inode);
3677 
3678 	truncate_inode_pages(&inode->i_data, 0);
3679 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3680 			       btrfs_is_free_space_inode(root, inode)))
3681 		goto no_delete;
3682 
3683 	if (is_bad_inode(inode)) {
3684 		btrfs_orphan_del(NULL, inode);
3685 		goto no_delete;
3686 	}
3687 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
3688 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3689 
3690 	if (root->fs_info->log_root_recovering) {
3691 		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3692 		goto no_delete;
3693 	}
3694 
3695 	if (inode->i_nlink > 0) {
3696 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3697 		goto no_delete;
3698 	}
3699 
3700 	rsv = btrfs_alloc_block_rsv(root);
3701 	if (!rsv) {
3702 		btrfs_orphan_del(NULL, inode);
3703 		goto no_delete;
3704 	}
3705 	rsv->size = min_size;
3706 	global_rsv = &root->fs_info->global_block_rsv;
3707 
3708 	btrfs_i_size_write(inode, 0);
3709 
3710 	/*
3711 	 * This is a bit simpler than btrfs_truncate since
3712 	 *
3713 	 * 1) We've already reserved our space for our orphan item in the
3714 	 *    unlink.
3715 	 * 2) We're going to delete the inode item, so we don't need to update
3716 	 *    it at all.
3717 	 *
3718 	 * So we just need to reserve some slack space in case we add bytes when
3719 	 * doing the truncate.
3720 	 */
3721 	while (1) {
3722 		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3723 
3724 		/*
3725 		 * Try and steal from the global reserve since we will
3726 		 * likely not use this space anyway, we want to try as
3727 		 * hard as possible to get this to work.
3728 		 */
3729 		if (ret)
3730 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
3731 
3732 		if (ret) {
3733 			printk(KERN_WARNING "Could not get space for a "
3734 			       "delete, will truncate on mount %d\n", ret);
3735 			btrfs_orphan_del(NULL, inode);
3736 			btrfs_free_block_rsv(root, rsv);
3737 			goto no_delete;
3738 		}
3739 
3740 		trans = btrfs_start_transaction(root, 0);
3741 		if (IS_ERR(trans)) {
3742 			btrfs_orphan_del(NULL, inode);
3743 			btrfs_free_block_rsv(root, rsv);
3744 			goto no_delete;
3745 		}
3746 
3747 		trans->block_rsv = rsv;
3748 
3749 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3750 		if (ret != -EAGAIN)
3751 			break;
3752 
3753 		nr = trans->blocks_used;
3754 		btrfs_end_transaction(trans, root);
3755 		trans = NULL;
3756 		btrfs_btree_balance_dirty(root, nr);
3757 	}
3758 
3759 	btrfs_free_block_rsv(root, rsv);
3760 
3761 	if (ret == 0) {
3762 		trans->block_rsv = root->orphan_block_rsv;
3763 		ret = btrfs_orphan_del(trans, inode);
3764 		BUG_ON(ret);
3765 	}
3766 
3767 	trans->block_rsv = &root->fs_info->trans_block_rsv;
3768 	if (!(root == root->fs_info->tree_root ||
3769 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3770 		btrfs_return_ino(root, btrfs_ino(inode));
3771 
3772 	nr = trans->blocks_used;
3773 	btrfs_end_transaction(trans, root);
3774 	btrfs_btree_balance_dirty(root, nr);
3775 no_delete:
3776 	end_writeback(inode);
3777 	return;
3778 }
3779 
3780 /*
3781  * this returns the key found in the dir entry in the location pointer.
3782  * If no dir entries were found, location->objectid is 0.
3783  */
3784 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3785 			       struct btrfs_key *location)
3786 {
3787 	const char *name = dentry->d_name.name;
3788 	int namelen = dentry->d_name.len;
3789 	struct btrfs_dir_item *di;
3790 	struct btrfs_path *path;
3791 	struct btrfs_root *root = BTRFS_I(dir)->root;
3792 	int ret = 0;
3793 
3794 	path = btrfs_alloc_path();
3795 	if (!path)
3796 		return -ENOMEM;
3797 
3798 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
3799 				    namelen, 0);
3800 	if (IS_ERR(di))
3801 		ret = PTR_ERR(di);
3802 
3803 	if (IS_ERR_OR_NULL(di))
3804 		goto out_err;
3805 
3806 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3807 out:
3808 	btrfs_free_path(path);
3809 	return ret;
3810 out_err:
3811 	location->objectid = 0;
3812 	goto out;
3813 }
3814 
3815 /*
3816  * when we hit a tree root in a directory, the btrfs part of the inode
3817  * needs to be changed to reflect the root directory of the tree root.  This
3818  * is kind of like crossing a mount point.
3819  */
3820 static int fixup_tree_root_location(struct btrfs_root *root,
3821 				    struct inode *dir,
3822 				    struct dentry *dentry,
3823 				    struct btrfs_key *location,
3824 				    struct btrfs_root **sub_root)
3825 {
3826 	struct btrfs_path *path;
3827 	struct btrfs_root *new_root;
3828 	struct btrfs_root_ref *ref;
3829 	struct extent_buffer *leaf;
3830 	int ret;
3831 	int err = 0;
3832 
3833 	path = btrfs_alloc_path();
3834 	if (!path) {
3835 		err = -ENOMEM;
3836 		goto out;
3837 	}
3838 
3839 	err = -ENOENT;
3840 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3841 				  BTRFS_I(dir)->root->root_key.objectid,
3842 				  location->objectid);
3843 	if (ret) {
3844 		if (ret < 0)
3845 			err = ret;
3846 		goto out;
3847 	}
3848 
3849 	leaf = path->nodes[0];
3850 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3851 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3852 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3853 		goto out;
3854 
3855 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3856 				   (unsigned long)(ref + 1),
3857 				   dentry->d_name.len);
3858 	if (ret)
3859 		goto out;
3860 
3861 	btrfs_release_path(path);
3862 
3863 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3864 	if (IS_ERR(new_root)) {
3865 		err = PTR_ERR(new_root);
3866 		goto out;
3867 	}
3868 
3869 	if (btrfs_root_refs(&new_root->root_item) == 0) {
3870 		err = -ENOENT;
3871 		goto out;
3872 	}
3873 
3874 	*sub_root = new_root;
3875 	location->objectid = btrfs_root_dirid(&new_root->root_item);
3876 	location->type = BTRFS_INODE_ITEM_KEY;
3877 	location->offset = 0;
3878 	err = 0;
3879 out:
3880 	btrfs_free_path(path);
3881 	return err;
3882 }
3883 
3884 static void inode_tree_add(struct inode *inode)
3885 {
3886 	struct btrfs_root *root = BTRFS_I(inode)->root;
3887 	struct btrfs_inode *entry;
3888 	struct rb_node **p;
3889 	struct rb_node *parent;
3890 	u64 ino = btrfs_ino(inode);
3891 again:
3892 	p = &root->inode_tree.rb_node;
3893 	parent = NULL;
3894 
3895 	if (inode_unhashed(inode))
3896 		return;
3897 
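	/*
	 * Plain rbtree insert keyed by inode number.  If we collide with an
	 * entry that is on its way out (I_WILL_FREE/I_FREEING), evict it
	 * from the tree and restart the walk.
	 */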
3898 	spin_lock(&root->inode_lock);
3899 	while (*p) {
3900 		parent = *p;
3901 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
3902 
3903 		if (ino < btrfs_ino(&entry->vfs_inode))
3904 			p = &parent->rb_left;
3905 		else if (ino > btrfs_ino(&entry->vfs_inode))
3906 			p = &parent->rb_right;
3907 		else {
3908 			WARN_ON(!(entry->vfs_inode.i_state &
3909 				  (I_WILL_FREE | I_FREEING)));
3910 			rb_erase(parent, &root->inode_tree);
3911 			RB_CLEAR_NODE(parent);
3912 			spin_unlock(&root->inode_lock);
3913 			goto again;
3914 		}
3915 	}
3916 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3917 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3918 	spin_unlock(&root->inode_lock);
3919 }
3920 
3921 static void inode_tree_del(struct inode *inode)
3922 {
3923 	struct btrfs_root *root = BTRFS_I(inode)->root;
3924 	int empty = 0;
3925 
3926 	spin_lock(&root->inode_lock);
3927 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3928 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3929 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3930 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3931 	}
3932 	spin_unlock(&root->inode_lock);
3933 
3934 	/*
3935 	 * Free space cache has inodes in the tree root, but the tree root has a
3936 	 * root_refs of 0, so this could end up dropping the tree root as a
3937  * snapshot, so we need the extra root != fs_info->tree_root check to
3938 	 * make sure we don't drop it.
3939 	 */
3940 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
3941 	    root != root->fs_info->tree_root) {
3942 		synchronize_srcu(&root->fs_info->subvol_srcu);
3943 		spin_lock(&root->inode_lock);
3944 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3945 		spin_unlock(&root->inode_lock);
3946 		if (empty)
3947 			btrfs_add_dead_root(root);
3948 	}
3949 }
3950 
3951 void btrfs_invalidate_inodes(struct btrfs_root *root)
3952 {
3953 	struct rb_node *node;
3954 	struct rb_node *prev;
3955 	struct btrfs_inode *entry;
3956 	struct inode *inode;
3957 	u64 objectid = 0;
3958 
3959 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3960 
3961 	spin_lock(&root->inode_lock);
3962 again:
3963 	node = root->inode_tree.rb_node;
3964 	prev = NULL;
3965 	while (node) {
3966 		prev = node;
3967 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3968 
3969 		if (objectid < btrfs_ino(&entry->vfs_inode))
3970 			node = node->rb_left;
3971 		else if (objectid > btrfs_ino(&entry->vfs_inode))
3972 			node = node->rb_right;
3973 		else
3974 			break;
3975 	}
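	/*
	 * No exact match: step forward from the last node visited to find
	 * the first inode with an objectid >= the one we want.
	 */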
3976 	if (!node) {
3977 		while (prev) {
3978 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
3979 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
3980 				node = prev;
3981 				break;
3982 			}
3983 			prev = rb_next(prev);
3984 		}
3985 	}
3986 	while (node) {
3987 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3988 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
3989 		inode = igrab(&entry->vfs_inode);
3990 		if (inode) {
3991 			spin_unlock(&root->inode_lock);
3992 			if (atomic_read(&inode->i_count) > 1)
3993 				d_prune_aliases(inode);
3994 			/*
3995 			 * btrfs_drop_inode will have it removed from
3996 			 * the inode cache when its usage count
3997 			 * hits zero.
3998 			 */
3999 			iput(inode);
4000 			cond_resched();
4001 			spin_lock(&root->inode_lock);
4002 			goto again;
4003 		}
4004 
4005 		if (cond_resched_lock(&root->inode_lock))
4006 			goto again;
4007 
4008 		node = rb_next(node);
4009 	}
4010 	spin_unlock(&root->inode_lock);
4011 }
4012 
4013 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4014 {
4015 	struct btrfs_iget_args *args = p;
4016 	inode->i_ino = args->ino;
4017 	BTRFS_I(inode)->root = args->root;
4018 	btrfs_set_inode_space_info(args->root, inode);
4019 	return 0;
4020 }
4021 
4022 static int btrfs_find_actor(struct inode *inode, void *opaque)
4023 {
4024 	struct btrfs_iget_args *args = opaque;
4025 	return args->ino == btrfs_ino(inode) &&
4026 		args->root == BTRFS_I(inode)->root;
4027 }
4028 
4029 static struct inode *btrfs_iget_locked(struct super_block *s,
4030 				       u64 objectid,
4031 				       struct btrfs_root *root)
4032 {
4033 	struct inode *inode;
4034 	struct btrfs_iget_args args;
4035 	args.ino = objectid;
4036 	args.root = root;
4037 
4038 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4039 			     btrfs_init_locked_inode,
4040 			     (void *)&args);
4041 	return inode;
4042 }
4043 
4044 /* Get an inode object given its location and corresponding root.
4045  * Returns in *new if the inode was read from disk.
4046  */
4047 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4048 			 struct btrfs_root *root, int *new)
4049 {
4050 	struct inode *inode;
4051 
4052 	inode = btrfs_iget_locked(s, location->objectid, root);
4053 	if (!inode)
4054 		return ERR_PTR(-ENOMEM);
4055 
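	/*
	 * A freshly allocated inode comes back with I_NEW set; read it from
	 * disk and add it to the per-root rbtree before unlocking it.
	 */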
4056 	if (inode->i_state & I_NEW) {
4057 		BTRFS_I(inode)->root = root;
4058 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4059 		btrfs_read_locked_inode(inode);
4060 		if (!is_bad_inode(inode)) {
4061 			inode_tree_add(inode);
4062 			unlock_new_inode(inode);
4063 			if (new)
4064 				*new = 1;
4065 		} else {
4066 			unlock_new_inode(inode);
4067 			iput(inode);
4068 			inode = ERR_PTR(-ESTALE);
4069 		}
4070 	}
4071 
4072 	return inode;
4073 }
4074 
4075 static struct inode *new_simple_dir(struct super_block *s,
4076 				    struct btrfs_key *key,
4077 				    struct btrfs_root *root)
4078 {
4079 	struct inode *inode = new_inode(s);
4080 
4081 	if (!inode)
4082 		return ERR_PTR(-ENOMEM);
4083 
4084 	BTRFS_I(inode)->root = root;
4085 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4086 	BTRFS_I(inode)->dummy_inode = 1;
4087 
4088 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4089 	inode->i_op = &btrfs_dir_ro_inode_operations;
4090 	inode->i_fop = &simple_dir_operations;
4091 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4092 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4093 
4094 	return inode;
4095 }
4096 
4097 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4098 {
4099 	struct inode *inode;
4100 	struct btrfs_root *root = BTRFS_I(dir)->root;
4101 	struct btrfs_root *sub_root = root;
4102 	struct btrfs_key location;
4103 	int index;
4104 	int ret = 0;
4105 
4106 	if (dentry->d_name.len > BTRFS_NAME_LEN)
4107 		return ERR_PTR(-ENAMETOOLONG);
4108 
4109 	if (unlikely(d_need_lookup(dentry))) {
4110 		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
4111 		kfree(dentry->d_fsdata);
4112 		dentry->d_fsdata = NULL;
4113 		/* This thing is hashed, drop it for now */
4114 		d_drop(dentry);
4115 	} else {
4116 		ret = btrfs_inode_by_name(dir, dentry, &location);
4117 	}
4118 
4119 	if (ret < 0)
4120 		return ERR_PTR(ret);
4121 
4122 	if (location.objectid == 0)
4123 		return NULL;
4124 
4125 	if (location.type == BTRFS_INODE_ITEM_KEY) {
4126 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4127 		return inode;
4128 	}
4129 
4130 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4131 
4132 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4133 	ret = fixup_tree_root_location(root, dir, dentry,
4134 				       &location, &sub_root);
4135 	if (ret < 0) {
4136 		if (ret != -ENOENT)
4137 			inode = ERR_PTR(ret);
4138 		else
4139 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
4140 	} else {
4141 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
4142 	}
4143 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4144 
4145 	if (!IS_ERR(inode) && root != sub_root) {
4146 		down_read(&root->fs_info->cleanup_work_sem);
4147 		if (!(inode->i_sb->s_flags & MS_RDONLY))
4148 			ret = btrfs_orphan_cleanup(sub_root);
4149 		up_read(&root->fs_info->cleanup_work_sem);
4150 		if (ret)
4151 			inode = ERR_PTR(ret);
4152 	}
4153 
4154 	return inode;
4155 }
4156 
4157 static int btrfs_dentry_delete(const struct dentry *dentry)
4158 {
4159 	struct btrfs_root *root;
4160 	struct inode *inode = dentry->d_inode;
4161 
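	/*
	 * Don't cache dentries that point into a dropped subvolume or at
	 * the dummy empty-subvolume directory.  Negative dentries fall back
	 * to the parent's root for the check.
	 */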
4162 	if (!inode && !IS_ROOT(dentry))
4163 		inode = dentry->d_parent->d_inode;
4164 
4165 	if (inode) {
4166 		root = BTRFS_I(inode)->root;
4167 		if (btrfs_root_refs(&root->root_item) == 0)
4168 			return 1;
4169 
4170 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4171 			return 1;
4172 	}
4173 	return 0;
4174 }
4175 
4176 static void btrfs_dentry_release(struct dentry *dentry)
4177 {
4178 	if (dentry->d_fsdata)
4179 		kfree(dentry->d_fsdata);
4180 }
4181 
4182 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4183 				   struct nameidata *nd)
4184 {
4185 	struct dentry *ret;
4186 
4187 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
4188 	if (unlikely(d_need_lookup(dentry))) {
4189 		spin_lock(&dentry->d_lock);
4190 		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
4191 		spin_unlock(&dentry->d_lock);
4192 	}
4193 	return ret;
4194 }
4195 
4196 unsigned char btrfs_filetype_table[] = {
4197 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4198 };
4199 
4200 static int btrfs_real_readdir(struct file *filp, void *dirent,
4201 			      filldir_t filldir)
4202 {
4203 	struct inode *inode = filp->f_dentry->d_inode;
4204 	struct btrfs_root *root = BTRFS_I(inode)->root;
4205 	struct btrfs_item *item;
4206 	struct btrfs_dir_item *di;
4207 	struct btrfs_key key;
4208 	struct btrfs_key found_key;
4209 	struct btrfs_path *path;
4210 	struct list_head ins_list;
4211 	struct list_head del_list;
4212 	int ret;
4213 	struct extent_buffer *leaf;
4214 	int slot;
4215 	unsigned char d_type;
4216 	int over = 0;
4217 	u32 di_cur;
4218 	u32 di_total;
4219 	u32 di_len;
4220 	int key_type = BTRFS_DIR_INDEX_KEY;
4221 	char tmp_name[32];
4222 	char *name_ptr;
4223 	int name_len;
4224 	int is_curr = 0;	/* filp->f_pos points to the current index? */
4225 
4226 	/* FIXME, use a real flag for deciding about the key type */
4227 	if (root->fs_info->tree_root == root)
4228 		key_type = BTRFS_DIR_ITEM_KEY;
4229 
4230 	/* special case for "." */
4231 	if (filp->f_pos == 0) {
4232 		over = filldir(dirent, ".", 1,
4233 			       filp->f_pos, btrfs_ino(inode), DT_DIR);
4234 		if (over)
4235 			return 0;
4236 		filp->f_pos = 1;
4237 	}
4238 	/* special case for .., just use the back ref */
4239 	if (filp->f_pos == 1) {
4240 		u64 pino = parent_ino(filp->f_path.dentry);
4241 		over = filldir(dirent, "..", 2,
4242 			       filp->f_pos, pino, DT_DIR);
4243 		if (over)
4244 			return 0;
4245 		filp->f_pos = 2;
4246 	}
4247 	path = btrfs_alloc_path();
4248 	if (!path)
4249 		return -ENOMEM;
4250 
4251 	path->reada = 1;
4252 
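	/*
	 * Dir index items may still be sitting in the delayed-inode lists;
	 * collect them so pending inserts can be merged in and pending
	 * deletions skipped while we walk the tree.
	 */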
4253 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4254 		INIT_LIST_HEAD(&ins_list);
4255 		INIT_LIST_HEAD(&del_list);
4256 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
4257 	}
4258 
4259 	btrfs_set_key_type(&key, key_type);
4260 	key.offset = filp->f_pos;
4261 	key.objectid = btrfs_ino(inode);
4262 
4263 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4264 	if (ret < 0)
4265 		goto err;
4266 
4267 	while (1) {
4268 		leaf = path->nodes[0];
4269 		slot = path->slots[0];
4270 		if (slot >= btrfs_header_nritems(leaf)) {
4271 			ret = btrfs_next_leaf(root, path);
4272 			if (ret < 0)
4273 				goto err;
4274 			else if (ret > 0)
4275 				break;
4276 			continue;
4277 		}
4278 
4279 		item = btrfs_item_nr(leaf, slot);
4280 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4281 
4282 		if (found_key.objectid != key.objectid)
4283 			break;
4284 		if (btrfs_key_type(&found_key) != key_type)
4285 			break;
4286 		if (found_key.offset < filp->f_pos)
4287 			goto next;
4288 		if (key_type == BTRFS_DIR_INDEX_KEY &&
4289 		    btrfs_should_delete_dir_index(&del_list,
4290 						  found_key.offset))
4291 			goto next;
4292 
4293 		filp->f_pos = found_key.offset;
4294 		is_curr = 1;
4295 
4296 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
4297 		di_cur = 0;
4298 		di_total = btrfs_item_size(leaf, item);
4299 
4300 		while (di_cur < di_total) {
4301 			struct btrfs_key location;
4302 
4303 			if (verify_dir_item(root, leaf, di))
4304 				break;
4305 
4306 			name_len = btrfs_dir_name_len(leaf, di);
4307 			if (name_len <= sizeof(tmp_name)) {
4308 				name_ptr = tmp_name;
4309 			} else {
4310 				name_ptr = kmalloc(name_len, GFP_NOFS);
4311 				if (!name_ptr) {
4312 					ret = -ENOMEM;
4313 					goto err;
4314 				}
4315 			}
4316 			read_extent_buffer(leaf, name_ptr,
4317 					   (unsigned long)(di + 1), name_len);
4318 
4319 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4320 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4321 
4322 
4323 			/* is this a reference to our own snapshot? If so
4324 			 * skip it.
4325 			 *
4326 			 * In contrast to old kernels, we insert the snapshot's
4327 			 * dir item and dir index after it has been created, so
4328 			 * we won't find a reference to our own snapshot. We
4329 			 * still keep the following code for backward
4330 			 * compatibility.
4331 			 */
4332 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
4333 			    location.objectid == root->root_key.objectid) {
4334 				over = 0;
4335 				goto skip;
4336 			}
4337 			over = filldir(dirent, name_ptr, name_len,
4338 				       found_key.offset, location.objectid,
4339 				       d_type);
4340 
4341 skip:
4342 			if (name_ptr != tmp_name)
4343 				kfree(name_ptr);
4344 
4345 			if (over)
4346 				goto nopos;
4347 			di_len = btrfs_dir_name_len(leaf, di) +
4348 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
4349 			di_cur += di_len;
4350 			di = (struct btrfs_dir_item *)((char *)di + di_len);
4351 		}
4352 next:
4353 		path->slots[0]++;
4354 	}
4355 
4356 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4357 		if (is_curr)
4358 			filp->f_pos++;
4359 		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
4360 						      &ins_list);
4361 		if (ret)
4362 			goto nopos;
4363 	}
4364 
4365 	/* Reached end of directory/root. Bump pos past the last item. */
4366 	if (key_type == BTRFS_DIR_INDEX_KEY)
4367 		/*
4368 		 * 32-bit glibc will use getdents64, but then strtol -
4369 		 * so the last number we can serve is this.
4370 		 */
4371 		filp->f_pos = 0x7fffffff;
4372 	else
4373 		filp->f_pos++;
4374 nopos:
4375 	ret = 0;
4376 err:
4377 	if (key_type == BTRFS_DIR_INDEX_KEY)
4378 		btrfs_put_delayed_items(&ins_list, &del_list);
4379 	btrfs_free_path(path);
4380 	return ret;
4381 }
4382 
4383 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4384 {
4385 	struct btrfs_root *root = BTRFS_I(inode)->root;
4386 	struct btrfs_trans_handle *trans;
4387 	int ret = 0;
4388 	bool nolock = false;
4389 
4390 	if (BTRFS_I(inode)->dummy_inode)
4391 		return 0;
4392 
4393 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
4394 		nolock = true;
4395 
4396 	if (wbc->sync_mode == WB_SYNC_ALL) {
4397 		if (nolock)
4398 			trans = btrfs_join_transaction_nolock(root);
4399 		else
4400 			trans = btrfs_join_transaction(root);
4401 		if (IS_ERR(trans))
4402 			return PTR_ERR(trans);
4403 		if (nolock)
4404 			ret = btrfs_end_transaction_nolock(trans, root);
4405 		else
4406 			ret = btrfs_commit_transaction(trans, root);
4407 	}
4408 	return ret;
4409 }
4410 
4411 /*
4412  * This is somewhat expensive, updating the tree every time the
4413  * inode changes.  But, it is most likely to find the inode in cache.
4414  * FIXME, needs more benchmarking... there are no reasons other than performance
4415  * to keep or drop this code.
4416  */
4417 int btrfs_dirty_inode(struct inode *inode)
4418 {
4419 	struct btrfs_root *root = BTRFS_I(inode)->root;
4420 	struct btrfs_trans_handle *trans;
4421 	int ret;
4422 
4423 	if (BTRFS_I(inode)->dummy_inode)
4424 		return 0;
4425 
4426 	trans = btrfs_join_transaction(root);
4427 	if (IS_ERR(trans))
4428 		return PTR_ERR(trans);
4429 
4430 	ret = btrfs_update_inode(trans, root, inode);
4431 	if (ret && ret == -ENOSPC) {
4432 		/* whoops, lets try again with the full transaction */
4433 		btrfs_end_transaction(trans, root);
4434 		trans = btrfs_start_transaction(root, 1);
4435 		if (IS_ERR(trans))
4436 			return PTR_ERR(trans);
4437 
4438 		ret = btrfs_update_inode(trans, root, inode);
4439 	}
4440 	btrfs_end_transaction(trans, root);
4441 	if (BTRFS_I(inode)->delayed_node)
4442 		btrfs_balance_delayed_items(root);
4443 
4444 	return ret;
4445 }
4446 
4447 /*
4448  * This is a copy of file_update_time.  We need this so we can return an error
4449  * on ENOSPC when updating the inode for file writes and mmap writes.
4450  */
4451 int btrfs_update_time(struct file *file)
4452 {
4453 	struct inode *inode = file->f_path.dentry->d_inode;
4454 	struct timespec now;
4455 	int ret;
4456 	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
4457 
4458 	/* First try to exhaust all avenues to not sync */
4459 	if (IS_NOCMTIME(inode))
4460 		return 0;
4461 
4462 	now = current_fs_time(inode->i_sb);
4463 	if (!timespec_equal(&inode->i_mtime, &now))
4464 		sync_it = S_MTIME;
4465 
4466 	if (!timespec_equal(&inode->i_ctime, &now))
4467 		sync_it |= S_CTIME;
4468 
4469 	if (IS_I_VERSION(inode))
4470 		sync_it |= S_VERSION;
4471 
4472 	if (!sync_it)
4473 		return 0;
4474 
4475 	/* Finally allowed to write? Takes lock. */
4476 	if (mnt_want_write_file(file))
4477 		return 0;
4478 
4479 	/* Only change inode inside the lock region */
4480 	if (sync_it & S_VERSION)
4481 		inode_inc_iversion(inode);
4482 	if (sync_it & S_CTIME)
4483 		inode->i_ctime = now;
4484 	if (sync_it & S_MTIME)
4485 		inode->i_mtime = now;
4486 	ret = btrfs_dirty_inode(inode);
4487 	if (!ret)
4488 		mark_inode_dirty_sync(inode);
4489 	mnt_drop_write(file->f_path.mnt);
4490 	return ret;
4491 }
4492 
4493 /*
4494  * find the highest existing sequence number in a directory
4495  * and then set the in-memory index_cnt variable to point at the
4496  * next free sequence number
4497  */
4498 static int btrfs_set_inode_index_count(struct inode *inode)
4499 {
4500 	struct btrfs_root *root = BTRFS_I(inode)->root;
4501 	struct btrfs_key key, found_key;
4502 	struct btrfs_path *path;
4503 	struct extent_buffer *leaf;
4504 	int ret;
4505 
4506 	key.objectid = btrfs_ino(inode);
4507 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4508 	key.offset = (u64)-1;
4509 
4510 	path = btrfs_alloc_path();
4511 	if (!path)
4512 		return -ENOMEM;
4513 
4514 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4515 	if (ret < 0)
4516 		goto out;
4517 	/* FIXME: we should be able to handle this */
4518 	if (ret == 0)
4519 		goto out;
4520 	ret = 0;
4521 
4522 	/*
4523 	 * MAGIC NUMBER EXPLANATION:
4524  * since we search a directory based on f_pos, we have to start at 2:
4525  * '.' and '..' have f_pos of 0 and 1 respectively, so everybody
4526  * else has to start at 2.
4527 	 */
4528 	if (path->slots[0] == 0) {
4529 		BTRFS_I(inode)->index_cnt = 2;
4530 		goto out;
4531 	}
4532 
4533 	path->slots[0]--;
4534 
4535 	leaf = path->nodes[0];
4536 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4537 
4538 	if (found_key.objectid != btrfs_ino(inode) ||
4539 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4540 		BTRFS_I(inode)->index_cnt = 2;
4541 		goto out;
4542 	}
4543 
4544 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4545 out:
4546 	btrfs_free_path(path);
4547 	return ret;
4548 }
4549 
4550 /*
4551  * helper to find a free sequence number in a given directory.  The current
4552  * code is very simple; later versions will do smarter things in the btree.
4553  */
4554 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4555 {
4556 	int ret = 0;
4557 
4558 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4559 		ret = btrfs_inode_delayed_dir_index_count(dir);
4560 		if (ret) {
4561 			ret = btrfs_set_inode_index_count(dir);
4562 			if (ret)
4563 				return ret;
4564 		}
4565 	}
4566 
4567 	*index = BTRFS_I(dir)->index_cnt;
4568 	BTRFS_I(dir)->index_cnt++;
4569 
4570 	return ret;
4571 }
4572 
4573 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4574 				     struct btrfs_root *root,
4575 				     struct inode *dir,
4576 				     const char *name, int name_len,
4577 				     u64 ref_objectid, u64 objectid,
4578 				     umode_t mode, u64 *index)
4579 {
4580 	struct inode *inode;
4581 	struct btrfs_inode_item *inode_item;
4582 	struct btrfs_key *location;
4583 	struct btrfs_path *path;
4584 	struct btrfs_inode_ref *ref;
4585 	struct btrfs_key key[2];
4586 	u32 sizes[2];
4587 	unsigned long ptr;
4588 	int ret;
4589 	int owner;
4590 
4591 	path = btrfs_alloc_path();
4592 	if (!path)
4593 		return ERR_PTR(-ENOMEM);
4594 
4595 	inode = new_inode(root->fs_info->sb);
4596 	if (!inode) {
4597 		btrfs_free_path(path);
4598 		return ERR_PTR(-ENOMEM);
4599 	}
4600 
4601 	/*
4602 	 * we have to initialize this early, so we can reclaim the inode
4603 	 * number if we fail afterwards in this function.
4604 	 */
4605 	inode->i_ino = objectid;
4606 
4607 	if (dir) {
4608 		trace_btrfs_inode_request(dir);
4609 
4610 		ret = btrfs_set_inode_index(dir, index);
4611 		if (ret) {
4612 			btrfs_free_path(path);
4613 			iput(inode);
4614 			return ERR_PTR(ret);
4615 		}
4616 	}
4617 	/*
4618 	 * index_cnt is ignored for everything but a dir,
4619  * btrfs_set_inode_index_count has an explanation for the magic
4620  * number.
4621 	 */
4622 	BTRFS_I(inode)->index_cnt = 2;
4623 	BTRFS_I(inode)->root = root;
4624 	BTRFS_I(inode)->generation = trans->transid;
4625 	inode->i_generation = BTRFS_I(inode)->generation;
4626 	btrfs_set_inode_space_info(root, inode);
4627 
4628 	if (S_ISDIR(mode))
4629 		owner = 0;
4630 	else
4631 		owner = 1;
4632 
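	/*
	 * The inode item and its backref (the inode ref) are inserted in
	 * one batch so they land next to each other in the tree.
	 */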
4633 	key[0].objectid = objectid;
4634 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4635 	key[0].offset = 0;
4636 
4637 	key[1].objectid = objectid;
4638 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4639 	key[1].offset = ref_objectid;
4640 
4641 	sizes[0] = sizeof(struct btrfs_inode_item);
4642 	sizes[1] = name_len + sizeof(*ref);
4643 
4644 	path->leave_spinning = 1;
4645 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4646 	if (ret != 0)
4647 		goto fail;
4648 
4649 	inode_init_owner(inode, dir, mode);
4650 	inode_set_bytes(inode, 0);
4651 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4652 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4653 				  struct btrfs_inode_item);
4654 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4655 
4656 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4657 			     struct btrfs_inode_ref);
4658 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4659 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4660 	ptr = (unsigned long)(ref + 1);
4661 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4662 
4663 	btrfs_mark_buffer_dirty(path->nodes[0]);
4664 	btrfs_free_path(path);
4665 
4666 	location = &BTRFS_I(inode)->location;
4667 	location->objectid = objectid;
4668 	location->offset = 0;
4669 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4670 
4671 	btrfs_inherit_iflags(inode, dir);
4672 
4673 	if (S_ISREG(mode)) {
4674 		if (btrfs_test_opt(root, NODATASUM))
4675 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4676 		if (btrfs_test_opt(root, NODATACOW) ||
4677 		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
4678 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4679 	}
4680 
4681 	insert_inode_hash(inode);
4682 	inode_tree_add(inode);
4683 
4684 	trace_btrfs_inode_new(inode);
4685 	btrfs_set_inode_last_trans(trans, inode);
4686 
4687 	return inode;
4688 fail:
4689 	if (dir)
4690 		BTRFS_I(dir)->index_cnt--;
4691 	btrfs_free_path(path);
4692 	iput(inode);
4693 	return ERR_PTR(ret);
4694 }
4695 
4696 static inline u8 btrfs_inode_type(struct inode *inode)
4697 {
4698 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4699 }
4700 
4701 /*
4702  * utility function to add 'inode' into 'parent_inode' with
4703  * a given name and a given sequence number.
4704  * if 'add_backref' is true, also insert a backref from the
4705  * inode to the parent directory.
4706  */
4707 int btrfs_add_link(struct btrfs_trans_handle *trans,
4708 		   struct inode *parent_inode, struct inode *inode,
4709 		   const char *name, int name_len, int add_backref, u64 index)
4710 {
4711 	int ret = 0;
4712 	struct btrfs_key key;
4713 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4714 	u64 ino = btrfs_ino(inode);
4715 	u64 parent_ino = btrfs_ino(parent_inode);
4716 
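	/*
	 * A subvolume root is linked via a root ref in the tree of tree
	 * roots rather than a regular inode ref, so key on the subvolume's
	 * root key in that case.
	 */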
4717 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4718 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4719 	} else {
4720 		key.objectid = ino;
4721 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4722 		key.offset = 0;
4723 	}
4724 
4725 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4726 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4727 					 key.objectid, root->root_key.objectid,
4728 					 parent_ino, index, name, name_len);
4729 	} else if (add_backref) {
4730 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
4731 					     parent_ino, index);
4732 	}
4733 
4734 	/* Nothing to clean up yet */
4735 	if (ret)
4736 		return ret;
4737 
4738 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
4739 				    parent_inode, &key,
4740 				    btrfs_inode_type(inode), index);
4741 	if (ret == -EEXIST)
4742 		goto fail_dir_item;
4743 	else if (ret) {
4744 		btrfs_abort_transaction(trans, root, ret);
4745 		return ret;
4746 	}
4747 
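	/*
	 * Directory i_size counts each name twice: once for the dir item
	 * and once for the dir index item.
	 */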
4748 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
4749 			   name_len * 2);
4750 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4751 	ret = btrfs_update_inode(trans, root, parent_inode);
4752 	if (ret)
4753 		btrfs_abort_transaction(trans, root, ret);
4754 	return ret;
4755 
4756 fail_dir_item:
4757 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4758 		u64 local_index;
4759 		int err;
4760 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4761 				 key.objectid, root->root_key.objectid,
4762 				 parent_ino, &local_index, name, name_len);
4763 
4764 	} else if (add_backref) {
4765 		u64 local_index;
4766 		int err;
4767 
4768 		err = btrfs_del_inode_ref(trans, root, name, name_len,
4769 					  ino, parent_ino, &local_index);
4770 	}
4771 	return ret;
4772 }
4773 
4774 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4775 			    struct inode *dir, struct dentry *dentry,
4776 			    struct inode *inode, int backref, u64 index)
4777 {
4778 	int err = btrfs_add_link(trans, dir, inode,
4779 				 dentry->d_name.name, dentry->d_name.len,
4780 				 backref, index);
4781 	if (err > 0)
4782 		err = -EEXIST;
4783 	return err;
4784 }
4785 
4786 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4787 			umode_t mode, dev_t rdev)
4788 {
4789 	struct btrfs_trans_handle *trans;
4790 	struct btrfs_root *root = BTRFS_I(dir)->root;
4791 	struct inode *inode = NULL;
4792 	int err;
4793 	int drop_inode = 0;
4794 	u64 objectid;
4795 	unsigned long nr = 0;
4796 	u64 index = 0;
4797 
4798 	if (!new_valid_dev(rdev))
4799 		return -EINVAL;
4800 
4801 	/*
4802 	 * 2 for inode item and ref
4803 	 * 2 for dir items
4804 	 * 1 for xattr if selinux is on
4805 	 */
4806 	trans = btrfs_start_transaction(root, 5);
4807 	if (IS_ERR(trans))
4808 		return PTR_ERR(trans);
4809 
4810 	err = btrfs_find_free_ino(root, &objectid);
4811 	if (err)
4812 		goto out_unlock;
4813 
4814 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4815 				dentry->d_name.len, btrfs_ino(dir), objectid,
4816 				mode, &index);
4817 	if (IS_ERR(inode)) {
4818 		err = PTR_ERR(inode);
4819 		goto out_unlock;
4820 	}
4821 
4822 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4823 	if (err) {
4824 		drop_inode = 1;
4825 		goto out_unlock;
4826 	}
4827 
4828 	/*
4829 	* If the active LSM wants to access the inode during
4830 	* d_instantiate it needs these. Smack checks to see
4831 	* if the filesystem supports xattrs by looking at the
4832 	* ops vector.
4833 	*/
4834 
4835 	inode->i_op = &btrfs_special_inode_operations;
4836 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4837 	if (err)
4838 		drop_inode = 1;
4839 	else {
4840 		init_special_inode(inode, inode->i_mode, rdev);
4841 		btrfs_update_inode(trans, root, inode);
4842 		d_instantiate(dentry, inode);
4843 	}
4844 out_unlock:
4845 	nr = trans->blocks_used;
4846 	btrfs_end_transaction(trans, root);
4847 	btrfs_btree_balance_dirty(root, nr);
4848 	if (drop_inode) {
4849 		inode_dec_link_count(inode);
4850 		iput(inode);
4851 	}
4852 	return err;
4853 }
4854 
4855 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4856 			umode_t mode, struct nameidata *nd)
4857 {
4858 	struct btrfs_trans_handle *trans;
4859 	struct btrfs_root *root = BTRFS_I(dir)->root;
4860 	struct inode *inode = NULL;
4861 	int drop_inode = 0;
4862 	int err;
4863 	unsigned long nr = 0;
4864 	u64 objectid;
4865 	u64 index = 0;
4866 
4867 	/*
4868 	 * 2 for inode item and ref
4869 	 * 2 for dir items
4870 	 * 1 for xattr if selinux is on
4871 	 */
4872 	trans = btrfs_start_transaction(root, 5);
4873 	if (IS_ERR(trans))
4874 		return PTR_ERR(trans);
4875 
4876 	err = btrfs_find_free_ino(root, &objectid);
4877 	if (err)
4878 		goto out_unlock;
4879 
4880 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4881 				dentry->d_name.len, btrfs_ino(dir), objectid,
4882 				mode, &index);
4883 	if (IS_ERR(inode)) {
4884 		err = PTR_ERR(inode);
4885 		goto out_unlock;
4886 	}
4887 
4888 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4889 	if (err) {
4890 		drop_inode = 1;
4891 		goto out_unlock;
4892 	}
4893 
4894 	/*
4895 	* If the active LSM wants to access the inode during
4896 	* d_instantiate it needs these. Smack checks to see
4897 	* if the filesystem supports xattrs by looking at the
4898 	* ops vector.
4899 	*/
4900 	inode->i_fop = &btrfs_file_operations;
4901 	inode->i_op = &btrfs_file_inode_operations;
4902 
4903 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4904 	if (err)
4905 		drop_inode = 1;
4906 	else {
4907 		inode->i_mapping->a_ops = &btrfs_aops;
4908 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4909 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4910 		d_instantiate(dentry, inode);
4911 	}
4912 out_unlock:
4913 	nr = trans->blocks_used;
4914 	btrfs_end_transaction(trans, root);
4915 	if (drop_inode) {
4916 		inode_dec_link_count(inode);
4917 		iput(inode);
4918 	}
4919 	btrfs_btree_balance_dirty(root, nr);
4920 	return err;
4921 }
4922 
4923 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4924 		      struct dentry *dentry)
4925 {
4926 	struct btrfs_trans_handle *trans;
4927 	struct btrfs_root *root = BTRFS_I(dir)->root;
4928 	struct inode *inode = old_dentry->d_inode;
4929 	u64 index;
4930 	unsigned long nr = 0;
4931 	int err;
4932 	int drop_inode = 0;
4933 
4934 	/* do not allow sys_link across subvols of the same device */
4935 	if (root->objectid != BTRFS_I(inode)->root->objectid)
4936 		return -EXDEV;
4937 
4938 	if (inode->i_nlink == ~0U)
4939 		return -EMLINK;
4940 
4941 	err = btrfs_set_inode_index(dir, &index);
4942 	if (err)
4943 		goto fail;
4944 
4945 	/*
4946 	 * 2 items for inode and inode ref
4947 	 * 2 items for dir items
4948 	 * 1 item for parent inode
4949 	 */
4950 	trans = btrfs_start_transaction(root, 5);
4951 	if (IS_ERR(trans)) {
4952 		err = PTR_ERR(trans);
4953 		goto fail;
4954 	}
4955 
4956 	btrfs_inc_nlink(inode);
4957 	inode->i_ctime = CURRENT_TIME;
4958 	ihold(inode);
4959 
4960 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
4961 
4962 	if (err) {
4963 		drop_inode = 1;
4964 	} else {
4965 		struct dentry *parent = dentry->d_parent;
4966 		err = btrfs_update_inode(trans, root, inode);
4967 		if (err)
4968 			goto fail;
4969 		d_instantiate(dentry, inode);
4970 		btrfs_log_new_name(trans, inode, NULL, parent);
4971 	}
4972 
4973 	nr = trans->blocks_used;
4974 	btrfs_end_transaction(trans, root);
4975 fail:
4976 	if (drop_inode) {
4977 		inode_dec_link_count(inode);
4978 		iput(inode);
4979 	}
4980 	btrfs_btree_balance_dirty(root, nr);
4981 	return err;
4982 }
4983 
4984 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
4985 {
4986 	struct inode *inode = NULL;
4987 	struct btrfs_trans_handle *trans;
4988 	struct btrfs_root *root = BTRFS_I(dir)->root;
4989 	int err = 0;
4990 	int drop_on_err = 0;
4991 	u64 objectid = 0;
4992 	u64 index = 0;
4993 	unsigned long nr = 1;
4994 
4995 	/*
4996 	 * 2 items for inode and ref
4997 	 * 2 items for dir items
4998 	 * 1 for xattr if selinux is on
4999 	 */
5000 	trans = btrfs_start_transaction(root, 5);
5001 	if (IS_ERR(trans))
5002 		return PTR_ERR(trans);
5003 
5004 	err = btrfs_find_free_ino(root, &objectid);
5005 	if (err)
5006 		goto out_fail;
5007 
5008 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5009 				dentry->d_name.len, btrfs_ino(dir), objectid,
5010 				S_IFDIR | mode, &index);
5011 	if (IS_ERR(inode)) {
5012 		err = PTR_ERR(inode);
5013 		goto out_fail;
5014 	}
5015 
5016 	drop_on_err = 1;
5017 
5018 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5019 	if (err)
5020 		goto out_fail;
5021 
5022 	inode->i_op = &btrfs_dir_inode_operations;
5023 	inode->i_fop = &btrfs_dir_file_operations;
5024 
5025 	btrfs_i_size_write(inode, 0);
5026 	err = btrfs_update_inode(trans, root, inode);
5027 	if (err)
5028 		goto out_fail;
5029 
5030 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5031 			     dentry->d_name.len, 0, index);
5032 	if (err)
5033 		goto out_fail;
5034 
5035 	d_instantiate(dentry, inode);
5036 	drop_on_err = 0;
5037 
5038 out_fail:
5039 	nr = trans->blocks_used;
5040 	btrfs_end_transaction(trans, root);
5041 	if (drop_on_err)
5042 		iput(inode);
5043 	btrfs_btree_balance_dirty(root, nr);
5044 	return err;
5045 }
5046 
5047 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
5048  * and an extent that you want to insert, deal with overlap and insert
5049  * the new extent into the tree.
5050  */
5051 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5052 				struct extent_map *existing,
5053 				struct extent_map *em,
5054 				u64 map_start, u64 map_len)
5055 {
5056 	u64 start_diff;
5057 
5058 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
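	/*
	 * Trim the front of the new mapping to the uncovered range and
	 * shift the disk offset to match, unless this is a hole/inline
	 * mapping or a compressed extent (which is always addressed from
	 * its on-disk start).
	 */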
5059 	start_diff = map_start - em->start;
5060 	em->start = map_start;
5061 	em->len = map_len;
5062 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5063 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5064 		em->block_start += start_diff;
5065 		em->block_len -= start_diff;
5066 	}
5067 	return add_extent_mapping(em_tree, em);
5068 }
5069 
5070 static noinline int uncompress_inline(struct btrfs_path *path,
5071 				      struct inode *inode, struct page *page,
5072 				      size_t pg_offset, u64 extent_offset,
5073 				      struct btrfs_file_extent_item *item)
5074 {
5075 	int ret;
5076 	struct extent_buffer *leaf = path->nodes[0];
5077 	char *tmp;
5078 	size_t max_size;
5079 	unsigned long inline_size;
5080 	unsigned long ptr;
5081 	int compress_type;
5082 
5083 	WARN_ON(pg_offset != 0);
5084 	compress_type = btrfs_file_extent_compression(leaf, item);
5085 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
5086 	inline_size = btrfs_file_extent_inline_item_len(leaf,
5087 					btrfs_item_nr(leaf, path->slots[0]));
5088 	tmp = kmalloc(inline_size, GFP_NOFS);
5089 	if (!tmp)
5090 		return -ENOMEM;
5091 	ptr = btrfs_file_extent_inline_start(item);
5092 
5093 	read_extent_buffer(leaf, tmp, ptr, inline_size);
5094 
5095 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5096 	ret = btrfs_decompress(compress_type, tmp, page,
5097 			       extent_offset, inline_size, max_size);
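	/*
	 * If decompression came up short, zero-fill the rest of the page
	 * instead of failing the read.
	 */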
5098 	if (ret) {
5099 		char *kaddr = kmap_atomic(page);
5100 		unsigned long copy_size = min_t(u64,
5101 				  PAGE_CACHE_SIZE - pg_offset,
5102 				  max_size - extent_offset);
5103 		memset(kaddr + pg_offset, 0, copy_size);
5104 		kunmap_atomic(kaddr);
5105 	}
5106 	kfree(tmp);
5107 	return 0;
5108 }
5109 
5110 /*
5111  * a bit scary, this does extent mapping from logical file offset to the disk.
5112  * the ugly parts come from merging extents from the disk with the in-ram
5113  * representation.  This gets more complex because of the data=ordered code,
5114  * where the in-ram extents might be locked pending data=ordered completion.
5115  *
5116  * This also copies inline extents directly into the page.
5117  */
5118 
5119 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5120 				    size_t pg_offset, u64 start, u64 len,
5121 				    int create)
5122 {
5123 	int ret;
5124 	int err = 0;
5125 	u64 bytenr;
5126 	u64 extent_start = 0;
5127 	u64 extent_end = 0;
5128 	u64 objectid = btrfs_ino(inode);
5129 	u32 found_type;
5130 	struct btrfs_path *path = NULL;
5131 	struct btrfs_root *root = BTRFS_I(inode)->root;
5132 	struct btrfs_file_extent_item *item;
5133 	struct extent_buffer *leaf;
5134 	struct btrfs_key found_key;
5135 	struct extent_map *em = NULL;
5136 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5137 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5138 	struct btrfs_trans_handle *trans = NULL;
5139 	int compress_type;
5140 
5141 again:
5142 	read_lock(&em_tree->lock);
5143 	em = lookup_extent_mapping(em_tree, start, len);
5144 	if (em)
5145 		em->bdev = root->fs_info->fs_devices->latest_bdev;
5146 	read_unlock(&em_tree->lock);
5147 
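	/*
	 * Reuse a cached mapping when it covers the start offset, except
	 * for inline extents when a page was supplied: those still need
	 * their data copied into the page below.
	 */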
5148 	if (em) {
5149 		if (em->start > start || em->start + em->len <= start)
5150 			free_extent_map(em);
5151 		else if (em->block_start == EXTENT_MAP_INLINE && page)
5152 			free_extent_map(em);
5153 		else
5154 			goto out;
5155 	}
5156 	em = alloc_extent_map();
5157 	if (!em) {
5158 		err = -ENOMEM;
5159 		goto out;
5160 	}
5161 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5162 	em->start = EXTENT_MAP_HOLE;
5163 	em->orig_start = EXTENT_MAP_HOLE;
5164 	em->len = (u64)-1;
5165 	em->block_len = (u64)-1;
5166 
5167 	if (!path) {
5168 		path = btrfs_alloc_path();
5169 		if (!path) {
5170 			err = -ENOMEM;
5171 			goto out;
5172 		}
5173 		/*
5174 		 * Chances are we'll be called again, so go ahead and do
5175 		 * readahead
5176 		 */
5177 		path->reada = 1;
5178 	}
5179 
5180 	ret = btrfs_lookup_file_extent(trans, root, path,
5181 				       objectid, start, trans != NULL);
5182 	if (ret < 0) {
5183 		err = ret;
5184 		goto out;
5185 	}
5186 
5187 	if (ret != 0) {
5188 		if (path->slots[0] == 0)
5189 			goto not_found;
5190 		path->slots[0]--;
5191 	}
5192 
5193 	leaf = path->nodes[0];
5194 	item = btrfs_item_ptr(leaf, path->slots[0],
5195 			      struct btrfs_file_extent_item);
5196 	/* are we inside the extent that was found? */
5197 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5198 	found_type = btrfs_key_type(&found_key);
5199 	if (found_key.objectid != objectid ||
5200 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5201 		goto not_found;
5202 	}
5203 
5204 	found_type = btrfs_file_extent_type(leaf, item);
5205 	extent_start = found_key.offset;
5206 	compress_type = btrfs_file_extent_compression(leaf, item);
5207 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5208 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5209 		extent_end = extent_start +
5210 		       btrfs_file_extent_num_bytes(leaf, item);
5211 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5212 		size_t size;
5213 		size = btrfs_file_extent_inline_len(leaf, item);
5214 		extent_end = (extent_start + size + root->sectorsize - 1) &
5215 			~((u64)root->sectorsize - 1);
5216 	}
5217 
5218 	if (start >= extent_end) {
5219 		path->slots[0]++;
5220 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
5221 			ret = btrfs_next_leaf(root, path);
5222 			if (ret < 0) {
5223 				err = ret;
5224 				goto out;
5225 			}
5226 			if (ret > 0)
5227 				goto not_found;
5228 			leaf = path->nodes[0];
5229 		}
5230 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5231 		if (found_key.objectid != objectid ||
5232 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
5233 			goto not_found;
5234 		if (start + len <= found_key.offset)
5235 			goto not_found;
5236 		em->start = start;
5237 		em->len = found_key.offset - start;
5238 		goto not_found_em;
5239 	}
5240 
5241 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5242 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5243 		em->start = extent_start;
5244 		em->len = extent_end - extent_start;
5245 		em->orig_start = extent_start -
5246 				 btrfs_file_extent_offset(leaf, item);
5247 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
5248 		if (bytenr == 0) {
5249 			em->block_start = EXTENT_MAP_HOLE;
5250 			goto insert;
5251 		}
5252 		if (compress_type != BTRFS_COMPRESS_NONE) {
5253 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5254 			em->compress_type = compress_type;
5255 			em->block_start = bytenr;
5256 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
5257 									 item);
5258 		} else {
5259 			bytenr += btrfs_file_extent_offset(leaf, item);
5260 			em->block_start = bytenr;
5261 			em->block_len = em->len;
5262 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
5263 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5264 		}
5265 		goto insert;
5266 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5267 		unsigned long ptr;
5268 		char *map;
5269 		size_t size;
5270 		size_t extent_offset;
5271 		size_t copy_size;
5272 
5273 		em->block_start = EXTENT_MAP_INLINE;
5274 		if (!page || create) {
5275 			em->start = extent_start;
5276 			em->len = extent_end - extent_start;
5277 			goto out;
5278 		}
5279 
5280 		size = btrfs_file_extent_inline_len(leaf, item);
5281 		extent_offset = page_offset(page) + pg_offset - extent_start;
5282 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5283 				size - extent_offset);
5284 		em->start = extent_start + extent_offset;
5285 		em->len = (copy_size + root->sectorsize - 1) &
5286 			~((u64)root->sectorsize - 1);
5287 		em->orig_start = EXTENT_MAP_INLINE;
5288 		if (compress_type) {
5289 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5290 			em->compress_type = compress_type;
5291 		}
5292 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5293 		if (create == 0 && !PageUptodate(page)) {
5294 			if (btrfs_file_extent_compression(leaf, item) !=
5295 			    BTRFS_COMPRESS_NONE) {
5296 				ret = uncompress_inline(path, inode, page,
5297 							pg_offset,
5298 							extent_offset, item);
5299 				BUG_ON(ret); /* -ENOMEM */
5300 			} else {
5301 				map = kmap(page);
5302 				read_extent_buffer(leaf, map + pg_offset, ptr,
5303 						   copy_size);
5304 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
5305 					memset(map + pg_offset + copy_size, 0,
5306 					       PAGE_CACHE_SIZE - pg_offset -
5307 					       copy_size);
5308 				}
5309 				kunmap(page);
5310 			}
5311 			flush_dcache_page(page);
5312 		} else if (create && PageUptodate(page)) {
5313 			BUG();
5314 			if (!trans) {
5315 				kunmap(page);
5316 				free_extent_map(em);
5317 				em = NULL;
5318 
5319 				btrfs_release_path(path);
5320 				trans = btrfs_join_transaction(root);
5321 
5322 				if (IS_ERR(trans))
5323 					return ERR_CAST(trans);
5324 				goto again;
5325 			}
5326 			map = kmap(page);
5327 			write_extent_buffer(leaf, map + pg_offset, ptr,
5328 					    copy_size);
5329 			kunmap(page);
5330 			btrfs_mark_buffer_dirty(leaf);
5331 		}
5332 		set_extent_uptodate(io_tree, em->start,
5333 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
5334 		goto insert;
5335 	} else {
5336 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
5337 		WARN_ON(1);
5338 	}
5339 not_found:
5340 	em->start = start;
5341 	em->len = len;
5342 not_found_em:
5343 	em->block_start = EXTENT_MAP_HOLE;
5344 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5345 insert:
5346 	btrfs_release_path(path);
5347 	if (em->start > start || extent_map_end(em) <= start) {
5348 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
5349 		       "[%llu %llu]\n", (unsigned long long)em->start,
5350 		       (unsigned long long)em->len,
5351 		       (unsigned long long)start,
5352 		       (unsigned long long)len);
5353 		err = -EIO;
5354 		goto out;
5355 	}
5356 
5357 	err = 0;
5358 	write_lock(&em_tree->lock);
5359 	ret = add_extent_mapping(em_tree, em);
5360 	/* it is possible that someone inserted the extent into the tree
5361 	 * while we had the lock dropped.  It is also possible that
5362 	 * an overlapping map exists in the tree
5363 	 */
5364 	if (ret == -EEXIST) {
5365 		struct extent_map *existing;
5366 
5367 		ret = 0;
5368 
5369 		existing = lookup_extent_mapping(em_tree, start, len);
5370 		if (existing && (existing->start > start ||
5371 		    existing->start + existing->len <= start)) {
5372 			free_extent_map(existing);
5373 			existing = NULL;
5374 		}
5375 		if (!existing) {
5376 			existing = lookup_extent_mapping(em_tree, em->start,
5377 							 em->len);
5378 			if (existing) {
5379 				err = merge_extent_mapping(em_tree, existing,
5380 							   em, start,
5381 							   root->sectorsize);
5382 				free_extent_map(existing);
5383 				if (err) {
5384 					free_extent_map(em);
5385 					em = NULL;
5386 				}
5387 			} else {
5388 				err = -EIO;
5389 				free_extent_map(em);
5390 				em = NULL;
5391 			}
5392 		} else {
5393 			free_extent_map(em);
5394 			em = existing;
5395 			err = 0;
5396 		}
5397 	}
5398 	write_unlock(&em_tree->lock);
5399 out:
5400 
5401 	trace_btrfs_get_extent(root, em);
5402 
5403 	if (path)
5404 		btrfs_free_path(path);
5405 	if (trans) {
5406 		ret = btrfs_end_transaction(trans, root);
5407 		if (!err)
5408 			err = ret;
5409 	}
5410 	if (err) {
5411 		free_extent_map(em);
5412 		return ERR_PTR(err);
5413 	}
5414 	BUG_ON(!em); /* Error is always set */
5415 	return em;
5416 }
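/*
 * Rough shape of btrfs_get_extent(), for reference: try the cached
 * extent map tree first, otherwise walk the file extent items and
 * build an em for a REG/PREALLOC extent, an INLINE extent (copied
 * into the page), or a hole, then insert it, merging with anything
 * that raced in while the tree lock was dropped.
 */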
5417 
5418 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5419 					   size_t pg_offset, u64 start, u64 len,
5420 					   int create)
5421 {
5422 	struct extent_map *em;
5423 	struct extent_map *hole_em = NULL;
5424 	u64 range_start = start;
5425 	u64 end;
5426 	u64 found;
5427 	u64 found_end;
5428 	int err = 0;
5429 
5430 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5431 	if (IS_ERR(em))
5432 		return em;
5433 	if (em) {
5434 		/*
5435 		 * if our em maps to a hole, there might
5436 		 * actually be delalloc bytes behind it
5437 		 */
5438 		if (em->block_start != EXTENT_MAP_HOLE)
5439 			return em;
5440 		else
5441 			hole_em = em;
5442 	}
5443 
5444 	/* check to see if we've wrapped (len == -1 or similar) */
5445 	end = start + len;
5446 	if (end < start)
5447 		end = (u64)-1;
5448 	else
5449 		end -= 1;
5450 
5451 	em = NULL;
5452 
5453 	/* ok, we didn't find anything, let's look for delalloc */
5454 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5455 				 end, len, EXTENT_DELALLOC, 1);
5456 	found_end = range_start + found;
5457 	if (found_end < range_start)
5458 		found_end = (u64)-1;
5459 
5460 	/*
5461 	 * we didn't find anything useful, return
5462 	 * the original results from get_extent()
5463 	 */
5464 	if (range_start > end || found_end <= start) {
5465 		em = hole_em;
5466 		hole_em = NULL;
5467 		goto out;
5468 	}
5469 
5470 	/* adjust the range_start to make sure it doesn't
5471 	 * go backwards from the start they passed in
5472 	 */
5473 	range_start = max(start, range_start);
5474 	found = found_end - range_start;
5475 
5476 	if (found > 0) {
5477 		u64 hole_start = start;
5478 		u64 hole_len = len;
5479 
5480 		em = alloc_extent_map();
5481 		if (!em) {
5482 			err = -ENOMEM;
5483 			goto out;
5484 		}
5485 		/*
5486 		 * when btrfs_get_extent can't find anything it
5487 		 * returns one huge hole
5488 		 *
5489 		 * make sure what it found really fits our range, and
5490 		 * adjust to make sure it is based on the start from
5491 		 * the caller
5492 		 */
5493 		if (hole_em) {
5494 			u64 calc_end = extent_map_end(hole_em);
5495 
5496 			if (calc_end <= start || (hole_em->start > end)) {
5497 				free_extent_map(hole_em);
5498 				hole_em = NULL;
5499 			} else {
5500 				hole_start = max(hole_em->start, start);
5501 				hole_len = calc_end - hole_start;
5502 			}
5503 		}
5504 		em->bdev = NULL;
5505 		if (hole_em && range_start > hole_start) {
5506 			/* our hole starts before our delalloc, so we
5507 			 * have to return just the parts of the hole
5508 			 * that go until the delalloc starts
5509 			 */
5510 			em->len = min(hole_len,
5511 				      range_start - hole_start);
5512 			em->start = hole_start;
5513 			em->orig_start = hole_start;
5514 			/*
5515 			 * don't adjust block start at all,
5516 			 * it is fixed at EXTENT_MAP_HOLE
5517 			 */
5518 			em->block_start = hole_em->block_start;
5519 			em->block_len = hole_len;
5520 		} else {
5521 			em->start = range_start;
5522 			em->len = found;
5523 			em->orig_start = range_start;
5524 			em->block_start = EXTENT_MAP_DELALLOC;
5525 			em->block_len = found;
5526 		}
5527 	} else if (hole_em) {
5528 		return hole_em;
5529 	}
5530 out:
5531 
5532 	free_extent_map(hole_em);
5533 	if (err) {
5534 		free_extent_map(em);
5535 		return ERR_PTR(err);
5536 	}
5537 	return em;
5538 }
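/*
 * Example: a buffered write that is still delalloc shows up in the
 * io_tree as EXTENT_DELALLOC while btrfs_get_extent() reports a hole,
 * so fiemap relies on the synthetic EXTENT_MAP_DELALLOC em built here.
 */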
5539 
5540 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5541 						  struct extent_map *em,
5542 						  u64 start, u64 len)
5543 {
5544 	struct btrfs_root *root = BTRFS_I(inode)->root;
5545 	struct btrfs_trans_handle *trans;
5546 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5547 	struct btrfs_key ins;
5548 	u64 alloc_hint;
5549 	int ret;
5550 	bool insert = false;
5551 
5552 	/*
5553 	 * Ok, if the extent map we looked up is a hole and is for the exact
5554 	 * range we want, there is no reason to allocate a new one. However, if
5555 	 * it is not right, then we need to free this one and drop the cache for
5556 	 * our range.
5557 	 */
5558 	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
5559 	    em->len != len) {
5560 		free_extent_map(em);
5561 		em = NULL;
5562 		insert = true;
5563 		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5564 	}
5565 
5566 	trans = btrfs_join_transaction(root);
5567 	if (IS_ERR(trans))
5568 		return ERR_CAST(trans);
5569 
5570 	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
5571 		btrfs_add_inode_defrag(trans, inode);
5572 
5573 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5574 
5575 	alloc_hint = get_extent_allocation_hint(inode, start, len);
5576 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5577 				   alloc_hint, &ins, 1);
5578 	if (ret) {
5579 		em = ERR_PTR(ret);
5580 		goto out;
5581 	}
5582 
5583 	if (!em) {
5584 		em = alloc_extent_map();
5585 		if (!em) {
5586 			em = ERR_PTR(-ENOMEM);
5587 			goto out;
5588 		}
5589 	}
5590 
5591 	em->start = start;
5592 	em->orig_start = em->start;
5593 	em->len = ins.offset;
5594 
5595 	em->block_start = ins.objectid;
5596 	em->block_len = ins.offset;
5597 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5598 
5599 	/*
5600 	 * We need to do this because if we're using the original em we searched
5601 	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
5602 	 */
5603 	em->flags = 0;
5604 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
5605 
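	/*
	 * -EEXIST here means some other em raced into the tree while our
	 * lock was dropped; keep dropping the cached range until ours
	 * goes in.
	 */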
5606 	while (insert) {
5607 		write_lock(&em_tree->lock);
5608 		ret = add_extent_mapping(em_tree, em);
5609 		write_unlock(&em_tree->lock);
5610 		if (ret != -EEXIST)
5611 			break;
5612 		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
5613 	}
5614 
5615 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
5616 					   ins.offset, ins.offset, 0);
5617 	if (ret) {
5618 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
5619 		em = ERR_PTR(ret);
5620 	}
5621 out:
5622 	btrfs_end_transaction(trans, root);
5623 	return em;
5624 }
5625 
5626 /*
5627  * returns 1 when the nocow is safe, < 0 on error, 0 if the
5628  * block must be cow'd
5629  */
5630 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5631 				      struct inode *inode, u64 offset, u64 len)
5632 {
5633 	struct btrfs_path *path;
5634 	int ret;
5635 	struct extent_buffer *leaf;
5636 	struct btrfs_root *root = BTRFS_I(inode)->root;
5637 	struct btrfs_file_extent_item *fi;
5638 	struct btrfs_key key;
5639 	u64 disk_bytenr;
5640 	u64 backref_offset;
5641 	u64 extent_end;
5642 	u64 num_bytes;
5643 	int slot;
5644 	int found_type;
5645 
5646 	path = btrfs_alloc_path();
5647 	if (!path)
5648 		return -ENOMEM;
5649 
5650 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
5651 				       offset, 0);
5652 	if (ret < 0)
5653 		goto out;
5654 
5655 	slot = path->slots[0];
5656 	if (ret == 1) {
5657 		if (slot == 0) {
5658 			/* can't find the item, must cow */
5659 			ret = 0;
5660 			goto out;
5661 		}
5662 		slot--;
5663 	}
5664 	ret = 0;
5665 	leaf = path->nodes[0];
5666 	btrfs_item_key_to_cpu(leaf, &key, slot);
5667 	if (key.objectid != btrfs_ino(inode) ||
5668 	    key.type != BTRFS_EXTENT_DATA_KEY) {
5669 		/* not our file or wrong item type, must cow */
5670 		goto out;
5671 	}
5672 
5673 	if (key.offset > offset) {
5674 		/* Wrong offset, must cow */
5675 		goto out;
5676 	}
5677 
5678 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5679 	found_type = btrfs_file_extent_type(leaf, fi);
5680 	if (found_type != BTRFS_FILE_EXTENT_REG &&
5681 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
5682 		/* not a regular extent, must cow */
5683 		goto out;
5684 	}
5685 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5686 	backref_offset = btrfs_file_extent_offset(leaf, fi);
5687 
5688 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
5689 	if (extent_end < offset + len) {
5690 		/* extent doesn't include our full range, must cow */
5691 		goto out;
5692 	}
5693 
5694 	if (btrfs_extent_readonly(root, disk_bytenr))
5695 		goto out;
5696 
5697 	/*
5698 	 * look for other files referencing this extent, if we
5699 	 * find any we must cow
5700 	 */
5701 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
5702 				  key.offset - backref_offset, disk_bytenr))
5703 		goto out;
5704 
5705 	/*
5706 	 * adjust disk_bytenr and num_bytes to cover just the bytes
5707 	 * in this extent we are about to write.  If there
5708 	 * are any csums in that range we have to cow in order
5709 	 * to keep the csums correct
5710 	 */
5711 	disk_bytenr += backref_offset;
5712 	disk_bytenr += offset - key.offset;
5713 	num_bytes = min(offset + len, extent_end) - offset;
5714 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
5715 		goto out;
5716 	/*
5717 	 * all of the above have passed, it is safe to overwrite this extent
5718 	 * without cow
5719 	 */
5720 	ret = 1;
5721 out:
5722 	btrfs_free_path(path);
5723 	return ret;
5724 }
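/*
 * Example: an O_DIRECT overwrite of a fallocate'd (PREALLOC) region
 * that is not shared by snapshots, not read-only, and has no csums in
 * range passes every check above and is written in place; any failed
 * check means we fall back to cow.
 */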
5725 
5726 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5727 				   struct buffer_head *bh_result, int create)
5728 {
5729 	struct extent_map *em;
5730 	struct btrfs_root *root = BTRFS_I(inode)->root;
5731 	u64 start = iblock << inode->i_blkbits;
5732 	u64 len = bh_result->b_size;
5733 	struct btrfs_trans_handle *trans;
5734 
5735 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5736 	if (IS_ERR(em))
5737 		return PTR_ERR(em);
5738 
5739 	/*
5740 	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
5741 	 * io.  INLINE is special, and we could probably kludge it in here, but
5742 	 * it's still buffered so for safety let's just fall back to the generic
5743 	 * buffered path.
5744 	 *
5745 	 * For COMPRESSED we _have_ to read the entire extent in so we can
5746 	 * decompress it, so there will be buffering required no matter what we
5747 	 * do, so go ahead and fallback to buffered.
5748 	 *
5749 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
5750 	 * to buffered IO.  Don't blame me, this is the price we pay for using
5751 	 * the generic code.
5752 	 */
5753 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5754 	    em->block_start == EXTENT_MAP_INLINE) {
5755 		free_extent_map(em);
5756 		return -ENOTBLK;
5757 	}
5758 
5759 	/* Just a good old fashioned hole, return */
5760 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5761 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5762 		free_extent_map(em);
5763 		/* DIO will do one hole at a time, so just unlock a sector */
5764 		unlock_extent(&BTRFS_I(inode)->io_tree, start,
5765 			      start + root->sectorsize - 1);
5766 		return 0;
5767 	}
5768 
5769 	/*
5770 	 * We don't allocate a new extent in the following cases
5771 	 *
5772 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
5773 	 * existing extent.
5774 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
5775 	 * just use the extent.
5776 	 *
5777 	 */
5778 	if (!create) {
5779 		len = em->len - (start - em->start);
5780 		goto map;
5781 	}
5782 
5783 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
5784 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
5785 	     em->block_start != EXTENT_MAP_HOLE)) {
5786 		int type;
5787 		int ret;
5788 		u64 block_start;
5789 
5790 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5791 			type = BTRFS_ORDERED_PREALLOC;
5792 		else
5793 			type = BTRFS_ORDERED_NOCOW;
5794 		len = min(len, em->len - (start - em->start));
5795 		block_start = em->block_start + (start - em->start);
5796 
5797 		/*
5798 		 * we're not going to log anything, but we do need
5799 		 * to make sure the current transaction stays open
5800 		 * while we look for nocow cross refs
5801 		 */
5802 		trans = btrfs_join_transaction(root);
5803 		if (IS_ERR(trans))
5804 			goto must_cow;
5805 
5806 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
5807 			ret = btrfs_add_ordered_extent_dio(inode, start,
5808 					   block_start, len, len, type);
5809 			btrfs_end_transaction(trans, root);
5810 			if (ret) {
5811 				free_extent_map(em);
5812 				return ret;
5813 			}
5814 			goto unlock;
5815 		}
5816 		btrfs_end_transaction(trans, root);
5817 	}
5818 must_cow:
5819 	/*
5820 	 * this will cow the extent, reset the len in case we changed
5821 	 * it above
5822 	 */
5823 	len = bh_result->b_size;
5824 	em = btrfs_new_extent_direct(inode, em, start, len);
5825 	if (IS_ERR(em))
5826 		return PTR_ERR(em);
5827 	len = min(len, em->len - (start - em->start));
5828 unlock:
5829 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
5830 			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
5831 			  0, NULL, GFP_NOFS);
5832 map:
5833 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5834 		inode->i_blkbits;
5835 	bh_result->b_size = len;
5836 	bh_result->b_bdev = em->bdev;
5837 	set_buffer_mapped(bh_result);
5838 	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5839 		set_buffer_new(bh_result);
5840 
5841 	free_extent_map(em);
5842 
5843 	return 0;
5844 }
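/*
 * Note: this follows the usual get_block_t contract for direct IO --
 * bh_result describes however much of the request we could map in one
 * go, and __blockdev_direct_IO calls back for the remainder.
 */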
5845 
5846 struct btrfs_dio_private {
5847 	struct inode *inode;
5848 	u64 logical_offset;
5849 	u64 disk_bytenr;
5850 	u64 bytes;
5851 	u32 *csums;
5852 	void *private;
5853 
5854 	/* number of bios pending for this dio */
5855 	atomic_t pending_bios;
5856 
5857 	/* IO errors */
5858 	int errors;
5859 
5860 	struct bio *orig_bio;
5861 };
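/*
 * csums is only allocated for reads on csummed inodes (one u32 per
 * bio_vec of orig_bio); writes generate their csums through the
 * ordered extent path and leave it NULL.
 */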
5862 
5863 static void btrfs_endio_direct_read(struct bio *bio, int err)
5864 {
5865 	struct btrfs_dio_private *dip = bio->bi_private;
5866 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
5867 	struct bio_vec *bvec = bio->bi_io_vec;
5868 	struct inode *inode = dip->inode;
5869 	struct btrfs_root *root = BTRFS_I(inode)->root;
5870 	u64 start;
5871 	u32 *private = dip->csums;
5872 
5873 	start = dip->logical_offset;
5874 	do {
5875 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
5876 			struct page *page = bvec->bv_page;
5877 			char *kaddr;
5878 			u32 csum = ~(u32)0;
5879 			unsigned long flags;
5880 
5881 			local_irq_save(flags);
5882 			kaddr = kmap_atomic(page);
5883 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
5884 					       csum, bvec->bv_len);
5885 			btrfs_csum_final(csum, (char *)&csum);
5886 			kunmap_atomic(kaddr);
5887 			local_irq_restore(flags);
5888 
5889 			flush_dcache_page(bvec->bv_page);
5890 			if (csum != *private) {
5891 				printk(KERN_ERR "btrfs csum failed ino %llu off"
5892 				      " %llu csum %u private %u\n",
5893 				      (unsigned long long)btrfs_ino(inode),
5894 				      (unsigned long long)start,
5895 				      csum, *private);
5896 				err = -EIO;
5897 			}
5898 		}
5899 
5900 		start += bvec->bv_len;
5901 		private++;
5902 		bvec++;
5903 	} while (bvec <= bvec_end);
5904 
5905 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
5906 		      dip->logical_offset + dip->bytes - 1);
5907 	bio->bi_private = dip->private;
5908 
5909 	kfree(dip->csums);
5910 	kfree(dip);
5911 
5912 	/* If we had a csum failure make sure to clear the uptodate flag */
5913 	if (err)
5914 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
5915 	dio_end_io(bio, err);
5916 }
5917 
5918 static void btrfs_endio_direct_write(struct bio *bio, int err)
5919 {
5920 	struct btrfs_dio_private *dip = bio->bi_private;
5921 	struct inode *inode = dip->inode;
5922 	struct btrfs_root *root = BTRFS_I(inode)->root;
5923 	struct btrfs_trans_handle *trans;
5924 	struct btrfs_ordered_extent *ordered = NULL;
5925 	struct extent_state *cached_state = NULL;
5926 	u64 ordered_offset = dip->logical_offset;
5927 	u64 ordered_bytes = dip->bytes;
5928 	int ret;
5929 
5930 	if (err)
5931 		goto out_done;
5932 again:
5933 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
5934 						   &ordered_offset,
5935 						   ordered_bytes);
5936 	if (!ret)
5937 		goto out_test;
5938 
5939 	BUG_ON(!ordered);
5940 
5941 	trans = btrfs_join_transaction(root);
5942 	if (IS_ERR(trans)) {
5943 		err = -ENOMEM;
5944 		goto out;
5945 	}
5946 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5947 
5948 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5949 		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5950 		if (!ret)
5951 			err = btrfs_update_inode_fallback(trans, root, inode);
5952 		goto out;
5953 	}
5954 
5955 	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5956 			 ordered->file_offset + ordered->len - 1, 0,
5957 			 &cached_state);
5958 
5959 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5960 		ret = btrfs_mark_extent_written(trans, inode,
5961 						ordered->file_offset,
5962 						ordered->file_offset +
5963 						ordered->len);
5964 		if (ret) {
5965 			err = ret;
5966 			goto out_unlock;
5967 		}
5968 	} else {
5969 		ret = insert_reserved_file_extent(trans, inode,
5970 						  ordered->file_offset,
5971 						  ordered->start,
5972 						  ordered->disk_len,
5973 						  ordered->len,
5974 						  ordered->len,
5975 						  0, 0, 0,
5976 						  BTRFS_FILE_EXTENT_REG);
5977 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
5978 				   ordered->file_offset, ordered->len);
5979 		if (ret) {
5980 			err = ret;
5981 			WARN_ON(1);
5982 			goto out_unlock;
5983 		}
5984 	}
5985 
5986 	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5987 	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5988 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5989 		btrfs_update_inode_fallback(trans, root, inode);
5990 	ret = 0;
5991 out_unlock:
5992 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5993 			     ordered->file_offset + ordered->len - 1,
5994 			     &cached_state, GFP_NOFS);
5995 out:
5996 	btrfs_delalloc_release_metadata(inode, ordered->len);
5997 	btrfs_end_transaction(trans, root);
5998 	ordered_offset = ordered->file_offset + ordered->len;
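	/* once for us */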
5999 	btrfs_put_ordered_extent(ordered);
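	/* once for the tree */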
6000 	btrfs_put_ordered_extent(ordered);
6001 
6002 out_test:
6003 	/*
6004 	 * our bio might span multiple ordered extents.  If we haven't
6005 	 * completed the accounting for the whole dio, go back and try again
6006 	 */
6007 	if (ordered_offset < dip->logical_offset + dip->bytes) {
6008 		ordered_bytes = dip->logical_offset + dip->bytes -
6009 			ordered_offset;
6010 		goto again;
6011 	}
6012 out_done:
6013 	bio->bi_private = dip->private;
6014 
6015 	kfree(dip->csums);
6016 	kfree(dip);
6017 
6018 	/* If we had an error make sure to clear the uptodate flag */
6019 	if (err)
6020 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6021 	dio_end_io(bio, err);
6022 }
6023 
6024 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
6025 				    struct bio *bio, int mirror_num,
6026 				    unsigned long bio_flags, u64 offset)
6027 {
6028 	int ret;
6029 	struct btrfs_root *root = BTRFS_I(inode)->root;
6030 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
6031 	BUG_ON(ret); /* -ENOMEM */
6032 	return 0;
6033 }
6034 
6035 static void btrfs_end_dio_bio(struct bio *bio, int err)
6036 {
6037 	struct btrfs_dio_private *dip = bio->bi_private;
6038 
6039 	if (err) {
6040 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6041 		      "sector %#Lx len %u errno %d\n",
6042 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
6043 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
6044 		dip->errors = 1;
6045 
6046 		/*
6047 		 * before the atomic variable reaches zero, we must make sure
6048 		 * dip->errors is perceived to be set.
6049 		 */
6050 		smp_mb__before_atomic_dec();
6051 	}
6052 
6053 	/* if there are more bios still pending for this dio, just exit */
6054 	if (!atomic_dec_and_test(&dip->pending_bios))
6055 		goto out;
6056 
6057 	if (dip->errors)
6058 		bio_io_error(dip->orig_bio);
6059 	else {
6060 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
6061 		bio_endio(dip->orig_bio, 0);
6062 	}
6063 out:
6064 	bio_put(bio);
6065 }
6066 
6067 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6068 				       u64 first_sector, gfp_t gfp_flags)
6069 {
6070 	int nr_vecs = bio_get_nr_vecs(bdev);
6071 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
6072 }
6073 
6074 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6075 					 int rw, u64 file_offset, int skip_sum,
6076 					 u32 *csums, int async_submit)
6077 {
6078 	int write = rw & REQ_WRITE;
6079 	struct btrfs_root *root = BTRFS_I(inode)->root;
6080 	int ret;
6081 
6082 	bio_get(bio);
6083 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6084 	if (ret)
6085 		goto err;
6086 
6087 	if (skip_sum)
6088 		goto map;
6089 
6090 	if (write && async_submit) {
6091 		ret = btrfs_wq_submit_bio(root->fs_info,
6092 				   inode, rw, bio, 0, 0,
6093 				   file_offset,
6094 				   __btrfs_submit_bio_start_direct_io,
6095 				   __btrfs_submit_bio_done);
6096 		goto err;
6097 	} else if (write) {
6098 		/*
6099 		 * If we aren't doing async submit, calculate the csum of the
6100 		 * bio now.
6101 		 */
6102 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6103 		if (ret)
6104 			goto err;
6105 	} else if (!skip_sum) {
6106 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
6107 					  file_offset, csums);
6108 		if (ret)
6109 			goto err;
6110 	}
6111 
6112 map:
6113 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
6114 err:
6115 	bio_put(bio);
6116 	return ret;
6117 }
6118 
6119 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6120 				    int skip_sum)
6121 {
6122 	struct inode *inode = dip->inode;
6123 	struct btrfs_root *root = BTRFS_I(inode)->root;
6124 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6125 	struct bio *bio;
6126 	struct bio *orig_bio = dip->orig_bio;
6127 	struct bio_vec *bvec = orig_bio->bi_io_vec;
6128 	u64 start_sector = orig_bio->bi_sector;
6129 	u64 file_offset = dip->logical_offset;
6130 	u64 submit_len = 0;
6131 	u64 map_length;
6132 	int nr_pages = 0;
6133 	u32 *csums = dip->csums;
6134 	int ret = 0;
6135 	int async_submit = 0;
6136 	int write = rw & REQ_WRITE;
6137 
6138 	map_length = orig_bio->bi_size;
6139 	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6140 			      &map_length, NULL, 0);
6141 	if (ret) {
6142 		bio_put(orig_bio);
6143 		return -EIO;
6144 	}
6145 
6146 	if (map_length >= orig_bio->bi_size) {
6147 		bio = orig_bio;
6148 		goto submit;
6149 	}
6150 
6151 	async_submit = 1;
6152 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
6153 	if (!bio)
6154 		return -ENOMEM;
6155 	bio->bi_private = dip;
6156 	bio->bi_end_io = btrfs_end_dio_bio;
6157 	atomic_inc(&dip->pending_bios);
6158 
6159 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6160 		if (unlikely(map_length < submit_len + bvec->bv_len ||
6161 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
6162 				 bvec->bv_offset) < bvec->bv_len)) {
6163 			/*
6164 			 * inc the count before we submit the bio so
6165 			 * we know the end IO handler won't run before
6166 			 * we're done here. Otherwise, the dip might get freed
6167 			 * before we're done setting it up
6168 			 */
6169 			atomic_inc(&dip->pending_bios);
6170 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
6171 						     file_offset, skip_sum,
6172 						     csums, async_submit);
6173 			if (ret) {
6174 				bio_put(bio);
6175 				atomic_dec(&dip->pending_bios);
6176 				goto out_err;
6177 			}
6178 
6179 			/* Writes use the ordered csums */
6180 			if (!write && !skip_sum)
6181 				csums = csums + nr_pages;
6182 			start_sector += submit_len >> 9;
6183 			file_offset += submit_len;
6184 
6185 			submit_len = 0;
6186 			nr_pages = 0;
6187 
6188 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
6189 						  start_sector, GFP_NOFS);
6190 			if (!bio)
6191 				goto out_err;
6192 			bio->bi_private = dip;
6193 			bio->bi_end_io = btrfs_end_dio_bio;
6194 
6195 			map_length = orig_bio->bi_size;
6196 			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6197 					      &map_length, NULL, 0);
6198 			if (ret) {
6199 				bio_put(bio);
6200 				goto out_err;
6201 			}
6202 		} else {
6203 			submit_len += bvec->bv_len;
6204 			nr_pages++;
6205 			bvec++;
6206 		}
6207 	}
6208 
6209 submit:
6210 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6211 				     csums, async_submit);
6212 	if (!ret)
6213 		return 0;
6214 
6215 	bio_put(bio);
6216 out_err:
6217 	dip->errors = 1;
6218 	/*
6219 	 * before the atomic variable reaches zero, we must
6220 	 * make sure dip->errors is perceived to be set.
6221 	 */
6222 	smp_mb__before_atomic_dec();
6223 	if (atomic_dec_and_test(&dip->pending_bios))
6224 		bio_io_error(dip->orig_bio);
6225 
6226 	/* bio_end_io() will handle the error, so we needn't return it */
6227 	return 0;
6228 }
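/*
 * Roughly: btrfs_map_block() tells us how much of the range a single
 * stripe covers; if the orig_bio fits it is submitted whole, otherwise
 * it is re-sliced into per-stripe bios, each taking its slice of the
 * csum array for reads.
 */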
6229 
6230 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6231 				loff_t file_offset)
6232 {
6233 	struct btrfs_root *root = BTRFS_I(inode)->root;
6234 	struct btrfs_dio_private *dip;
6235 	struct bio_vec *bvec = bio->bi_io_vec;
6236 	int skip_sum;
6237 	int write = rw & REQ_WRITE;
6238 	int ret = 0;
6239 
6240 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
6241 
6242 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
6243 	if (!dip) {
6244 		ret = -ENOMEM;
6245 		goto free_ordered;
6246 	}
6247 	dip->csums = NULL;
6248 
6249 	/* Writes use the ordered csum stuff, so we don't need dip->csums */
6250 	if (!write && !skip_sum) {
6251 		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6252 		if (!dip->csums) {
6253 			kfree(dip);
6254 			ret = -ENOMEM;
6255 			goto free_ordered;
6256 		}
6257 	}
6258 
6259 	dip->private = bio->bi_private;
6260 	dip->inode = inode;
6261 	dip->logical_offset = file_offset;
6262 
6263 	dip->bytes = 0;
6264 	do {
6265 		dip->bytes += bvec->bv_len;
6266 		bvec++;
6267 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
6268 
6269 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
6270 	bio->bi_private = dip;
6271 	dip->errors = 0;
6272 	dip->orig_bio = bio;
6273 	atomic_set(&dip->pending_bios, 0);
6274 
6275 	if (write)
6276 		bio->bi_end_io = btrfs_endio_direct_write;
6277 	else
6278 		bio->bi_end_io = btrfs_endio_direct_read;
6279 
6280 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
6281 	if (!ret)
6282 		return;
6283 free_ordered:
6284 	/*
6285 	 * If this is a write, we need to clean up the reserved space and kill
6286 	 * the ordered extent.
6287 	 */
6288 	if (write) {
6289 		struct btrfs_ordered_extent *ordered;
6290 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
6291 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
6292 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
6293 			btrfs_free_reserved_extent(root, ordered->start,
6294 						   ordered->disk_len);
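		/* once for us */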
6295 		btrfs_put_ordered_extent(ordered);
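		/* once for the tree */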
6296 		btrfs_put_ordered_extent(ordered);
6297 	}
6298 	bio_endio(bio, ret);
6299 }
6300 
6301 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
6302 			const struct iovec *iov, loff_t offset,
6303 			unsigned long nr_segs)
6304 {
6305 	int seg;
6306 	int i;
6307 	size_t size;
6308 	unsigned long addr;
6309 	unsigned blocksize_mask = root->sectorsize - 1;
6310 	ssize_t retval = -EINVAL;
6311 	loff_t end = offset;
6312 
6313 	if (offset & blocksize_mask)
6314 		goto out;
6315 
6316 	/* Check the memory alignment.  Blocks cannot straddle pages */
6317 	for (seg = 0; seg < nr_segs; seg++) {
6318 		addr = (unsigned long)iov[seg].iov_base;
6319 		size = iov[seg].iov_len;
6320 		end += size;
6321 		if ((addr & blocksize_mask) || (size & blocksize_mask))
6322 			goto out;
6323 
6324 		/* If this is a write we don't need to check anymore */
6325 		if (rw & WRITE)
6326 			continue;
6327 
6328 		/*
6329 		 * Check to make sure we don't have duplicate iov_base's in this
6330 		 * iovec, if so return EINVAL, otherwise we'll get csum errors
6331 		 * when reading back.
6332 		 */
6333 		for (i = seg + 1; i < nr_segs; i++) {
6334 			if (iov[seg].iov_base == iov[i].iov_base)
6335 				goto out;
6336 		}
6337 	}
6338 	retval = 0;
6339 out:
6340 	return retval;
6341 }
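/*
 * Example: with a 4K sectorsize, an iovec whose base address, length,
 * and file offset are all 4K multiples qualifies for DIO; shift any of
 * them by a few bytes and we return -EINVAL, which btrfs_direct_IO()
 * turns into a plain 0 so the caller falls back to buffered IO.
 */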
6342 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6343 			const struct iovec *iov, loff_t offset,
6344 			unsigned long nr_segs)
6345 {
6346 	struct file *file = iocb->ki_filp;
6347 	struct inode *inode = file->f_mapping->host;
6348 	struct btrfs_ordered_extent *ordered;
6349 	struct extent_state *cached_state = NULL;
6350 	u64 lockstart, lockend;
6351 	ssize_t ret;
6352 	int writing = rw & WRITE;
6353 	int write_bits = 0;
6354 	size_t count = iov_length(iov, nr_segs);
6355 
6356 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6357 			    offset, nr_segs)) {
6358 		return 0;
6359 	}
6360 
6361 	lockstart = offset;
6362 	lockend = offset + count - 1;
6363 
6364 	if (writing) {
6365 		ret = btrfs_delalloc_reserve_space(inode, count);
6366 		if (ret)
6367 			goto out;
6368 	}
6369 
6370 	while (1) {
6371 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6372 				 0, &cached_state);
6373 		/*
6374 		 * We're concerned with the entire range that we're going to be
6375 		 * doing DIO to, so we need to make sure there are no ordered
6376 		 * extents in this range.
6377 		 */
6378 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6379 						     lockend - lockstart + 1);
6380 		if (!ordered)
6381 			break;
6382 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6383 				     &cached_state, GFP_NOFS);
6384 		btrfs_start_ordered_extent(inode, ordered, 1);
6385 		btrfs_put_ordered_extent(ordered);
6386 		cond_resched();
6387 	}
6388 
6389 	/*
6390 	 * we don't use btrfs_set_extent_delalloc because we don't want
6391 	 * the dirty or uptodate bits
6392 	 */
6393 	if (writing) {
6394 		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6395 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6396 				     EXTENT_DELALLOC, NULL, &cached_state,
6397 				     GFP_NOFS);
6398 		if (ret) {
6399 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6400 					 lockend, EXTENT_LOCKED | write_bits,
6401 					 1, 0, &cached_state, GFP_NOFS);
6402 			goto out;
6403 		}
6404 	}
6405 
6406 	free_extent_state(cached_state);
6407 	cached_state = NULL;
6408 
6409 	ret = __blockdev_direct_IO(rw, iocb, inode,
6410 		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6411 		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6412 		   btrfs_submit_direct, 0);
6413 
6414 	if (ret < 0 && ret != -EIOCBQUEUED) {
6415 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
6416 			      offset + iov_length(iov, nr_segs) - 1,
6417 			      EXTENT_LOCKED | write_bits, 1, 0,
6418 			      &cached_state, GFP_NOFS);
6419 	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
6420 		/*
6421 		 * We're falling back to buffered, unlock the section we didn't
6422 		 * do IO on.
6423 		 */
6424 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
6425 			      offset + iov_length(iov, nr_segs) - 1,
6426 			      EXTENT_LOCKED | write_bits, 1, 0,
6427 			      &cached_state, GFP_NOFS);
6428 	}
6429 out:
6430 	free_extent_state(cached_state);
6431 	return ret;
6432 }
6433 
6434 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6435 		__u64 start, __u64 len)
6436 {
6437 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6438 }
6439 
6440 int btrfs_readpage(struct file *file, struct page *page)
6441 {
6442 	struct extent_io_tree *tree;
6443 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6444 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
6445 }
6446 
6447 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
6448 {
6449 	struct extent_io_tree *tree;
6450 
6452 	if (current->flags & PF_MEMALLOC) {
6453 		redirty_page_for_writepage(wbc, page);
6454 		unlock_page(page);
6455 		return 0;
6456 	}
6457 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6458 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
6459 }
6460 
6461 int btrfs_writepages(struct address_space *mapping,
6462 		     struct writeback_control *wbc)
6463 {
6464 	struct extent_io_tree *tree;
6465 
6466 	tree = &BTRFS_I(mapping->host)->io_tree;
6467 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
6468 }
6469 
6470 static int
6471 btrfs_readpages(struct file *file, struct address_space *mapping,
6472 		struct list_head *pages, unsigned nr_pages)
6473 {
6474 	struct extent_io_tree *tree;
6475 	tree = &BTRFS_I(mapping->host)->io_tree;
6476 	return extent_readpages(tree, mapping, pages, nr_pages,
6477 				btrfs_get_extent);
6478 }
6479 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6480 {
6481 	struct extent_io_tree *tree;
6482 	struct extent_map_tree *map;
6483 	int ret;
6484 
6485 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6486 	map = &BTRFS_I(page->mapping->host)->extent_tree;
6487 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
6488 	if (ret == 1) {
6489 		ClearPagePrivate(page);
6490 		set_page_private(page, 0);
6491 		page_cache_release(page);
6492 	}
6493 	return ret;
6494 }
6495 
6496 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6497 {
6498 	if (PageWriteback(page) || PageDirty(page))
6499 		return 0;
6500 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
6501 }
6502 
6503 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6504 {
6505 	struct extent_io_tree *tree;
6506 	struct btrfs_ordered_extent *ordered;
6507 	struct extent_state *cached_state = NULL;
6508 	u64 page_start = page_offset(page);
6509 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
6510 
6512 	/*
6513 	 * we have the page locked, so new writeback can't start,
6514 	 * and the dirty bit won't be cleared while we are here.
6515 	 *
6516 	 * Wait for IO on this page so that we can safely clear
6517 	 * the PagePrivate2 bit and do ordered accounting
6518 	 */
6519 	wait_on_page_writeback(page);
6520 
6521 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6522 	if (offset) {
6523 		btrfs_releasepage(page, GFP_NOFS);
6524 		return;
6525 	}
6526 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6527 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
6528 					   page_offset(page));
6529 	if (ordered) {
6530 		/*
6531 		 * IO on this page will never be started, so we need
6532 		 * to account for any ordered extents now
6533 		 */
6534 		clear_extent_bit(tree, page_start, page_end,
6535 				 EXTENT_DIRTY | EXTENT_DELALLOC |
6536 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
6537 				 &cached_state, GFP_NOFS);
6538 		/*
6539 		 * whoever cleared the private bit is responsible
6540 		 * for the finish_ordered_io
6541 		 */
6542 		if (TestClearPagePrivate2(page)) {
6543 			btrfs_finish_ordered_io(page->mapping->host,
6544 						page_start, page_end);
6545 		}
6546 		btrfs_put_ordered_extent(ordered);
6547 		cached_state = NULL;
6548 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6549 	}
6550 	clear_extent_bit(tree, page_start, page_end,
6551 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
6552 		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
6553 	__btrfs_releasepage(page, GFP_NOFS);
6554 
6555 	ClearPageChecked(page);
6556 	if (PagePrivate(page)) {
6557 		ClearPagePrivate(page);
6558 		set_page_private(page, 0);
6559 		page_cache_release(page);
6560 	}
6561 }
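/*
 * Note: a non-zero 'offset' above means only part of the page is being
 * invalidated, so we just attempt a releasepage and leave the ordered
 * accounting until the whole page goes away.
 */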
6562 
6563 /*
6564  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
6565  * called from a page fault handler when a page is first dirtied. Hence we must
6566  * be careful to check for EOF conditions here. We set the page up correctly
6567  * for a written page which means we get ENOSPC checking when writing into
6568  * holes and correct delalloc and unwritten extent mapping on filesystems that
6569  * support these features.
6570  *
6571  * We are not allowed to take the i_mutex here so we have to play games to
6572  * protect against truncate races as the page could now be beyond EOF.  Because
6573  * vmtruncate() writes the inode size before removing pages, once we have the
6574  * page lock we can determine safely if the page is beyond EOF. If it is not
6575  * beyond EOF, then the page is guaranteed safe against truncation until we
6576  * unlock the page.
6577  */
6578 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6579 {
6580 	struct page *page = vmf->page;
6581 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
6582 	struct btrfs_root *root = BTRFS_I(inode)->root;
6583 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6584 	struct btrfs_ordered_extent *ordered;
6585 	struct extent_state *cached_state = NULL;
6586 	char *kaddr;
6587 	unsigned long zero_start;
6588 	loff_t size;
6589 	int ret;
6590 	int reserved = 0;
6591 	u64 page_start;
6592 	u64 page_end;
6593 
6594 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6595 	if (!ret) {
6596 		ret = btrfs_update_time(vma->vm_file);
6597 		reserved = 1;
6598 	}
6599 	if (ret) {
6600 		if (ret == -ENOMEM)
6601 			ret = VM_FAULT_OOM;
6602 		else /* -ENOSPC, -EIO, etc */
6603 			ret = VM_FAULT_SIGBUS;
6604 		if (reserved)
6605 			goto out;
6606 		goto out_noreserve;
6607 	}
6608 
6609 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
6610 again:
6611 	lock_page(page);
6612 	size = i_size_read(inode);
6613 	page_start = page_offset(page);
6614 	page_end = page_start + PAGE_CACHE_SIZE - 1;
6615 
6616 	if ((page->mapping != inode->i_mapping) ||
6617 	    (page_start >= size)) {
6618 		/* page got truncated out from underneath us */
6619 		goto out_unlock;
6620 	}
6621 	wait_on_page_writeback(page);
6622 
6623 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
6624 	set_page_extent_mapped(page);
6625 
6626 	/*
6627 	 * we can't set the delalloc bits if there are pending ordered
6628 	 * extents.  Drop our locks and wait for them to finish
6629 	 */
6630 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
6631 	if (ordered) {
6632 		unlock_extent_cached(io_tree, page_start, page_end,
6633 				     &cached_state, GFP_NOFS);
6634 		unlock_page(page);
6635 		btrfs_start_ordered_extent(inode, ordered, 1);
6636 		btrfs_put_ordered_extent(ordered);
6637 		goto again;
6638 	}
6639 
6640 	/*
6641 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
6642 	 * if it was already dirty, so for space accounting reasons we need to
6643 	 * clear any delalloc bits for the range we are fixing to save.  There
6644 	 * is probably a better way to do this, but for now keep consistent with
6645 	 * prepare_pages in the normal write path.
6646 	 */
6647 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
6648 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
6649 			  0, 0, &cached_state, GFP_NOFS);
6650 
6651 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
6652 					&cached_state);
6653 	if (ret) {
6654 		unlock_extent_cached(io_tree, page_start, page_end,
6655 				     &cached_state, GFP_NOFS);
6656 		ret = VM_FAULT_SIGBUS;
6657 		goto out_unlock;
6658 	}
6659 	ret = 0;
6660 
6661 	/* page is wholly or partially inside EOF */
6662 	if (page_start + PAGE_CACHE_SIZE > size)
6663 		zero_start = size & ~PAGE_CACHE_MASK;
6664 	else
6665 		zero_start = PAGE_CACHE_SIZE;
6666 
6667 	if (zero_start != PAGE_CACHE_SIZE) {
6668 		kaddr = kmap(page);
6669 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
6670 		flush_dcache_page(page);
6671 		kunmap(page);
6672 	}
6673 	ClearPageChecked(page);
6674 	set_page_dirty(page);
6675 	SetPageUptodate(page);
6676 
6677 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
6678 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
6679 
6680 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
6681 
6682 out_unlock:
6683 	if (!ret)
6684 		return VM_FAULT_LOCKED;
6685 	unlock_page(page);
6686 out:
6687 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
6688 out_noreserve:
6689 	return ret;
6690 }
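/*
 * On success the page is returned locked and dirty (VM_FAULT_LOCKED)
 * with delalloc space reserved for it; the failure paths release
 * whatever was reserved before returning.
 */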
6691 
6692 static int btrfs_truncate(struct inode *inode)
6693 {
6694 	struct btrfs_root *root = BTRFS_I(inode)->root;
6695 	struct btrfs_block_rsv *rsv;
6696 	int ret;
6697 	int err = 0;
6698 	struct btrfs_trans_handle *trans;
6699 	unsigned long nr;
6700 	u64 mask = root->sectorsize - 1;
6701 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
6702 
6703 	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
6704 	if (ret)
6705 		return ret;
6706 
6707 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6708 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6709 
6710 	/*
6711 	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
6712 	 * 3 things going on here
6713 	 *
6714 	 * 1) We need to reserve space for our orphan item and the space to
6715 	 * delete our orphan item.  Lord knows we don't want to have a dangling
6716 	 * orphan item because we didn't reserve space to remove it.
6717 	 *
6718 	 * 2) We need to reserve space to update our inode.
6719 	 *
6720 	 * 3) We need to have something to cache all the space that is going to
6721 	 * be free'd up by the truncate operation, but also have some slack
6722 	 * space reserved in case it uses space during the truncate (thank you
6723 	 * very much snapshotting).
6724 	 *
6725 	 * And we need these to all be separate.  The fact is we can use a lot of
6726 	 * space doing the truncate, and we have no earthly idea how much space
6727 	 * we will use, so we need the truncate reservation to be separate so it
6728 	 * doesn't end up using space reserved for updating the inode or
6729 	 * removing the orphan item.  We also need to be able to stop the
6730 	 * transaction and start a new one, which means we need to be able to
6731 	 * update the inode several times, and we have no way of knowing how
6732 	 * many times that will be, so we can't just reserve 1 item for the
6733 	 * entirety of the operation, so that has to be done separately as well.
6734 	 * Then there is the orphan item, which does indeed need to be held on
6735 	 * to for the whole operation, and we need nobody to touch this reserved
6736 	 * space except the orphan code.
6737 	 *
6738 	 * So that leaves us with
6739 	 *
6740 	 * 1) root->orphan_block_rsv - for the orphan deletion.
6741 	 * 2) rsv - for the truncate reservation, which we will steal from the
6742 	 * transaction reservation.
6743 	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
6744 	 * updating the inode.
6745 	 */
6746 	rsv = btrfs_alloc_block_rsv(root);
6747 	if (!rsv)
6748 		return -ENOMEM;
6749 	rsv->size = min_size;
6750 
6751 	/*
6752 	 * 1 for the truncate slack space
6753 	 * 1 for the orphan item we're going to add
6754 	 * 1 for the orphan item deletion
6755 	 * 1 for updating the inode.
6756 	 */
6757 	trans = btrfs_start_transaction(root, 4);
6758 	if (IS_ERR(trans)) {
6759 		err = PTR_ERR(trans);
6760 		goto out;
6761 	}
6762 
6763 	/* Migrate the slack space for the truncate to our reserve */
6764 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
6765 				      min_size);
6766 	BUG_ON(ret);
6767 
6768 	ret = btrfs_orphan_add(trans, inode);
6769 	if (ret) {
6770 		btrfs_end_transaction(trans, root);
6771 		goto out;
6772 	}
6773 
6774 	/*
6775 	 * setattr is responsible for setting the ordered_data_close flag,
6776 	 * but that is only tested during the last file release.  That
6777 	 * could happen well after the next commit, leaving a great big
6778 	 * window where new writes may get lost if someone chooses to write
6779 	 * to this file after truncating to zero
6780 	 *
6781 	 * The inode doesn't have any dirty data here, and so if we commit
6782 	 * this is a noop.  If someone immediately starts writing to the inode
6783 	 * it is very likely we'll catch some of their writes in this
6784 	 * transaction, and the commit will find this file on the ordered
6785 	 * data list with good things to send down.
6786 	 *
6787 	 * This is a best effort solution, there is still a window where
6788 	 * using truncate to replace the contents of the file will
6789 	 * end up with a zero length file after a crash.
6790 	 */
6791 	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
6792 		btrfs_add_ordered_operation(trans, root, inode);
6793 
6794 	while (1) {
6795 		ret = btrfs_block_rsv_refill(root, rsv, min_size);
6796 		if (ret) {
6797 			/*
6798 			 * This can only happen with the original transaction we
6799 			 * started above, every other time we shouldn't have a
6800 			 * transaction started yet.
6801 			 */
6802 			if (ret == -EAGAIN)
6803 				goto end_trans;
6804 			err = ret;
6805 			break;
6806 		}
6807 
6808 		if (!trans) {
6809 			/* Just need the 1 for updating the inode */
6810 			trans = btrfs_start_transaction(root, 1);
6811 			if (IS_ERR(trans)) {
6812 				ret = err = PTR_ERR(trans);
6813 				trans = NULL;
6814 				break;
6815 			}
6816 		}
6817 
6818 		trans->block_rsv = rsv;
6819 
6820 		ret = btrfs_truncate_inode_items(trans, root, inode,
6821 						 inode->i_size,
6822 						 BTRFS_EXTENT_DATA_KEY);
6823 		if (ret != -EAGAIN) {
6824 			err = ret;
6825 			break;
6826 		}
6827 
6828 		trans->block_rsv = &root->fs_info->trans_block_rsv;
6829 		ret = btrfs_update_inode(trans, root, inode);
6830 		if (ret) {
6831 			err = ret;
6832 			break;
6833 		}
6834 end_trans:
6835 		nr = trans->blocks_used;
6836 		btrfs_end_transaction(trans, root);
6837 		trans = NULL;
6838 		btrfs_btree_balance_dirty(root, nr);
6839 	}
6840 
6841 	if (ret == 0 && inode->i_nlink > 0) {
6842 		trans->block_rsv = root->orphan_block_rsv;
6843 		ret = btrfs_orphan_del(trans, inode);
6844 		if (ret)
6845 			err = ret;
6846 	} else if (ret && inode->i_nlink > 0) {
6847 		/*
6848 		 * Failed to do the truncate, remove us from the in memory
6849 		 * orphan list.
6850 		 */
6851 		ret = btrfs_orphan_del(NULL, inode);
6852 	}
6853 
6854 	if (trans) {
6855 		trans->block_rsv = &root->fs_info->trans_block_rsv;
6856 		ret = btrfs_update_inode(trans, root, inode);
6857 		if (ret && !err)
6858 			err = ret;
6859 
6860 		nr = trans->blocks_used;
6861 		ret = btrfs_end_transaction(trans, root);
6862 		btrfs_btree_balance_dirty(root, nr);
6863 	}
6864 
6865 out:
6866 	btrfs_free_block_rsv(root, rsv);
6867 
6868 	if (ret && !err)
6869 		err = ret;
6870 
6871 	return err;
6872 }
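/*
 * The loop above intentionally runs in small transactions:
 * btrfs_truncate_inode_items() returns -EAGAIN while it still has
 * items to drop, so we end the transaction, rebalance dirty metadata,
 * and start a fresh one rather than reserving the whole truncate up
 * front.
 */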
6873 
6874 /*
6875  * create a new subvolume directory/inode (helper for the ioctl).
6876  */
6877 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6878 			     struct btrfs_root *new_root, u64 new_dirid)
6879 {
6880 	struct inode *inode;
6881 	int err;
6882 	u64 index = 0;
6883 
6884 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
6885 				new_dirid, new_dirid,
6886 				S_IFDIR | (~current_umask() & S_IRWXUGO),
6887 				&index);
6888 	if (IS_ERR(inode))
6889 		return PTR_ERR(inode);
6890 	inode->i_op = &btrfs_dir_inode_operations;
6891 	inode->i_fop = &btrfs_dir_file_operations;
6892 
6893 	set_nlink(inode, 1);
6894 	btrfs_i_size_write(inode, 0);
6895 
6896 	err = btrfs_update_inode(trans, new_root, inode);
6897 
6898 	iput(inode);
6899 	return err;
6900 }
6901 
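/*
 * Allocate an in-memory btrfs inode from the slab cache and initialize
 * all of the btrfs specific state to safe defaults before handing the
 * embedded VFS inode back to the core.
 */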
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->space_info = NULL;
	ei->generation = 0;
	ei->sequence = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->last_unlink_trans = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;

	ei->ordered_data_close = 0;
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->in_defrag = 0;
	ei->delalloc_meta_reserved = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(&ei->io_tree, &inode->i_data);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
	ei->io_tree.track_uptodate = 1;
	ei->io_failure_tree.track_uptodate = 1;
	mutex_init(&ei->log_mutex);
	mutex_init(&ei->delalloc_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}

static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

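/*
 * Final teardown of an in-memory inode: warn about anything that leaked
 * (pages, reservations, delalloc bytes), drop the inode from the
 * ordered-operation and orphan lists, clean up stray ordered extents
 * and free the structure after an RCU grace period.
 */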
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(BTRFS_I(inode)->outstanding_extents);
	WARN_ON(BTRFS_I(inode)->reserved_extents);
	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
	WARN_ON(BTRFS_I(inode)->csum_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
		       (unsigned long long)btrfs_ino(inode));
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->orphan_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		printk(KERN_ERR "btrfs found ordered "
		       "extent %llu %llu on inode cleanup\n",
		       (unsigned long long)ordered->file_offset,
		       (unsigned long long)ordered->len);
		btrfs_remove_ordered_extent(inode, ordered);
		/* drop both the lookup reference and the tree's reference */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	btrfs_remove_delayed_node(inode);
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}

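/*
 * Decide whether an unreferenced inode can be dropped right away: if the
 * root is being deleted we don't keep it cached, with the exception of
 * the free space cache inode, which has to survive until the root is
 * fully dropped.
 */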
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    !btrfs_is_free_space_inode(root, inode))
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
}

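/*
 * Create the slab caches used for inodes, transaction handles,
 * transactions, paths and free space entries.  Any partial setup is
 * undone on failure.
 */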
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}

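/*
 * stat(2) support.  st_blocks is derived from the bytes actually
 * referenced on disk plus any outstanding delalloc bytes, so files
 * with dirty data that hasn't been flushed yet still report a
 * sensible block count.
 */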
static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	u32 blocksize = inode->i_sb->s_blocksize;

	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
	return 0;
}

/*
 * If a file is moved, it will inherit the cow and compression flags of the new
 * directory.
 */
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	if (b_dir->flags & BTRFS_INODE_NODATACOW)
		b_inode->flags |= BTRFS_INODE_NODATACOW;
	else
		b_inode->flags &= ~BTRFS_INODE_NODATACOW;

	if (b_dir->flags & BTRFS_INODE_COMPRESS)
		b_inode->flags |= BTRFS_INODE_COMPRESS;
	else
		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}

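/*
 * Rename has to keep the name change atomic on disk, refuse crossings
 * of subvolume boundaries (except for subvolume links themselves) and
 * pin the tree log so a crash can't leave us with the old name removed
 * but the new name not yet in place.
 */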
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;
	u64 old_ino = btrfs_ino(old_inode);

	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * We're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction.
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(new_dir), index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
		btrfs_add_ordered_operation(trans, root, old_inode);

	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					old_dentry->d_inode,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(btrfs_ino(new_inode) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		if (!ret && new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_fail;
		}
	}

	fixup_inode_flags(new_dir, old_inode);

	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = new_dentry->d_parent;
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction(trans, root);
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}

/*
 * Some fairly slow code that needs optimization.  This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	      atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}

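/*
 * Symlink targets are stored as inline file extents, so the maximum
 * target length is bounded by how much inline data fits in a leaf.
 */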
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		drop_inode = 1;
		goto out_unlock;
	}
	key.objectid = btrfs_ino(inode);
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	if (!err)
		d_instantiate(dentry, inode);
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

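/*
 * Allocate PREALLOC (unwritten) extents covering the requested range.
 * The work is chunked into separate transactions unless the caller
 * passed one in, and unless FALLOC_FL_KEEP_SIZE was given, i_size is
 * pushed forward as the extents are inserted.
 */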
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}

static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

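/*
 * Write access is refused outright on read-only subvolumes and on
 * inodes carrying the btrfs read-only flag; everything else falls
 * through to the generic permission checks.
 */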
static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

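/*
 * Hooks wired into the generic extent_io code so delalloc, checksums
 * and the ordered extent machinery all get a say during page IO.
 */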
static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
	.d_release	= btrfs_dentry_release,
};