// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	struct inode *inode;

	/*
	 * Since DIO can use anonymous pages, we cannot use page_offset() to
	 * grab the file offset, thus we need a dedicated member for the file
	 * offset.
	 */
	u64 file_offset;
	/* Used for bio::bi_size */
	u32 bytes;

	/*
	 * References to this structure. There is one reference per in-flight
	 * bio plus one while we're still setting up.
	 */
	refcount_t refs;

	/* Array of checksums */
	u8 *csums;

	/* This must be last */
	struct bio bio;
};

static struct bio_set btrfs_dio_bioset;
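
/*
 * The bio embedded at the end of struct btrfs_dio_private pairs with
 * btrfs_dio_bioset: the dio private is allocated as front padding of the
 * bio (which is why the bio member must be last), and can be recovered
 * from an in-flight bio with container_of().
 */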

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
struct kmem_cache *btrfs_free_space_bitmap_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate);

/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock; if it fails on the first
 *		     attempt, return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&BTRFS_I(inode)->i_mmap_lock);
	return 0;
}

/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&BTRFS_I(inode)->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
}
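
/*
 * Illustrative pairing: the BTRFS_ILOCK_SHARED and BTRFS_ILOCK_MMAP bits
 * passed to btrfs_inode_unlock() must match the ones used to lock, while
 * BTRFS_ILOCK_TRY only affects acquisition.  For example:
 *
 *	if (btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY))
 *		return -EAGAIN;
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 */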

/*
 * Clean up all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: when an error happens, the caller must not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved
 * metadata to be released, which we want to happen only when finishing the
 * ordered extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = page_offset(locked_page);
	u64 page_end = page_start + PAGE_SIZE - 1;

	struct page *page;

	while (index <= end_index) {
		/*
		 * For locked page, we will call end_extent_writepage() on it
		 * in run_delalloc_range() for the error handling.  That
		 * end_extent_writepage() function will call
		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
		 * run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then __endio_write_update_ordered() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	/* The locked page covers the full range, nothing needs to be done */
	if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
		return;
	/*
	 * In case this page belongs to the delalloc range being instantiated
	 * then skip it, since the first page of a range is going to be
	 * properly cleaned up by the caller of run_delalloc_range
	 */
	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
		bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
		offset = page_offset(locked_page) + PAGE_SIZE;
	}

	return __endio_write_update_ordered(inode, offset, bytes, false);
}

static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits both in a leaf and within the configured
	 * maximum inline size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;
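
	/*
	 * Worked example (illustrative only, assuming a 4K sector size and
	 * the default max_inline of 2048): a 1000 byte uncompressed file can
	 * be inlined (size <= sectorsize and data_len <= max_inline), and so
	 * can a 3000 byte file that compresses to 1000 bytes, but a 3000
	 * byte uncompressed file cannot, as its data_len of 3000 exceeds
	 * max_inline.
	 */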

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	unsigned int write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};
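
/*
 * cow_file_range_async() splits a delalloc range into 512K async_chunk
 * pieces which all share a single async_cow: num_chunks counts the
 * in-flight chunks, and the last chunk torn down (async_cow_free()) drops
 * the final reference and frees the whole allocation.
 */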

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner cases:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), so whichever finishes later will find the page
	 * already unlocked, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * happen if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the trailing partial page would stay locked until the full compression
	 * finishes, delaying the write of the other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock the whole delalloc range
	 * first, to prevent any submitted async extent from unlocking the full
	 * page.  That way we can ensure, for the subpage case, that only the
	 * last async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(end + 1, PAGE_SIZE))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(round_up(actual_end, blocksize), PAGE_SIZE))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(BTRFS_I(inode), start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (BTRFS_I(inode)->defrag_compress)
			compress_type = BTRFS_I(inode)->defrag_compress;
		else if (BTRFS_I(inode)->prop_compress)
			compress_type = BTRFS_I(inode)->prop_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* lets try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win: compare the amount of data read with the blocks on
		 * disk; compression must free at least one sector size
		 */
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->prop_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    (page_offset(async_chunk->locked_page) >= start &&
	     page_offset(async_chunk->locked_page) <= end)) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock_page == 0, so that we
	 * can directly submit them without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0, NULL);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started) {
		ret = 0;
		goto out;
	}
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);
			const u64 page_end = page_start + PAGE_SIZE - 1;

			btrfs_page_set_error(inode->root->fs_info, locked_page,
					     page_start, PAGE_SIZE);
			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			end_extent_writepage(locked_page, ret, page_start, page_end);
			unlock_page(locked_page);
		}
		goto out;
	}

	ret = extent_write_locked_range(&inode->vfs_inode, start, end);
	/* All pages will be unlocked, including @locked_page */
out:
	kfree(async_extent);
	return ret;
}

static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end);

	/* We have fallen back to an uncompressed write */
	if (!async_extent->pages)
		return submit_uncompressed_range(inode, async_extent, locked_page);

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * Here we used to try again by going back to the
		 * non-compressed path on ENOSPC.  But if we can't reserve
		 * space even for the compressed size, there is no way it
		 * could work for the larger uncompressed size, so take
		 * the error path directly.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (ret) {
		btrfs_drop_extent_cache(inode, start, end, 0);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
			    async_extent->ram_size,	/* num_bytes */
			    ins.objectid,		/* disk_bytenr */
			    ins.offset,			/* compressed_len */
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags,
			    async_chunk->blkcg_css, true)) {
		const u64 start = async_extent->start;
		const u64 end = start + async_extent->ram_size - 1;

		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);

		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
		free_async_extent_pages(async_extent);
	}
	*alloc_hint = ins.objectid + ins.offset;
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	return ret;
}

/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
			    inode->root->root_key.objectid,
			    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}

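/*
 * Pick an allocation hint for a new extent: prefer the disk location of an
 * existing mapping at @start, falling back to the first mapped block of the
 * inode, so that new allocations land near existing data where possible.
 */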
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 *
 * When unlock == 1, we unlock the pages in successfully allocated regions.
 * When unlock == 0, we leave them locked for writing them out.
 *
 * However, we unlock all the pages except @locked_page in case of failure.
 *
 * In summary, page locking state will be as follows:
 *
 * - page_started == 1 (return value)
 *     - All the pages are unlocked. IO is started.
 *     - Note that this can happen only on success
 * - unlock == 1
 *     - All the pages except @locked_page are unlocked in any case
 * - unlock == 0
 *     - On success, all the pages are locked for writing them out
 *     - On failure, all the pages except @locked_page are unlocked
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() to unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
					       ins.objectid, cur_alloc_size, 0,
					       1 << BTRFS_ORDERED_REGULAR,
					       BTRFS_COMPRESS_NONE);
		if (ret)
			goto out_drop_extent_cache;

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * If btrfs_reloc_clone_csums() failed: since @start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * If done_offset is non-NULL and ret == -EAGAIN, we expect the
	 * caller to write out the successfully allocated region and retry.
	 */
	if (done_offset && ret == -EAGAIN) {
		if (orig_start < start)
			*done_offset = start - 1;
		else
			*done_offset = start;
		return ret;
	} else if (ret == -EAGAIN) {
		/* Convert to -ENOSPC since the caller cannot retry. */
		ret = -ENOSPC;
	}

	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in, e.g.,
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of unlock == 0, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (!unlock && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			goto out;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	goto out;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						     work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_cow_start() has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as it's paired with the init
	 * happening in cow_file_range_async()
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}

static int cow_file_range_async(struct btrfs_inode *inode,
				struct writeback_control *wbc,
				struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 cur_end;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	bool should_compress;
	unsigned nofs_flag;
	const unsigned int write_flags = wbc_to_write_flags(wbc);

	unlock_extent(&inode->io_tree, start, end);

	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
		num_chunks = 1;
		should_compress = false;
	} else {
		should_compress = true;
	}

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!ctx) {
		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
			EXTENT_DO_ACCOUNTING;
		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;

		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
		return -ENOMEM;
	}

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		if (should_compress)
			cur_end = min(end, start + SZ_512K - 1);
		else
			cur_end = end;

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = &inode->vfs_inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
1581 		 */
1582 		if (locked_page) {
1583 			/*
1584 			 * Depending on the compressibility, the pages might or
1585 			 * might not go through async.  We want all of them to
1586 			 * be accounted against wbc once.  Let's do it here
1587 			 * before the paths diverge.  wbc accounting is used
1588 			 * only for foreign writeback detection and doesn't
1589 			 * need full accuracy.  Just account the whole thing
1590 			 * against the first page.
1591 			 */
1592 			wbc_account_cgroup_owner(wbc, locked_page,
1593 						 cur_end - start);
1594 			async_chunk[i].locked_page = locked_page;
1595 			locked_page = NULL;
1596 		} else {
1597 			async_chunk[i].locked_page = NULL;
1598 		}
1599 
1600 		if (blkcg_css != blkcg_root_css) {
1601 			css_get(blkcg_css);
1602 			async_chunk[i].blkcg_css = blkcg_css;
1603 		} else {
1604 			async_chunk[i].blkcg_css = NULL;
1605 		}
1606 
1607 		btrfs_init_work(&async_chunk[i].work, async_cow_start,
1608 				async_cow_submit, async_cow_free);
1609 
1610 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1611 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1612 
1613 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1614 
1615 		*nr_written += nr_pages;
1616 		start = cur_end + 1;
1617 	}
1618 	*page_started = 1;
1619 	return 0;
1620 }
1621 
1622 static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
1623 				       struct page *locked_page, u64 start,
1624 				       u64 end, int *page_started,
1625 				       unsigned long *nr_written)
1626 {
1627 	u64 done_offset = end;
1628 	int ret;
1629 	bool locked_page_done = false;
1630 
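	/*
	 * cow_file_range() may make only partial progress on a zoned
	 * filesystem and return -EAGAIN with done_offset marking the end of
	 * the completed range.  Submit what finished, and if no progress was
	 * made at all, wait for a zone finish before retrying.
	 */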
1631 	while (start <= end) {
1632 		ret = cow_file_range(inode, locked_page, start, end, page_started,
1633 				     nr_written, 0, &done_offset);
1634 		if (ret && ret != -EAGAIN)
1635 			return ret;
1636 
1637 		if (*page_started) {
1638 			ASSERT(ret == 0);
1639 			return 0;
1640 		}
1641 
1642 		if (ret == 0)
1643 			done_offset = end;
1644 
1645 		if (done_offset == start) {
1646 			wait_on_bit_io(&inode->root->fs_info->flags,
1647 				       BTRFS_FS_NEED_ZONE_FINISH,
1648 				       TASK_UNINTERRUPTIBLE);
1649 			continue;
1650 		}
1651 
1652 		if (!locked_page_done) {
1653 			__set_page_dirty_nobuffers(locked_page);
1654 			account_page_redirty(locked_page);
1655 		}
1656 		locked_page_done = true;
1657 		extent_write_locked_range(&inode->vfs_inode, start, done_offset);
1658 
1659 		start = done_offset + 1;
1660 	}
1661 
1662 	*page_started = 1;
1663 
1664 	return 0;
1665 }
1666 
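/*
 * Check whether any checksum items exist for the byte range
 * [bytenr, bytenr + num_bytes).  Returns 1 if at least one csum exists,
 * 0 if none do, and a negative errno on failure.
 */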
1667 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1668 					u64 bytenr, u64 num_bytes)
1669 {
1670 	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
1671 	struct btrfs_ordered_sum *sums;
1672 	int ret;
1673 	LIST_HEAD(list);
1674 
1675 	ret = btrfs_lookup_csums_range(csum_root, bytenr,
1676 				       bytenr + num_bytes - 1, &list, 0);
1677 	if (ret == 0 && list_empty(&list))
1678 		return 0;
1679 
1680 	while (!list_empty(&list)) {
1681 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1682 		list_del(&sums->list);
1683 		kfree(sums);
1684 	}
1685 	if (ret < 0)
1686 		return ret;
1687 	return 1;
1688 }
1689 
1690 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1691 			   const u64 start, const u64 end,
1692 			   int *page_started, unsigned long *nr_written)
1693 {
1694 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1695 	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1696 	const u64 range_bytes = end + 1 - start;
1697 	struct extent_io_tree *io_tree = &inode->io_tree;
1698 	u64 range_start = start;
1699 	u64 count;
1700 
1701 	/*
1702 	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1703 	 * made we did not have enough available data space, and therefore we
1704 	 * did not reserve data space for it, since we thought we could NOCOW
1705 	 * the respective file range (either there is a prealloc extent or the
1706 	 * inode has the NOCOW bit set).
1707 	 *
1708 	 * However when we need to fallback to COW mode (because for example the
1709 	 * block group for the corresponding extent was turned to RO mode by a
1710 	 * scrub or relocation) we need to do the following:
1711 	 *
1712 	 * 1) We increment the bytes_may_use counter of the data space info.
1713 	 *    If COW succeeds, it allocates a new data extent and after doing
1714 	 *    that it decrements the space info's bytes_may_use counter and
1715 	 *    increments its bytes_reserved counter by the same amount (we do
1716 	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1717 	 *    bytes_may_use counter to compensate (when space is reserved at
1718 	 *    buffered write time, the bytes_may_use counter is incremented);
1719 	 *
1720 	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1721 	 *    that if the COW path fails for any reason, it decrements (through
1722 	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1723 	 *    data space info, which we incremented in the step above.
1724 	 *
1725 	 * If we need to fallback to cow and the inode corresponds to a free
1726 	 * space cache inode or an inode of the data relocation tree, we must
1727 	 * also increment bytes_may_use of the data space_info for the same
1728 	 * reason. Space caches and relocated data extents always get a prealloc
1729 	 * extent for them, however scrub or balance may have set the block
1730 	 * group that contains that extent to RO mode and therefore force COW
1731 	 * when starting writeback.
1732 	 */
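	/*
	 * count_range_bits() returns how many bytes in [start, end] have
	 * EXTENT_NORESERVE set, and moves range_start forward to the first
	 * such byte.
	 */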
1733 	count = count_range_bits(io_tree, &range_start, end, range_bytes,
1734 				 EXTENT_NORESERVE, 0);
1735 	if (count > 0 || is_space_ino || is_reloc_ino) {
1736 		u64 bytes = count;
1737 		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1738 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1739 
1740 		if (is_space_ino || is_reloc_ino)
1741 			bytes = range_bytes;
1742 
1743 		spin_lock(&sinfo->lock);
1744 		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
1745 		spin_unlock(&sinfo->lock);
1746 
1747 		if (count > 0)
1748 			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1749 					 0, 0, NULL);
1750 	}
1751 
1752 	return cow_file_range(inode, locked_page, start, end, page_started,
1753 			      nr_written, 1, NULL);
1754 }
1755 
1756 struct can_nocow_file_extent_args {
1757 	/* Input fields. */
1758 
1759 	/* Start file offset of the range we want to NOCOW. */
1760 	u64 start;
1761 	/* End file offset (inclusive) of the range we want to NOCOW. */
1762 	u64 end;
1763 	bool writeback_path;
1764 	bool strict;
1765 	/*
1766 	 * Free the path passed to can_nocow_file_extent() once it's not needed
1767 	 * anymore.
1768 	 */
1769 	bool free_path;
1770 
1771 	/* Output fields. Only set when can_nocow_file_extent() returns 1. */
1772 
1773 	u64 disk_bytenr;
1774 	u64 disk_num_bytes;
1775 	u64 extent_offset;
1776 	/* Number of bytes that can be written to in NOCOW mode. */
1777 	u64 num_bytes;
1778 };
1779 
1780 /*
1781  * Check if we can NOCOW the file extent that the path points to.
1782  * This function may return with the path released, so the caller should check
1783  * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1784  *
1785  * Returns: < 0 on error
1786  *            0 if we cannot NOCOW
1787  *            1 if we can NOCOW
1788  */
1789 static int can_nocow_file_extent(struct btrfs_path *path,
1790 				 struct btrfs_key *key,
1791 				 struct btrfs_inode *inode,
1792 				 struct can_nocow_file_extent_args *args)
1793 {
1794 	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1795 	struct extent_buffer *leaf = path->nodes[0];
1796 	struct btrfs_root *root = inode->root;
1797 	struct btrfs_file_extent_item *fi;
1798 	u64 extent_end;
1799 	u8 extent_type;
1800 	int can_nocow = 0;
1801 	int ret = 0;
1802 
1803 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1804 	extent_type = btrfs_file_extent_type(leaf, fi);
1805 
1806 	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1807 		goto out;
1808 
1809 	/* Can't access these fields unless we know it's not an inline extent. */
1810 	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1811 	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1812 	args->extent_offset = btrfs_file_extent_offset(leaf, fi);
1813 
1814 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1815 	    extent_type == BTRFS_FILE_EXTENT_REG)
1816 		goto out;
1817 
1818 	/*
1819 	 * If the extent was created before the generation where the last snapshot
1820 	 * for its subvolume was created, then this implies the extent is shared,
1821 	 * hence we must COW.
1822 	 */
1823 	if (!args->strict &&
1824 	    btrfs_file_extent_generation(leaf, fi) <=
1825 	    btrfs_root_last_snapshot(&root->root_item))
1826 		goto out;
1827 
1828 	/* An explicit hole, must COW. */
1829 	if (args->disk_bytenr == 0)
1830 		goto out;
1831 
1832 	/* Compressed/encrypted/encoded extents must be COWed. */
1833 	if (btrfs_file_extent_compression(leaf, fi) ||
1834 	    btrfs_file_extent_encryption(leaf, fi) ||
1835 	    btrfs_file_extent_other_encoding(leaf, fi))
1836 		goto out;
1837 
1838 	extent_end = btrfs_file_extent_end(path);
1839 
1840 	/*
1841 	 * The following checks can be expensive, as they need to take other
1842 	 * locks and do btree or rbtree searches, so release the path to avoid
1843 	 * blocking other tasks for too long.
1844 	 */
1845 	btrfs_release_path(path);
1846 
1847 	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
1848 				    key->offset - args->extent_offset,
1849 				    args->disk_bytenr, false, path);
1850 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1851 	if (ret != 0)
1852 		goto out;
1853 
1854 	if (args->free_path) {
1855 		/*
1856 		 * We don't need the path anymore, plus through the
1857 		 * csum_exist_in_range() call below we will end up allocating
1858 		 * another path. So free the path to avoid unnecessary extra
1859 		 * memory usage.
1860 		 */
1861 		btrfs_free_path(path);
1862 		path = NULL;
1863 	}
1864 
1865 	/* If there are pending snapshots for this root, we must COW. */
1866 	if (args->writeback_path && !is_freespace_inode &&
1867 	    atomic_read(&root->snapshot_force_cow))
1868 		goto out;
1869 
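	/*
	 * Translate the start of the NOCOW range to a physical disk byte:
	 * skip extent_offset bytes into the on-disk extent, plus however far
	 * args->start is past the start of this file extent item.
	 */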
1870 	args->disk_bytenr += args->extent_offset;
1871 	args->disk_bytenr += args->start - key->offset;
1872 	args->num_bytes = min(args->end + 1, extent_end) - args->start;
1873 
1874 	/*
1875 	 * Force COW if csums exist in the range. This ensures that csums for a
1876 	 * given extent are either valid or do not exist.
1877 	 */
1878 	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes);
1879 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1880 	if (ret != 0)
1881 		goto out;
1882 
1883 	can_nocow = 1;
1884  out:
1885 	if (args->free_path && path)
1886 		btrfs_free_path(path);
1887 
1888 	return ret < 0 ? ret : can_nocow;
1889 }
1890 
1891 /*
1892  * Run the NOCOW writeback path.  This checks for snapshots or COW copies
1893  * of the extents that exist in the file, and COWs the file as required.
1894  *
1895  * If no COW copies or snapshots exist, we write directly to the existing
1896  * blocks on disk.
1897  */
1898 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1899 				       struct page *locked_page,
1900 				       const u64 start, const u64 end,
1901 				       int *page_started,
1902 				       unsigned long *nr_written)
1903 {
1904 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1905 	struct btrfs_root *root = inode->root;
1906 	struct btrfs_path *path;
1907 	u64 cow_start = (u64)-1;
1908 	u64 cur_offset = start;
1909 	int ret;
1910 	bool check_prev = true;
1911 	u64 ino = btrfs_ino(inode);
1912 	struct btrfs_block_group *bg;
1913 	bool nocow = false;
1914 	struct can_nocow_file_extent_args nocow_args = { 0 };
1915 
1916 	path = btrfs_alloc_path();
1917 	if (!path) {
1918 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1919 					     EXTENT_LOCKED | EXTENT_DELALLOC |
1920 					     EXTENT_DO_ACCOUNTING |
1921 					     EXTENT_DEFRAG, PAGE_UNLOCK |
1922 					     PAGE_START_WRITEBACK |
1923 					     PAGE_END_WRITEBACK);
1924 		return -ENOMEM;
1925 	}
1926 
1927 	nocow_args.end = end;
1928 	nocow_args.writeback_path = true;
1929 
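	/*
	 * Walk the file extent items covering [start, end].  Ranges that can
	 * be written in place get an ordered extent directly; everything
	 * else is accumulated from cow_start onwards and flushed through
	 * fallback_to_cow().
	 */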
1930 	while (1) {
1931 		struct btrfs_key found_key;
1932 		struct btrfs_file_extent_item *fi;
1933 		struct extent_buffer *leaf;
1934 		u64 extent_end;
1935 		u64 ram_bytes;
1936 		u64 nocow_end;
1937 		int extent_type;
1938 
1939 		nocow = false;
1940 
1941 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1942 					       cur_offset, 0);
1943 		if (ret < 0)
1944 			goto error;
1945 
1946 		/*
1947 		 * If there is no extent for our range when doing the initial
1948 		 * search, then go back to the previous slot as it will be the
1949 		 * one containing the search offset
1950 		 */
1951 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1952 			leaf = path->nodes[0];
1953 			btrfs_item_key_to_cpu(leaf, &found_key,
1954 					      path->slots[0] - 1);
1955 			if (found_key.objectid == ino &&
1956 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1957 				path->slots[0]--;
1958 		}
1959 		check_prev = false;
1960 next_slot:
1961 		/* Go to next leaf if we have exhausted the current one */
1962 		leaf = path->nodes[0];
1963 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1964 			ret = btrfs_next_leaf(root, path);
1965 			if (ret < 0) {
1966 				if (cow_start != (u64)-1)
1967 					cur_offset = cow_start;
1968 				goto error;
1969 			}
1970 			if (ret > 0)
1971 				break;
1972 			leaf = path->nodes[0];
1973 		}
1974 
1975 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1976 
1977 		/* Didn't find anything for our INO */
1978 		if (found_key.objectid > ino)
1979 			break;
1980 		/*
1981 		 * Keep searching until we find an EXTENT_ITEM or there are no
1982 		 * more extents for this inode
1983 		 */
1984 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
1985 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
1986 			path->slots[0]++;
1987 			goto next_slot;
1988 		}
1989 
1990 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
1991 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1992 		    found_key.offset > end)
1993 			break;
1994 
1995 		/*
1996 		 * If the found extent starts after requested offset, then
1997 		 * adjust extent_end to be right before this extent begins
1998 		 */
1999 		if (found_key.offset > cur_offset) {
2000 			extent_end = found_key.offset;
2001 			extent_type = 0;
2002 			goto out_check;
2003 		}
2004 
2005 		/*
2006 		 * Found an extent which begins before our range and potentially
2007 		 * intersects it.
2008 		 */
2009 		fi = btrfs_item_ptr(leaf, path->slots[0],
2010 				    struct btrfs_file_extent_item);
2011 		extent_type = btrfs_file_extent_type(leaf, fi);
2012 		/* If this is triggered then we have a memory corruption. */
2013 		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2014 		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2015 			ret = -EUCLEAN;
2016 			goto error;
2017 		}
2018 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
2019 		extent_end = btrfs_file_extent_end(path);
2020 
2021 		/*
2022 		 * If the extent we got ends before our current offset, skip to
2023 		 * the next extent.
2024 		 */
2025 		if (extent_end <= cur_offset) {
2026 			path->slots[0]++;
2027 			goto next_slot;
2028 		}
2029 
2030 		nocow_args.start = cur_offset;
2031 		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2032 		if (ret < 0) {
2033 			if (cow_start != (u64)-1)
2034 				cur_offset = cow_start;
2035 			goto error;
2036 		} else if (ret == 0) {
2037 			goto out_check;
2038 		}
2039 
2040 		ret = 0;
2041 		bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
2042 		if (bg)
2043 			nocow = true;
2044 out_check:
2045 		/*
2046 		 * If nocow is false then record the beginning of the range
2047 		 * that needs to be COWed
2048 		 */
2049 		if (!nocow) {
2050 			if (cow_start == (u64)-1)
2051 				cow_start = cur_offset;
2052 			cur_offset = extent_end;
2053 			if (cur_offset > end)
2054 				break;
2055 			if (!path->nodes[0])
2056 				continue;
2057 			path->slots[0]++;
2058 			goto next_slot;
2059 		}
2060 
2061 		/*
2062 		 * COW the range from cow_start to found_key.offset - 1.  The key
2063 		 * marks the beginning of the first extent that can be NOCOW'ed,
2064 		 * which follows a range that needs to be COW'ed.
2065 		 */
2066 		if (cow_start != (u64)-1) {
2067 			ret = fallback_to_cow(inode, locked_page,
2068 					      cow_start, found_key.offset - 1,
2069 					      page_started, nr_written);
2070 			if (ret)
2071 				goto error;
2072 			cow_start = (u64)-1;
2073 		}
2074 
2075 		nocow_end = cur_offset + nocow_args.num_bytes - 1;
2076 
2077 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
2078 			u64 orig_start = found_key.offset - nocow_args.extent_offset;
2079 			struct extent_map *em;
2080 
2081 			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
2082 					  orig_start,
2083 					  nocow_args.disk_bytenr, /* block_start */
2084 					  nocow_args.num_bytes, /* block_len */
2085 					  nocow_args.disk_num_bytes, /* orig_block_len */
2086 					  ram_bytes, BTRFS_COMPRESS_NONE,
2087 					  BTRFS_ORDERED_PREALLOC);
2088 			if (IS_ERR(em)) {
2089 				ret = PTR_ERR(em);
2090 				goto error;
2091 			}
2092 			free_extent_map(em);
2093 			ret = btrfs_add_ordered_extent(inode,
2094 					cur_offset, nocow_args.num_bytes,
2095 					nocow_args.num_bytes,
2096 					nocow_args.disk_bytenr,
2097 					nocow_args.num_bytes, 0,
2098 					1 << BTRFS_ORDERED_PREALLOC,
2099 					BTRFS_COMPRESS_NONE);
2100 			if (ret) {
2101 				btrfs_drop_extent_cache(inode, cur_offset,
2102 							nocow_end, 0);
2103 				goto error;
2104 			}
2105 		} else {
2106 			ret = btrfs_add_ordered_extent(inode, cur_offset,
2107 						       nocow_args.num_bytes,
2108 						       nocow_args.num_bytes,
2109 						       nocow_args.disk_bytenr,
2110 						       nocow_args.num_bytes,
2111 						       0,
2112 						       1 << BTRFS_ORDERED_NOCOW,
2113 						       BTRFS_COMPRESS_NONE);
2114 			if (ret)
2115 				goto error;
2116 		}
2117 
2118 		if (nocow) {
2119 			btrfs_dec_nocow_writers(bg);
2120 			nocow = false;
2121 		}
2122 
2123 		if (btrfs_is_data_reloc_root(root))
2124 			/*
2125 			 * The error is handled later, as we must prevent
2126 			 * extent_clear_unlock_delalloc() in the error handler
2127 			 * from freeing metadata of the created ordered extent.
2128 			 */
2129 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
2130 						      nocow_args.num_bytes);
2131 
2132 		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2133 					     locked_page, EXTENT_LOCKED |
2134 					     EXTENT_DELALLOC |
2135 					     EXTENT_CLEAR_DATA_RESV,
2136 					     PAGE_UNLOCK | PAGE_SET_ORDERED);
2137 
2138 		cur_offset = extent_end;
2139 
2140 		/*
2141 		 * On btrfs_reloc_clone_csums() error it is now OK to call the
2142 		 * error handler, as metadata for the created ordered extent
2143 		 * will only be freed by btrfs_finish_ordered_io().
2144 		 */
2145 		if (ret)
2146 			goto error;
2147 		if (cur_offset > end)
2148 			break;
2149 	}
2150 	btrfs_release_path(path);
2151 
2152 	if (cur_offset <= end && cow_start == (u64)-1)
2153 		cow_start = cur_offset;
2154 
2155 	if (cow_start != (u64)-1) {
2156 		cur_offset = end;
2157 		ret = fallback_to_cow(inode, locked_page, cow_start, end,
2158 				      page_started, nr_written);
2159 		if (ret)
2160 			goto error;
2161 	}
2162 
2163 error:
2164 	if (nocow)
2165 		btrfs_dec_nocow_writers(bg);
2166 
2167 	if (ret && cur_offset < end)
2168 		extent_clear_unlock_delalloc(inode, cur_offset, end,
2169 					     locked_page, EXTENT_LOCKED |
2170 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
2171 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2172 					     PAGE_START_WRITEBACK |
2173 					     PAGE_END_WRITEBACK);
2174 	btrfs_free_path(path);
2175 	return ret;
2176 }
2177 
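/*
 * NOCOW writeback is attempted when the inode has the NODATACOW or PREALLOC
 * flag set, unless part of the range is marked for defrag, which forces COW.
 */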
2178 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2179 {
2180 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2181 		if (inode->defrag_bytes &&
2182 		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
2183 				   0, NULL))
2184 			return false;
2185 		return true;
2186 	}
2187 	return false;
2188 }
2189 
2190 /*
2191  * Function to process delayed allocation (create CoW) for ranges which are
2192  * being touched for the first time.
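 *
 * The range is dispatched one of three ways: run_delalloc_nocow() when
 * NOCOW writes may be possible, plain cow_file_range() (or
 * run_delalloc_zoned() on zoned filesystems) when the data cannot be
 * compressed, and cow_file_range_async() when it may be compressed.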
2193  */
2194 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2195 		u64 start, u64 end, int *page_started, unsigned long *nr_written,
2196 		struct writeback_control *wbc)
2197 {
2198 	int ret;
2199 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2200 
2201 	/*
2202 	 * The range must cover part of the @locked_page, or the returned
2203 	 * @page_started can confuse the caller.
2204 	 */
2205 	ASSERT(!(end <= page_offset(locked_page) ||
2206 		 start >= page_offset(locked_page) + PAGE_SIZE));
2207 
2208 	if (should_nocow(inode, start, end)) {
2209 		/*
2210 		 * Normally on a zoned device we're only doing COW writes, but
2211 		 * in case of relocation on a zoned filesystem we have taken
2212 		 * the precaution that we're only writing sequentially.  It's
2213 		 * safe to use run_delalloc_nocow() here, like for regular
2214 		 * preallocated inodes.
2215 		 */
2216 		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
2217 		ret = run_delalloc_nocow(inode, locked_page, start, end,
2218 					 page_started, nr_written);
2219 	} else if (!btrfs_inode_can_compress(inode) ||
2220 		   !inode_need_compress(inode, start, end)) {
2221 		if (zoned)
2222 			ret = run_delalloc_zoned(inode, locked_page, start, end,
2223 						 page_started, nr_written);
2224 		else
2225 			ret = cow_file_range(inode, locked_page, start, end,
2226 					     page_started, nr_written, 1, NULL);
2227 	} else {
2228 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
2229 		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
2230 					   page_started, nr_written);
2231 	}
2232 	ASSERT(ret <= 0);
2233 	if (ret)
2234 		btrfs_cleanup_ordered_extents(inode, locked_page, start,
2235 					      end - start + 1);
2236 	return ret;
2237 }
2238 
2239 void btrfs_split_delalloc_extent(struct inode *inode,
2240 				 struct extent_state *orig, u64 split)
2241 {
2242 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2243 	u64 size;
2244 
2245 	/* not delalloc, ignore it */
2246 	if (!(orig->state & EXTENT_DELALLOC))
2247 		return;
2248 
2249 	size = orig->end - orig->start + 1;
2250 	if (size > fs_info->max_extent_size) {
2251 		u32 num_extents;
2252 		u64 new_size;
2253 
2254 		/*
2255 		 * See the explanation in btrfs_merge_delalloc_extent, the same
2256 		 * applies here, just in reverse.
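		 *
		 * For example, with a 128M max extent size a 256M delalloc
		 * extent accounts for 2 outstanding extents.  Splitting it at
		 * the 64M mark leaves a 64M piece (1 extent) and a 192M piece
		 * (2 extents), so one extra outstanding extent must be added.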
2257 		 */
2258 		new_size = orig->end - split + 1;
2259 		num_extents = count_max_extents(fs_info, new_size);
2260 		new_size = split - orig->start;
2261 		num_extents += count_max_extents(fs_info, new_size);
2262 		if (count_max_extents(fs_info, size) >= num_extents)
2263 			return;
2264 	}
2265 
2266 	spin_lock(&BTRFS_I(inode)->lock);
2267 	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
2268 	spin_unlock(&BTRFS_I(inode)->lock);
2269 }
2270 
2271 /*
2272  * Handle merged delayed allocation extents so we can keep track of new extents
2273  * that are just merged onto old extents, such as when we are doing sequential
2274  * writes, so we can properly account for the metadata space we'll need.
2275  */
2276 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
2277 				 struct extent_state *other)
2278 {
2279 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2280 	u64 new_size, old_size;
2281 	u32 num_extents;
2282 
2283 	/* not delalloc, ignore it */
2284 	if (!(other->state & EXTENT_DELALLOC))
2285 		return;
2286 
2287 	if (new->start > other->start)
2288 		new_size = new->end - other->start + 1;
2289 	else
2290 		new_size = other->end - new->start + 1;
2291 
2292 	/* we're not bigger than the max, unreserve the space and go */
2293 	if (new_size <= fs_info->max_extent_size) {
2294 		spin_lock(&BTRFS_I(inode)->lock);
2295 		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
2296 		spin_unlock(&BTRFS_I(inode)->lock);
2297 		return;
2298 	}
2299 
2300 	/*
2301 	 * We have to add up either side to figure out how many extents were
2302 	 * accounted for before we merged into one big extent.  If the number of
2303 	 * extents we accounted for is <= the amount we need for the new range
2304 	 * then we can return, otherwise drop.  Think of it like this
2305 	 *
2306 	 * [ 4k][MAX_SIZE]
2307 	 *
2308 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2309 	 * need 2 outstanding extents, on one side we have 1 and the other side
2310 	 * we have 1 so they are == and we can return.  But in this case
2311 	 *
2312 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2313 	 *
2314 	 * Each range on their own accounts for 2 extents, but merged together
2315 	 * they are only 3 extents worth of accounting, so we need to drop in
2316 	 * this case.
2317 	 */
2318 	old_size = other->end - other->start + 1;
2319 	num_extents = count_max_extents(fs_info, old_size);
2320 	old_size = new->end - new->start + 1;
2321 	num_extents += count_max_extents(fs_info, old_size);
2322 	if (count_max_extents(fs_info, new_size) >= num_extents)
2323 		return;
2324 
2325 	spin_lock(&BTRFS_I(inode)->lock);
2326 	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
2327 	spin_unlock(&BTRFS_I(inode)->lock);
2328 }
2329 
2330 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
2331 				      struct inode *inode)
2332 {
2333 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2334 
2335 	spin_lock(&root->delalloc_lock);
2336 	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
2337 		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
2338 			      &root->delalloc_inodes);
2339 		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2340 			&BTRFS_I(inode)->runtime_flags);
2341 		root->nr_delalloc_inodes++;
2342 		if (root->nr_delalloc_inodes == 1) {
2343 			spin_lock(&fs_info->delalloc_root_lock);
2344 			BUG_ON(!list_empty(&root->delalloc_root));
2345 			list_add_tail(&root->delalloc_root,
2346 				      &fs_info->delalloc_roots);
2347 			spin_unlock(&fs_info->delalloc_root_lock);
2348 		}
2349 	}
2350 	spin_unlock(&root->delalloc_lock);
2351 }
2352 
2353 
2354 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2355 				struct btrfs_inode *inode)
2356 {
2357 	struct btrfs_fs_info *fs_info = root->fs_info;
2358 
2359 	if (!list_empty(&inode->delalloc_inodes)) {
2360 		list_del_init(&inode->delalloc_inodes);
2361 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2362 			  &inode->runtime_flags);
2363 		root->nr_delalloc_inodes--;
2364 		if (!root->nr_delalloc_inodes) {
2365 			ASSERT(list_empty(&root->delalloc_inodes));
2366 			spin_lock(&fs_info->delalloc_root_lock);
2367 			BUG_ON(list_empty(&root->delalloc_root));
2368 			list_del_init(&root->delalloc_root);
2369 			spin_unlock(&fs_info->delalloc_root_lock);
2370 		}
2371 	}
2372 }
2373 
2374 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2375 				     struct btrfs_inode *inode)
2376 {
2377 	spin_lock(&root->delalloc_lock);
2378 	__btrfs_del_delalloc_inode(root, inode);
2379 	spin_unlock(&root->delalloc_lock);
2380 }
2381 
2382 /*
2383  * Properly track delayed allocation bytes in the inode and maintain the
2384  * list of inodes that have pending delalloc work to be done.
2385  */
2386 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
2387 			       unsigned *bits)
2388 {
2389 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2390 
2391 	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
2392 		WARN_ON(1);
2393 	/*
2394 	 * set_bit and clear_bit hooks normally require _irqsave/restore
2395 	 * but in this case, we are only testing for the DELALLOC
2396 	 * bit, which is only set or cleared with irqs on
2397 	 */
2398 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
2399 		struct btrfs_root *root = BTRFS_I(inode)->root;
2400 		u64 len = state->end + 1 - state->start;
2401 		u32 num_extents = count_max_extents(fs_info, len);
2402 		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
2403 
2404 		spin_lock(&BTRFS_I(inode)->lock);
2405 		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
2406 		spin_unlock(&BTRFS_I(inode)->lock);
2407 
2408 		/* For sanity tests */
2409 		if (btrfs_is_testing(fs_info))
2410 			return;
2411 
2412 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2413 					 fs_info->delalloc_batch);
2414 		spin_lock(&BTRFS_I(inode)->lock);
2415 		BTRFS_I(inode)->delalloc_bytes += len;
2416 		if (*bits & EXTENT_DEFRAG)
2417 			BTRFS_I(inode)->defrag_bytes += len;
2418 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2419 					 &BTRFS_I(inode)->runtime_flags))
2420 			btrfs_add_delalloc_inodes(root, inode);
2421 		spin_unlock(&BTRFS_I(inode)->lock);
2422 	}
2423 
2424 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2425 	    (*bits & EXTENT_DELALLOC_NEW)) {
2426 		spin_lock(&BTRFS_I(inode)->lock);
2427 		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
2428 			state->start;
2429 		spin_unlock(&BTRFS_I(inode)->lock);
2430 	}
2431 }
2432 
2433 /*
2434  * Once a range is no longer delalloc this function ensures that proper
2435  * accounting happens.
2436  */
2437 void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
2438 				 struct extent_state *state, unsigned *bits)
2439 {
2440 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
2441 	struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
2442 	u64 len = state->end + 1 - state->start;
2443 	u32 num_extents = count_max_extents(fs_info, len);
2444 
2445 	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
2446 		spin_lock(&inode->lock);
2447 		inode->defrag_bytes -= len;
2448 		spin_unlock(&inode->lock);
2449 	}
2450 
2451 	/*
2452 	 * set_bit and clear_bit hooks normally require _irqsave/restore
2453 	 * but in this case, we are only testing for the DELALLOC
2454 	 * bit, which is only set or cleared with irqs on
2455 	 */
2456 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
2457 		struct btrfs_root *root = inode->root;
2458 		bool do_list = !btrfs_is_free_space_inode(inode);
2459 
2460 		spin_lock(&inode->lock);
2461 		btrfs_mod_outstanding_extents(inode, -num_extents);
2462 		spin_unlock(&inode->lock);
2463 
2464 		/*
2465 		 * We don't reserve metadata space for space cache inodes so we
2466 		 * don't need to call delalloc_release_metadata if there is an
2467 		 * error.
2468 		 */
2469 		if (*bits & EXTENT_CLEAR_META_RESV &&
2470 		    root != fs_info->tree_root)
2471 			btrfs_delalloc_release_metadata(inode, len, false);
2472 
2473 		/* For sanity tests. */
2474 		if (btrfs_is_testing(fs_info))
2475 			return;
2476 
2477 		if (!btrfs_is_data_reloc_root(root) &&
2478 		    do_list && !(state->state & EXTENT_NORESERVE) &&
2479 		    (*bits & EXTENT_CLEAR_DATA_RESV))
2480 			btrfs_free_reserved_data_space_noquota(fs_info, len);
2481 
2482 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2483 					 fs_info->delalloc_batch);
2484 		spin_lock(&inode->lock);
2485 		inode->delalloc_bytes -= len;
2486 		if (do_list && inode->delalloc_bytes == 0 &&
2487 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2488 					&inode->runtime_flags))
2489 			btrfs_del_delalloc_inode(root, inode);
2490 		spin_unlock(&inode->lock);
2491 	}
2492 
2493 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2494 	    (*bits & EXTENT_DELALLOC_NEW)) {
2495 		spin_lock(&inode->lock);
2496 		ASSERT(inode->new_delalloc_bytes >= len);
2497 		inode->new_delalloc_bytes -= len;
2498 		if (*bits & EXTENT_ADD_INODE_BYTES)
2499 			inode_add_bytes(&inode->vfs_inode, len);
2500 		spin_unlock(&inode->lock);
2501 	}
2502 }
2503 
2504 /*
2505  * In order to insert checksums into the metadata in large chunks, we wait
2506  * until bio submission time.  All the pages in the bio are checksummed and
2507  * sums are attached onto the ordered extent record.
2508  *
2509  * At IO completion time the csums attached to the ordered extent record
2510  * are inserted into the btree.
2511  */
2512 static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
2513 					   u64 dio_file_offset)
2514 {
2515 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
2516 }
2517 
2518 /*
2519  * Split an extent_map at [start, start + len]
2520  *
2521  * This function is intended to be used only for extract_ordered_extent().
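 *
 * The pinned extent map is replaced with up to three new maps: the first
 * pre bytes, the last post bytes and the part in between.  A zero pre or
 * post means that split is skipped.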
2522  */
2523 static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
2524 			  u64 pre, u64 post)
2525 {
2526 	struct extent_map_tree *em_tree = &inode->extent_tree;
2527 	struct extent_map *em;
2528 	struct extent_map *split_pre = NULL;
2529 	struct extent_map *split_mid = NULL;
2530 	struct extent_map *split_post = NULL;
2531 	int ret = 0;
2532 	unsigned long flags;
2533 
2534 	/* Sanity check */
2535 	if (pre == 0 && post == 0)
2536 		return 0;
2537 
2538 	split_pre = alloc_extent_map();
2539 	if (pre)
2540 		split_mid = alloc_extent_map();
2541 	if (post)
2542 		split_post = alloc_extent_map();
2543 	if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
2544 		ret = -ENOMEM;
2545 		goto out;
2546 	}
2547 
2548 	ASSERT(pre + post < len);
2549 
2550 	lock_extent(&inode->io_tree, start, start + len - 1);
2551 	write_lock(&em_tree->lock);
2552 	em = lookup_extent_mapping(em_tree, start, len);
2553 	if (!em) {
2554 		ret = -EIO;
2555 		goto out_unlock;
2556 	}
2557 
2558 	ASSERT(em->len == len);
2559 	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
2560 	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
2561 	ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
2562 	ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
2563 	ASSERT(!list_empty(&em->list));
2564 
2565 	flags = em->flags;
2566 	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
2567 
2568 	/* First, replace the em with a new extent_map starting from em->start */
2569 	split_pre->start = em->start;
2570 	split_pre->len = (pre ? pre : em->len - post);
2571 	split_pre->orig_start = split_pre->start;
2572 	split_pre->block_start = em->block_start;
2573 	split_pre->block_len = split_pre->len;
2574 	split_pre->orig_block_len = split_pre->block_len;
2575 	split_pre->ram_bytes = split_pre->len;
2576 	split_pre->flags = flags;
2577 	split_pre->compress_type = em->compress_type;
2578 	split_pre->generation = em->generation;
2579 
2580 	replace_extent_mapping(em_tree, em, split_pre, 1);
2581 
2582 	/*
2583 	 * Now we only have an extent_map at:
2584 	 *     [em->start, em->start + pre] if pre != 0
2585 	 *     [em->start, em->start + em->len - post] if pre == 0
2586 	 */
2587 
2588 	if (pre) {
2589 		/* Insert the middle extent_map */
2590 		split_mid->start = em->start + pre;
2591 		split_mid->len = em->len - pre - post;
2592 		split_mid->orig_start = split_mid->start;
2593 		split_mid->block_start = em->block_start + pre;
2594 		split_mid->block_len = split_mid->len;
2595 		split_mid->orig_block_len = split_mid->block_len;
2596 		split_mid->ram_bytes = split_mid->len;
2597 		split_mid->flags = flags;
2598 		split_mid->compress_type = em->compress_type;
2599 		split_mid->generation = em->generation;
2600 		add_extent_mapping(em_tree, split_mid, 1);
2601 	}
2602 
2603 	if (post) {
2604 		split_post->start = em->start + em->len - post;
2605 		split_post->len = post;
2606 		split_post->orig_start = split_post->start;
2607 		split_post->block_start = em->block_start + em->len - post;
2608 		split_post->block_len = split_post->len;
2609 		split_post->orig_block_len = split_post->block_len;
2610 		split_post->ram_bytes = split_post->len;
2611 		split_post->flags = flags;
2612 		split_post->compress_type = em->compress_type;
2613 		split_post->generation = em->generation;
2614 		add_extent_mapping(em_tree, split_post, 1);
2615 	}
2616 
2617 	/* Once for us */
2618 	free_extent_map(em);
2619 	/* Once for the tree */
2620 	free_extent_map(em);
2621 
2622 out_unlock:
2623 	write_unlock(&em_tree->lock);
2624 	unlock_extent(&inode->io_tree, start, start + len - 1);
2625 out:
2626 	free_extent_map(split_pre);
2627 	free_extent_map(split_mid);
2628 	free_extent_map(split_post);
2629 
2630 	return ret;
2631 }
2632 
2633 static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
2634 					   struct bio *bio, loff_t file_offset)
2635 {
2636 	struct btrfs_ordered_extent *ordered;
2637 	u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
2638 	u64 file_len;
2639 	u64 len = bio->bi_iter.bi_size;
2640 	u64 end = start + len;
2641 	u64 ordered_end;
2642 	u64 pre, post;
2643 	int ret = 0;
2644 
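	/*
	 * A zone append bio must stay within a single ordered extent.  If
	 * this bio covers only part of the ordered extent, split off the
	 * ranges before and after it (pre/post) so that each piece can be
	 * completed independently.
	 */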
2645 	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
2646 	if (WARN_ON_ONCE(!ordered))
2647 		return BLK_STS_IOERR;
2648 
2649 	/* No need to split */
2650 	if (ordered->disk_num_bytes == len)
2651 		goto out;
2652 
2653 	/* We cannot split an ordered extent that has already been end_bio'd */
2654 	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
2655 		ret = -EINVAL;
2656 		goto out;
2657 	}
2658 
2659 	/* We cannot split a compressed ordered extent */
2660 	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
2661 		ret = -EINVAL;
2662 		goto out;
2663 	}
2664 
2665 	ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
2666 	/* bio must be in one ordered extent */
2667 	if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
2668 		ret = -EINVAL;
2669 		goto out;
2670 	}
2671 
2672 	/* Checksum list should be empty */
2673 	if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
2674 		ret = -EINVAL;
2675 		goto out;
2676 	}
2677 
2678 	file_len = ordered->num_bytes;
2679 	pre = start - ordered->disk_bytenr;
2680 	post = ordered_end - end;
2681 
2682 	ret = btrfs_split_ordered_extent(ordered, pre, post);
2683 	if (ret)
2684 		goto out;
2685 	ret = split_zoned_em(inode, file_offset, file_len, pre, post);
2686 
2687 out:
2688 	btrfs_put_ordered_extent(ordered);
2689 
2690 	return errno_to_blk_status(ret);
2691 }
2692 
2693 /*
2694  * extent_io.c submission hook. This does the right thing for csum calculation
2695  * on write, or reading the csums from the tree before a read.
2696  *
2697  * Rules about async/sync submit,
2698  * a) read:				sync submit
2699  *
2700  * b) write without checksum:		sync submit
2701  *
2702  * c) write with checksum:
2703  *    c-1) if bio is issued by fsync:	sync submit
2704  *         (sync_writers != 0)
2705  *
2706  *    c-2) if root is reloc root:	sync submit
2707  *         (only in case of buffered IO)
2708  *
2709  *    c-3) otherwise:			async submit
2710  */
2711 void btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
2712 			   int mirror_num, enum btrfs_compression_type compress_type)
2713 {
2714 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2715 	struct btrfs_root *root = BTRFS_I(inode)->root;
2716 	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
2717 	blk_status_t ret = 0;
2718 	int skip_sum;
2719 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2720 
2721 	skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
2722 		test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2723 
2724 	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2725 		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2726 
2727 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
2728 		struct page *page = bio_first_bvec_all(bio)->bv_page;
2729 		loff_t file_offset = page_offset(page);
2730 
2731 		ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
2732 		if (ret)
2733 			goto out;
2734 	}
2735 
2736 	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
2737 		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2738 		if (ret)
2739 			goto out;
2740 
2741 		if (compress_type != BTRFS_COMPRESS_NONE) {
2742 			/*
2743 			 * btrfs_submit_compressed_read will handle completing
2744 			 * the bio if there were any errors, so just return
2745 			 * here.
2746 			 */
2747 			btrfs_submit_compressed_read(inode, bio, mirror_num);
2748 			return;
2749 		} else {
2750 			/*
2751 			 * Lookup bio sums does extra checks around whether we
2752 			 * need to csum or not, which is why we ignore skip_sum
2753 			 * here.
2754 			 */
2755 			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2756 			if (ret)
2757 				goto out;
2758 		}
2759 		goto mapit;
2760 	} else if (async && !skip_sum) {
2761 		/* csum items have already been cloned */
2762 		if (btrfs_is_data_reloc_root(root))
2763 			goto mapit;
2764 		/* we're doing a write, do the async checksumming */
2765 		ret = btrfs_wq_submit_bio(inode, bio, mirror_num,
2766 					  0, btrfs_submit_bio_start);
2767 		goto out;
2768 	} else if (!skip_sum) {
2769 		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
2770 		if (ret)
2771 			goto out;
2772 	}
2773 
2774 mapit:
2775 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
2776 
2777 out:
2778 	if (ret) {
2779 		bio->bi_status = ret;
2780 		bio_endio(bio);
2781 	}
2782 }
2783 
2784 /*
2785  * Given a list of ordered sums, record them in the inode.  This happens
2786  * at IO completion time based on sums calculated at bio submission time.
2787  */
2788 static int add_pending_csums(struct btrfs_trans_handle *trans,
2789 			     struct list_head *list)
2790 {
2791 	struct btrfs_ordered_sum *sum;
2792 	struct btrfs_root *csum_root = NULL;
2793 	int ret;
2794 
2795 	list_for_each_entry(sum, list, list) {
2796 		trans->adding_csums = true;
2797 		if (!csum_root)
2798 			csum_root = btrfs_csum_root(trans->fs_info,
2799 						    sum->bytenr);
2800 		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2801 		trans->adding_csums = false;
2802 		if (ret)
2803 			return ret;
2804 	}
2805 	return 0;
2806 }
2807 
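/*
 * Mark the parts of [start, start + len) that are currently holes (no
 * extent maps backing them) with EXTENT_DELALLOC_NEW, so that delalloc
 * accounting can tell writes to brand new file ranges apart from
 * overwrites of existing data.
 */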
2808 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2809 					 const u64 start,
2810 					 const u64 len,
2811 					 struct extent_state **cached_state)
2812 {
2813 	u64 search_start = start;
2814 	const u64 end = start + len - 1;
2815 
2816 	while (search_start < end) {
2817 		const u64 search_len = end - search_start + 1;
2818 		struct extent_map *em;
2819 		u64 em_len;
2820 		int ret = 0;
2821 
2822 		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
2823 		if (IS_ERR(em))
2824 			return PTR_ERR(em);
2825 
2826 		if (em->block_start != EXTENT_MAP_HOLE)
2827 			goto next;
2828 
2829 		em_len = em->len;
2830 		if (em->start < search_start)
2831 			em_len -= search_start - em->start;
2832 		if (em_len > search_len)
2833 			em_len = search_len;
2834 
2835 		ret = set_extent_bit(&inode->io_tree, search_start,
2836 				     search_start + em_len - 1,
2837 				     EXTENT_DELALLOC_NEW, 0, NULL, cached_state,
2838 				     GFP_NOFS, NULL);
2839 next:
2840 		search_start = extent_map_end(em);
2841 		free_extent_map(em);
2842 		if (ret)
2843 			return ret;
2844 	}
2845 	return 0;
2846 }
2847 
2848 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2849 			      unsigned int extra_bits,
2850 			      struct extent_state **cached_state)
2851 {
2852 	WARN_ON(PAGE_ALIGNED(end));
2853 
2854 	if (start >= i_size_read(&inode->vfs_inode) &&
2855 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2856 		/*
2857 		 * There can't be any extents following eof in this case so just
2858 		 * set the delalloc new bit for the range directly.
2859 		 */
2860 		extra_bits |= EXTENT_DELALLOC_NEW;
2861 	} else {
2862 		int ret;
2863 
2864 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2865 						    end + 1 - start,
2866 						    cached_state);
2867 		if (ret)
2868 			return ret;
2869 	}
2870 
2871 	return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
2872 				   cached_state);
2873 }
2874 
2875 /* see btrfs_writepage_cow_fixup for details on why this is required */
2876 struct btrfs_writepage_fixup {
2877 	struct page *page;
2878 	struct inode *inode;
2879 	struct btrfs_work work;
2880 };
2881 
2882 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2883 {
2884 	struct btrfs_writepage_fixup *fixup;
2885 	struct btrfs_ordered_extent *ordered;
2886 	struct extent_state *cached_state = NULL;
2887 	struct extent_changeset *data_reserved = NULL;
2888 	struct page *page;
2889 	struct btrfs_inode *inode;
2890 	u64 page_start;
2891 	u64 page_end;
2892 	int ret = 0;
2893 	bool free_delalloc_space = true;
2894 
2895 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
2896 	page = fixup->page;
2897 	inode = BTRFS_I(fixup->inode);
2898 	page_start = page_offset(page);
2899 	page_end = page_offset(page) + PAGE_SIZE - 1;
2900 
2901 	/*
2902 	 * This is similar to page_mkwrite, we need to reserve the space before
2903 	 * we take the page lock.
2904 	 */
2905 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2906 					   PAGE_SIZE);
2907 again:
2908 	lock_page(page);
2909 
2910 	/*
2911 	 * Before we queued this fixup, we took a reference on the page.
2912 	 * page->mapping may go NULL, but it shouldn't be moved to a different
2913 	 * address space.
2914 	 */
2915 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2916 		/*
2917 		 * Unfortunately this is a little tricky, either
2918 		 *
2919 		 * 1) We got here and our page had already been dealt with and
2920 		 *    we reserved our space, thus ret == 0, so we need to just
2921 		 *    drop our space reservation and bail.  This can happen the
2922 		 *    first time we come into the fixup worker, or could happen
2923 		 *    while waiting for the ordered extent.
2924 		 * 2) Our page was already dealt with, but we happened to get an
2925 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2926 		 *    this case we obviously don't have anything to release, but
2927 		 *    because the page was already dealt with we don't want to
2928 		 *    mark the page with an error, so make sure we're resetting
2929 		 *    ret to 0.  This is why we have this check _before_ the ret
2930 		 *    check, because we do not want to have a surprise ENOSPC
2931 		 *    when the page was already properly dealt with.
2932 		 */
2933 		if (!ret) {
2934 			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2935 			btrfs_delalloc_release_space(inode, data_reserved,
2936 						     page_start, PAGE_SIZE,
2937 						     true);
2938 		}
2939 		ret = 0;
2940 		goto out_page;
2941 	}
2942 
2943 	/*
2944 	 * We can't mess with the page state unless it is locked, so now that
2945 	 * it is locked bail if we failed to make our space reservation.
2946 	 */
2947 	if (ret)
2948 		goto out_page;
2949 
2950 	lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
2951 
2952 	/* already ordered? We're done */
2953 	if (PageOrdered(page))
2954 		goto out_reserved;
2955 
2956 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2957 	if (ordered) {
2958 		unlock_extent_cached(&inode->io_tree, page_start, page_end,
2959 				     &cached_state);
2960 		unlock_page(page);
2961 		btrfs_start_ordered_extent(ordered, 1);
2962 		btrfs_put_ordered_extent(ordered);
2963 		goto again;
2964 	}
2965 
2966 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2967 					&cached_state);
2968 	if (ret)
2969 		goto out_reserved;
2970 
2971 	/*
2972 	 * Everything went as planned, we're now the owner of a dirty page with
2973 	 * delayed allocation bits set and space reserved for our COW
2974 	 * destination.
2975 	 *
2976 	 * The page was dirty when we started, nothing should have cleaned it.
2977 	 */
2978 	BUG_ON(!PageDirty(page));
2979 	free_delalloc_space = false;
2980 out_reserved:
2981 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2982 	if (free_delalloc_space)
2983 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2984 					     PAGE_SIZE, true);
2985 	unlock_extent_cached(&inode->io_tree, page_start, page_end,
2986 			     &cached_state);
2987 out_page:
2988 	if (ret) {
2989 		/*
2990 		 * We hit ENOSPC or other errors.  Update the mapping and page
2991 		 * to reflect the errors and clean the page.
2992 		 */
2993 		mapping_set_error(page->mapping, ret);
2994 		end_extent_writepage(page, ret, page_start, page_end);
2995 		clear_page_dirty_for_io(page);
2996 		SetPageError(page);
2997 	}
2998 	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
2999 	unlock_page(page);
3000 	put_page(page);
3001 	kfree(fixup);
3002 	extent_changeset_free(data_reserved);
3003 	/*
3004 	 * As a precaution, do a delayed iput in case it would be the last iput
3005 	 * that could need flushing space. Recursing back to fixup worker would
3006 	 * deadlock.
3007 	 */
3008 	btrfs_add_delayed_iput(&inode->vfs_inode);
3009 }
3010 
3011 /*
3012  * There are a few paths in the higher layers of the kernel that directly
3013  * set the page dirty bit without asking the filesystem if it is a
3014  * good idea.  This causes problems because we want to make sure COW
3015  * properly happens and the data=ordered rules are followed.
3016  *
3017  * In our case any range that doesn't have the ORDERED bit set
3018  * hasn't been properly setup for IO.  We kick off an async process
3019  * to fix it up.  The async helper will wait for ordered extents, set
3020  * the delalloc bit and make it safe to write the page.
3021  */
3022 int btrfs_writepage_cow_fixup(struct page *page)
3023 {
3024 	struct inode *inode = page->mapping->host;
3025 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3026 	struct btrfs_writepage_fixup *fixup;
3027 
3028 	/* This page has ordered extent covering it already */
3029 	if (PageOrdered(page))
3030 		return 0;
3031 
3032 	/*
3033 	 * PageChecked is set below when we create a fixup worker for this page,
3034 	 * don't try to create another one if we're already PageChecked()
3035 	 *
3036 	 * The extent_io writepage code will redirty the page if we send back
3037 	 * EAGAIN.
3038 	 */
3039 	if (PageChecked(page))
3040 		return -EAGAIN;
3041 
3042 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
3043 	if (!fixup)
3044 		return -EAGAIN;
3045 
3046 	/*
3047 	 * We are already holding a reference to this inode from
3048 	 * write_cache_pages.  We need to hold it because the space reservation
3049 	 * takes place outside of the page lock, and we can't trust
3050 	 * page->mapping outside of the page lock.
3051 	 */
3052 	ihold(inode);
3053 	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
3054 	get_page(page);
3055 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
3056 	fixup->page = page;
3057 	fixup->inode = inode;
3058 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
3059 
3060 	return -EAGAIN;
3061 }
3062 
3063 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
3064 				       struct btrfs_inode *inode, u64 file_pos,
3065 				       struct btrfs_file_extent_item *stack_fi,
3066 				       const bool update_inode_bytes,
3067 				       u64 qgroup_reserved)
3068 {
3069 	struct btrfs_root *root = inode->root;
3070 	const u64 sectorsize = root->fs_info->sectorsize;
3071 	struct btrfs_path *path;
3072 	struct extent_buffer *leaf;
3073 	struct btrfs_key ins;
3074 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
3075 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
3076 	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
3077 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
3078 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
3079 	struct btrfs_drop_extents_args drop_args = { 0 };
3080 	int ret;
3081 
3082 	path = btrfs_alloc_path();
3083 	if (!path)
3084 		return -ENOMEM;
3085 
3086 	/*
3087 	 * we may be replacing one extent in the tree with another.
3088 	 * The new extent is pinned in the extent map, and we don't want
3089 	 * to drop it from the cache until it is completely in the btree.
3090 	 *
3091 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
3092 	 * the caller is expected to unpin it and allow it to be merged
3093 	 * with the others.
3094 	 */
3095 	drop_args.path = path;
3096 	drop_args.start = file_pos;
3097 	drop_args.end = file_pos + num_bytes;
3098 	drop_args.replace_extent = true;
3099 	drop_args.extent_item_size = sizeof(*stack_fi);
3100 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
3101 	if (ret)
3102 		goto out;
3103 
3104 	if (!drop_args.extent_inserted) {
3105 		ins.objectid = btrfs_ino(inode);
3106 		ins.offset = file_pos;
3107 		ins.type = BTRFS_EXTENT_DATA_KEY;
3108 
3109 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
3110 					      sizeof(*stack_fi));
3111 		if (ret)
3112 			goto out;
3113 	}
3114 	leaf = path->nodes[0];
3115 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3116 	write_extent_buffer(leaf, stack_fi,
3117 			btrfs_item_ptr_offset(leaf, path->slots[0]),
3118 			sizeof(struct btrfs_file_extent_item));
3119 
3120 	btrfs_mark_buffer_dirty(leaf);
3121 	btrfs_release_path(path);
3122 
3123 	/*
3124 	 * If we dropped an inline extent here, we know the range where it is
3125 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
3126 	 * number of bytes only for that range containing the inline extent.
3127 	 * The rest of the range will be processed when clearing the
3128 	 * EXTENT_DELALLOC bit through the ordered extent completion.
3129 	 */
3130 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3131 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3132 
3133 		inline_size = drop_args.bytes_found - inline_size;
3134 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3135 		drop_args.bytes_found -= inline_size;
3136 		num_bytes -= sectorsize;
3137 	}
3138 
3139 	if (update_inode_bytes)
3140 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3141 
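	/*
	 * Now add the extent item and its reference for the new data extent
	 * in the extent tree, keyed by disk bytenr and disk length.
	 */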
	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos - offset,
					       qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}

static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}

static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
		num_bytes = ram_bytes = oe->truncated_len;
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0 */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}

/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers is
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

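	/*
	 * Only buffered COW writes go through the delalloc path that tags the
	 * range with EXTENT_DELALLOC_NEW, so schedule clearing that bit only
	 * when none of the nocow/prealloc/direct/encoded flags are set.
	 */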
	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	/* A valid bdev implies a write on a sequential zone */
	if (ordered_extent->bdev) {
		btrfs_rewrite_logical_zoned(ordered_extent);
		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes);
	}

	btrfs_free_io_failure_record(inode, start, end);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		if (freespace_inode)
			trans = btrfs_join_transaction_spacecache(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &inode->block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	clear_bits |= EXTENT_LOCKED;
	lock_extent_bits(io_tree, start, end, &cached_state);

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes);
		}
	}
	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
			   ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 0, 0, &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = 0;
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set.  Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
					     &ordered_extent->flags))
			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/* Drop the cache for the part of the extent we didn't write. */
		btrfs_drop_extent_cache(inode, unwritten_start, end, 0);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator.  We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;
	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
					  struct page *page, u64 start,
					  u64 end, bool uptodate)
{
	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);

	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start,
				       finish_ordered_fn, uptodate);
}

/*
 * check_data_csum - verify the checksum of one sector of uncompressed data
 * @inode:	inode
 * @bbio:	btrfs_bio which contains the csum
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @page:	page where the data to be verified is located
 * @pgoff:	offset inside the page
 * @start:	logical offset in the file
 *
 * The length of the check is always one sector.
 */
static int check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
			   u32 bio_offset, struct page *page, u32 pgoff,
			   u64 start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	u32 len = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	unsigned int offset_sectors;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(pgoff + len <= PAGE_SIZE);

	offset_sectors = bio_offset >> fs_info->sectorsize_bits;
	csum_expected = ((u8 *)bbio->csum) + offset_sectors * csum_size;
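	/*
	 * The csum array holds one checksum per sector, in bio order. For
	 * example, with 4K sectors and crc32c (4 byte csums), a bio_offset of
	 * 8192 selects the third entry: csum_expected = csum + 2 * 4.
	 */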

	kaddr = kmap_atomic(page);
	shash->tfm = fs_info->csum_shash;

	crypto_shash_digest(shash, kaddr + pgoff, len, csum);
	kunmap_atomic(kaddr);

	if (memcmp(csum, csum_expected, csum_size))
		goto zeroit;

	return 0;
zeroit:
	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
				    bbio->mirror_num);
	if (bbio->device)
		btrfs_dev_stat_inc_and_print(bbio->device,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_page(page, pgoff, len);
	return -EIO;
}

/*
 * When reads are done, we need to check csums to verify the data is correct.
 * If there's a match, we allow the bio to finish.  If not, the code in
 * extent_io.c will try to find good copies for us.
 *
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @start:	file offset of the range start
 * @end:	file offset of the range end (inclusive)
 *
 * Return a bitmap where a set bit means a csum mismatch and a clear bit means
 * the csum matched.
 */
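/*
 * For example, with a 4K sector size a 16K range covers four sectors; a
 * return value of 0x4 (only bit 2 set) would mean only the third sector
 * failed its checksum.
 */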
unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
				    u32 bio_offset, struct page *page,
				    u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	const u32 sectorsize = root->fs_info->sectorsize;
	u32 pg_off;
	unsigned int result = 0;

	if (btrfs_page_test_checked(fs_info, page, start, end + 1 - start)) {
		btrfs_page_clear_checked(fs_info, page, start, end + 1 - start);
		return 0;
	}

	/*
	 * This only happens for NODATASUM or compressed reads.
	 * Normally this should be covered by the above check for compressed
	 * reads or the next check for NODATASUM.  Just exit early here.
	 */
	if (bbio->csum == NULL)
		return 0;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (unlikely(test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)))
		return 0;

	ASSERT(page_offset(page) <= start &&
	       end <= page_offset(page) + PAGE_SIZE - 1);
	for (pg_off = offset_in_page(start);
	     pg_off < offset_in_page(end);
	     pg_off += sectorsize, bio_offset += sectorsize) {
		u64 file_offset = pg_off + page_offset(page);
		int ret;

		if (btrfs_is_data_reloc_root(root) &&
		    test_range_bit(io_tree, file_offset,
				   file_offset + sectorsize - 1,
				   EXTENT_NODATASUM, 1, NULL)) {
			/* Skip the range without csum for data reloc inode */
			clear_extent_bits(io_tree, file_offset,
					  file_offset + sectorsize - 1,
					  EXTENT_NODATASUM);
			continue;
		}
		ret = check_data_csum(inode, bbio, bio_offset, page, pg_off,
				      page_offset(page) + pg_off);
		if (ret < 0) {
			const int nr_bit = (pg_off - offset_in_page(start)) >>
				     root->fs_info->sectorsize_bits;

			result |= (1U << nr_bit);
		}
	}
	return result;
}

/*
 * btrfs_add_delayed_iput - perform a delayed iput on @inode
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);

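	/*
	 * atomic_add_unless() decrements i_count unless it is 1 and returns
	 * true if it did, i.e. we were not holding the last reference and a
	 * plain decrement is enough.
	 */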
	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	spin_lock(&fs_info->delayed_iput_lock);
	ASSERT(list_empty(&binode->delayed_iput));
	list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}

static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock(&fs_info->delayed_iput_lock);
}

static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
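	/*
	 * list_empty() is checked again under the lock: the unlocked check
	 * only avoids taking the spinlock in the common case where there is
	 * no pending delayed iput for this inode.
	 */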
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock(&fs_info->delayed_iput_lock);
	}
}

void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
				struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		cond_resched_lock(&fs_info->delayed_iput_lock);
	}
	spin_unlock(&fs_info->delayed_iput_lock);
}

/**
 * Wait for all delayed iputs to complete
 *
 * @fs_info:  the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set.  Once they are all done running we will return, unless we are killed in
 * which case we return -EINTR. This helps in user operations like fallocate
 * etc. that might get blocked on the iputs.
 *
 * Return: -EINTR if we were killed, 0 if nothing's pending.
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}

/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}

/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
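	/*
	 * An offset of (u64)-1 makes each search land just past the last
	 * orphan item, so the loop below steps back one slot per iteration
	 * and walks the orphan items from highest inode number to lowest.
	 */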

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * ret == 0 means we found the key we searched for, which is
		 * weird but possible. Only step back in the path if we did
		 * not find the key, and then check whether the previous item
		 * matches what we want.
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * This is essentially btrfs_lookup(), without crossing into
		 * another root. The inode number is stored in the offset of
		 * the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			btrfs_err(fs_info,
				  "Error removing orphan entry, stopping orphan cleanup");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, last_objectid, root);
		ret = PTR_ERR_OR_ZERO(inode);
		if (ret && ret != -ENOENT)
			goto out;

		if (ret == -ENOENT && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
							 (unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}

		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 * file. In that case, the orphan item represents incomplete
		 * fsverity metadata which must be cleaned up with
		 * btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 * orphan item for truncate indicating that there were possibly
		 * extent items past i_size that needed to be deleted. In v3.12,
		 * truncate was changed to update i_size in sync with the extent
		 * items, but the (useless) orphan item was still created. Since
		 * v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (ret == -ENOENT || inode->i_nlink) {
			if (!ret) {
				ret = btrfs_drop_verity_items(BTRFS_I(inode));
				iput(inode);
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}

/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

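	/*
	 * The name hashes are computed once and cached in static variables;
	 * btrfs_name_hash() is deterministic, so a racy first initialization
	 * is harmless.
	 */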
	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}

/*
 * read an inode from the btree into the in-memory inode
 */
static int btrfs_read_locked_inode(struct inode *inode,
				   struct btrfs_path *in_path)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = in_path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		if (path != in_path)
			btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * cache.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in delayed_nodes_tree.
	 */
	if (BTRFS_I(inode)->last_trans == fs_info->generation)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  root->root_key.objectid, ret);
	}
	if (path != in_path)
		btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	return 0;
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;
	u64 flags;

	btrfs_init_map_token(&token, leaf);

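	/*
	 * The map token caches the extent buffer mapping across the many
	 * consecutive set operations below, avoiding a fresh page lookup for
	 * each field.
	 */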
	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}

/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}

int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const char *name, int name_len,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				    name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have a dir index, we have to get it by looking up the
	 * inode ref. Since we then already have the inode ref at hand, we
	 * remove it directly; there is no point in deferring the deletion.
	 *
	 * But if we do have the dir index, there is no need to search for the
	 * inode ref to get it. Since the inode ref is close to the inode
	 * item, it is better to delay its deletion and do it together with
	 * the inode item update.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name_len, name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
					   dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
					     index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context.  If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks.  Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
		dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const char *name, int name_len)
{
	int ret;
	ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode->root, inode);
	}
	return ret;
}

/*
 * helper to start a transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs: they do not always free space, so
 * if we cannot make our reservations the normal way, try to see if there is
 * plenty of slack room in the global reserve to migrate from; otherwise we
 * cannot allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;

	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode
	 * 1 for the parent inode
	 */
	return btrfs_start_transaction_fallback_global_rsv(root, 6);
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
			0);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
			dentry->d_name.len);
	if (ret)
		goto out;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto out;
	}

out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
	return ret;
}

static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	const char *name = dentry->d_name.name;
	int name_len = dentry->d_name.len;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(BTRFS_I(dir));

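	/*
	 * The root dir of a subvolume has inode number
	 * BTRFS_FIRST_FREE_OBJECTID, while a placeholder directory for a
	 * subvolume that a snapshot had no reference to uses
	 * BTRFS_EMPTY_SUBVOL_DIR_OBJECTID. Anything else cannot be unlinked
	 * as a subvolume.
	 */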
	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = inode->root->root_key.objectid;
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->location.objectid;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation.  In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it.  Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 root->root_key.objectid, dir_ino,
					 &index, name, name_len);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
	inode_inc_iversion(dir);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper to check if the subvolume references other subvolumes or if it is
 * the default subvolume.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, "default", 7, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;
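	/*
	 * objectid tracks the lowest inode number not visited yet, so the
	 * rbtree walk below can be restarted from the right place whenever
	 * root->inode_lock has to be dropped.
	 */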

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(entry) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from the inode
			 * cache when its usage count hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}

int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	int ret;

	/*
	 * Don't allow deleting a subvolume with a send in progress. This is
	 * inside the inode lock, so the error handling that has to drop the
	 * bit again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
		return -EPERM;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   dest->root_key.objectid);
		return -EPERM;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	down_write(&fs_info->subvol_sem);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_up_write;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
		sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					fs_info->tree_root,
					dest->root_key.objectid);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					  dest->root_item.received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv);
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}

4793 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4794 {
4795 	struct inode *inode = d_inode(dentry);
4796 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4797 	int err = 0;
4798 	struct btrfs_trans_handle *trans;
4799 	u64 last_unlink_trans;
4800 
4801 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4802 		return -ENOTEMPTY;
4803 	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4804 		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4805 			btrfs_err(fs_info,
4806 			"extent tree v2 doesn't support snapshot deletion yet");
4807 			return -EOPNOTSUPP;
4808 		}
4809 		return btrfs_delete_subvolume(dir, dentry);
4810 	}
4811 
4812 	trans = __unlink_start_trans(dir);
4813 	if (IS_ERR(trans))
4814 		return PTR_ERR(trans);
4815 
4816 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4817 		err = btrfs_unlink_subvol(trans, dir, dentry);
4818 		goto out;
4819 	}
4820 
4821 	err = btrfs_orphan_add(trans, BTRFS_I(inode));
4822 	if (err)
4823 		goto out;
4824 
4825 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4826 
4827 	/* now the directory is empty */
4828 	err = btrfs_unlink_inode(trans, BTRFS_I(dir),
4829 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4830 			dentry->d_name.len);
4831 	if (!err) {
4832 		btrfs_i_size_write(BTRFS_I(inode), 0);
4833 		/*
4834 		 * Propagate the last_unlink_trans value of the deleted dir to
4835 		 * its parent directory. This is to prevent an unrecoverable
4836 		 * log tree in the case we do something like this:
4837 		 * 1) create dir foo
4838 		 * 2) create snapshot under dir foo
4839 		 * 3) delete the snapshot
4840 		 * 4) rmdir foo
4841 		 * 5) mkdir foo
4842 		 * 6) fsync foo or some file inside foo
4843 		 */
4844 		if (last_unlink_trans >= trans->transid)
4845 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4846 	}
4847 out:
4848 	btrfs_end_transaction(trans);
4849 	btrfs_btree_balance_dirty(fs_info);
4850 
4851 	return err;
4852 }
4853 
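/*
 * Illustrative usage (this mirrors what hole punching does in file.c): zero
 * the partial block at each end of the punched range before dropping the
 * whole extents in between:
 *
 *	btrfs_truncate_block(inode, offset, 0, 0);	 zeroes [offset, end of block)
 *	btrfs_truncate_block(inode, offset + len, 0, 1); zeroes [start of block, offset + len)
 */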
4854 /*
4855  * btrfs_truncate_block - read a block, zero part of it and write it back
4856  * @inode - inode that we're zeroing
4857  * @from - the offset to start zeroing at
4858  * @len - the length to zero, or 0 to zero from @from to the end of the block
4859  * @front - if set, zero from the start of the block up to @from instead of
4860  *	zeroing from @from onwards
4861  *
4862  * This finds the block containing the "from" offset, COWs it and zeroes the
4863  * part we want.  This is used for truncate and hole punching.
4864  */
4865 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4866 			 int front)
4867 {
4868 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4869 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4870 	struct extent_io_tree *io_tree = &inode->io_tree;
4871 	struct btrfs_ordered_extent *ordered;
4872 	struct extent_state *cached_state = NULL;
4873 	struct extent_changeset *data_reserved = NULL;
4874 	bool only_release_metadata = false;
4875 	u32 blocksize = fs_info->sectorsize;
4876 	pgoff_t index = from >> PAGE_SHIFT;
4877 	unsigned offset = from & (blocksize - 1);
4878 	struct page *page;
4879 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4880 	size_t write_bytes = blocksize;
4881 	int ret = 0;
4882 	u64 block_start;
4883 	u64 block_end;
4884 
4885 	if (IS_ALIGNED(offset, blocksize) &&
4886 	    (!len || IS_ALIGNED(len, blocksize)))
4887 		goto out;
4888 
4889 	block_start = round_down(from, blocksize);
4890 	block_end = block_start + blocksize - 1;
4891 
4892 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4893 					  blocksize);
4894 	if (ret < 0) {
4895 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
4896 			/* For nocow case, no need to reserve data space */
4897 			only_release_metadata = true;
4898 		} else {
4899 			goto out;
4900 		}
4901 	}
4902 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4903 	if (ret < 0) {
4904 		if (!only_release_metadata)
4905 			btrfs_free_reserved_data_space(inode, data_reserved,
4906 						       block_start, blocksize);
4907 		goto out;
4908 	}
4909 again:
4910 	page = find_or_create_page(mapping, index, mask);
4911 	if (!page) {
4912 		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4913 					     blocksize, true);
4914 		btrfs_delalloc_release_extents(inode, blocksize);
4915 		ret = -ENOMEM;
4916 		goto out;
4917 	}
4918 	ret = set_page_extent_mapped(page);
4919 	if (ret < 0)
4920 		goto out_unlock;
4921 
4922 	if (!PageUptodate(page)) {
4923 		ret = btrfs_read_folio(NULL, page_folio(page));
4924 		lock_page(page);
4925 		if (page->mapping != mapping) {
4926 			unlock_page(page);
4927 			put_page(page);
4928 			goto again;
4929 		}
4930 		if (!PageUptodate(page)) {
4931 			ret = -EIO;
4932 			goto out_unlock;
4933 		}
4934 	}
4935 	wait_on_page_writeback(page);
4936 
4937 	lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4938 
4939 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4940 	if (ordered) {
4941 		unlock_extent_cached(io_tree, block_start, block_end,
4942 				     &cached_state);
4943 		unlock_page(page);
4944 		put_page(page);
4945 		btrfs_start_ordered_extent(ordered, 1);
4946 		btrfs_put_ordered_extent(ordered);
4947 		goto again;
4948 	}
4949 
4950 	clear_extent_bit(&inode->io_tree, block_start, block_end,
4951 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4952 			 0, 0, &cached_state);
4953 
4954 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4955 					&cached_state);
4956 	if (ret) {
4957 		unlock_extent_cached(io_tree, block_start, block_end,
4958 				     &cached_state);
4959 		goto out_unlock;
4960 	}
4961 
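	/*
	 * Zero the requested part of the block: with @front set this is
	 * [block_start, from), otherwise [from, from + len).
	 */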
4962 	if (offset != blocksize) {
4963 		if (!len)
4964 			len = blocksize - offset;
4965 		if (front)
4966 			memzero_page(page, (block_start - page_offset(page)),
4967 				     offset);
4968 		else
4969 			memzero_page(page, (block_start - page_offset(page)) + offset,
4970 				     len);
4971 		flush_dcache_page(page);
4972 	}
4973 	btrfs_page_clear_checked(fs_info, page, block_start,
4974 				 block_end + 1 - block_start);
4975 	btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
4976 	unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
4977 
4978 	if (only_release_metadata)
4979 		set_extent_bit(&inode->io_tree, block_start, block_end,
4980 			       EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL);
4981 
4982 out_unlock:
4983 	if (ret) {
4984 		if (only_release_metadata)
4985 			btrfs_delalloc_release_metadata(inode, blocksize, true);
4986 		else
4987 			btrfs_delalloc_release_space(inode, data_reserved,
4988 					block_start, blocksize, true);
4989 	}
4990 	btrfs_delalloc_release_extents(inode, blocksize);
4991 	unlock_page(page);
4992 	put_page(page);
4993 out:
4994 	if (only_release_metadata)
4995 		btrfs_check_nocow_unlock(inode);
4996 	extent_changeset_free(data_reserved);
4997 	return ret;
4998 }
4999 
5000 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
5001 			     u64 offset, u64 len)
5002 {
5003 	struct btrfs_fs_info *fs_info = root->fs_info;
5004 	struct btrfs_trans_handle *trans;
5005 	struct btrfs_drop_extents_args drop_args = { 0 };
5006 	int ret;
5007 
5008 	/*
5009 	 * If NO_HOLES is enabled, we don't need to do anything.
5010 	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
5011 	 * or btrfs_update_inode() will be called, which guarantee that the next
5012 	 * fsync will know this inode was changed and needs to be logged.
5013 	 */
5014 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
5015 		return 0;
5016 
5017 	/*
5018 	 * 1 - for the one we're dropping
5019 	 * 1 - for the one we're adding
5020 	 * 1 - for updating the inode.
5021 	 */
5022 	trans = btrfs_start_transaction(root, 3);
5023 	if (IS_ERR(trans))
5024 		return PTR_ERR(trans);
5025 
5026 	drop_args.start = offset;
5027 	drop_args.end = offset + len;
5028 	drop_args.drop_cache = true;
5029 
5030 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
5031 	if (ret) {
5032 		btrfs_abort_transaction(trans, ret);
5033 		btrfs_end_transaction(trans);
5034 		return ret;
5035 	}
5036 
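	/*
	 * Insert an explicit hole: disk_bytenr and disk_num_bytes of 0 mark
	 * the file extent item as a hole, while num_bytes and ram_bytes cover
	 * @len with no compression or encryption.
	 */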
5037 	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
5038 			offset, 0, 0, len, 0, len, 0, 0, 0);
5039 	if (ret) {
5040 		btrfs_abort_transaction(trans, ret);
5041 	} else {
5042 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
5043 		btrfs_update_inode(trans, root, inode);
5044 	}
5045 	btrfs_end_transaction(trans);
5046 	return ret;
5047 }
5048 
5049 /*
5050  * This function puts in dummy file extents for the area we're creating a hole
5051  * for.  So if we are truncating this file to a larger size we need to insert
5052  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
5053  * the range between oldsize and size
5054  */
5055 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
5056 {
5057 	struct btrfs_root *root = inode->root;
5058 	struct btrfs_fs_info *fs_info = root->fs_info;
5059 	struct extent_io_tree *io_tree = &inode->io_tree;
5060 	struct extent_map *em = NULL;
5061 	struct extent_state *cached_state = NULL;
5062 	struct extent_map_tree *em_tree = &inode->extent_tree;
5063 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5064 	u64 block_end = ALIGN(size, fs_info->sectorsize);
5065 	u64 last_byte;
5066 	u64 cur_offset;
5067 	u64 hole_size;
5068 	int err = 0;
5069 
5070 	/*
5071 	 * If our size started in the middle of a block we need to zero out the
5072 	 * rest of the block before we expand the i_size, otherwise we could
5073 	 * expose stale data.
5074 	 */
5075 	err = btrfs_truncate_block(inode, oldsize, 0, 0);
5076 	if (err)
5077 		return err;
5078 
5079 	if (size <= hole_start)
5080 		return 0;
5081 
5082 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
5083 					   &cached_state);
5084 	cur_offset = hole_start;
5085 	while (1) {
5086 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5087 				      block_end - cur_offset);
5088 		if (IS_ERR(em)) {
5089 			err = PTR_ERR(em);
5090 			em = NULL;
5091 			break;
5092 		}
5093 		last_byte = min(extent_map_end(em), block_end);
5094 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
5095 		hole_size = last_byte - cur_offset;
5096 
5097 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5098 			struct extent_map *hole_em;
5099 
5100 			err = maybe_insert_hole(root, inode, cur_offset,
5101 						hole_size);
5102 			if (err)
5103 				break;
5104 
5105 			err = btrfs_inode_set_file_extent_range(inode,
5106 							cur_offset, hole_size);
5107 			if (err)
5108 				break;
5109 
5110 			btrfs_drop_extent_cache(inode, cur_offset,
5111 						cur_offset + hole_size - 1, 0);
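			/*
			 * Cache the hole in an extent map too.  Failing to
			 * allocate one is not fatal: forcing a full sync
			 * makes the next fsync fall back to the subvolume
			 * tree instead of relying on cached extent maps.
			 */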
5112 			hole_em = alloc_extent_map();
5113 			if (!hole_em) {
5114 				btrfs_set_inode_full_sync(inode);
5115 				goto next;
5116 			}
5117 			hole_em->start = cur_offset;
5118 			hole_em->len = hole_size;
5119 			hole_em->orig_start = cur_offset;
5120 
5121 			hole_em->block_start = EXTENT_MAP_HOLE;
5122 			hole_em->block_len = 0;
5123 			hole_em->orig_block_len = 0;
5124 			hole_em->ram_bytes = hole_size;
5125 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
5126 			hole_em->generation = fs_info->generation;
5127 
5128 			while (1) {
5129 				write_lock(&em_tree->lock);
5130 				err = add_extent_mapping(em_tree, hole_em, 1);
5131 				write_unlock(&em_tree->lock);
5132 				if (err != -EEXIST)
5133 					break;
5134 				btrfs_drop_extent_cache(inode, cur_offset,
5135 							cur_offset +
5136 							hole_size - 1, 0);
5137 			}
5138 			free_extent_map(hole_em);
5139 		} else {
5140 			err = btrfs_inode_set_file_extent_range(inode,
5141 							cur_offset, hole_size);
5142 			if (err)
5143 				break;
5144 		}
5145 next:
5146 		free_extent_map(em);
5147 		em = NULL;
5148 		cur_offset = last_byte;
5149 		if (cur_offset >= block_end)
5150 			break;
5151 	}
5152 	free_extent_map(em);
5153 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5154 	return err;
5155 }
5156 
5157 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5158 {
5159 	struct btrfs_root *root = BTRFS_I(inode)->root;
5160 	struct btrfs_trans_handle *trans;
5161 	loff_t oldsize = i_size_read(inode);
5162 	loff_t newsize = attr->ia_size;
5163 	int mask = attr->ia_valid;
5164 	int ret;
5165 
5166 	/*
5167 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5168 	 * special case where we need to update the times despite not having
5169 	 * these flags set.  For all other operations the VFS set these flags
5170 	 * explicitly if it wants a timestamp update.
5171 	 */
5172 	if (newsize != oldsize) {
5173 		inode_inc_iversion(inode);
5174 		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5175 			inode->i_ctime = inode->i_mtime =
5176 				current_time(inode);
5177 	}
5178 
5179 	if (newsize > oldsize) {
5180 		/*
5181 		 * Don't do an expanding truncate while snapshotting is ongoing.
5182 		 * This is to ensure the snapshot captures a fully consistent
5183 		 * state of this file - if the snapshot captures this expanding
5184 		 * truncation, it must capture all writes that happened before
5185 		 * this truncation.
5186 		 */
5187 		btrfs_drew_write_lock(&root->snapshot_lock);
5188 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5189 		if (ret) {
5190 			btrfs_drew_write_unlock(&root->snapshot_lock);
5191 			return ret;
5192 		}
5193 
5194 		trans = btrfs_start_transaction(root, 1);
5195 		if (IS_ERR(trans)) {
5196 			btrfs_drew_write_unlock(&root->snapshot_lock);
5197 			return PTR_ERR(trans);
5198 		}
5199 
5200 		i_size_write(inode, newsize);
5201 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5202 		pagecache_isize_extended(inode, oldsize, newsize);
5203 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
5204 		btrfs_drew_write_unlock(&root->snapshot_lock);
5205 		btrfs_end_transaction(trans);
5206 	} else {
5207 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5208 
5209 		if (btrfs_is_zoned(fs_info)) {
5210 			ret = btrfs_wait_ordered_range(inode,
5211 					ALIGN(newsize, fs_info->sectorsize),
5212 					(u64)-1);
5213 			if (ret)
5214 				return ret;
5215 		}
5216 
5217 		/*
5218 		 * We're truncating a file that used to have good data down to
5219 		 * zero. Make sure any new writes to the file get on disk
5220 		 * on close.
5221 		 */
5222 		if (newsize == 0)
5223 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5224 				&BTRFS_I(inode)->runtime_flags);
5225 
5226 		truncate_setsize(inode, newsize);
5227 
5228 		inode_dio_wait(inode);
5229 
5230 		ret = btrfs_truncate(inode, newsize == oldsize);
5231 		if (ret && inode->i_nlink) {
5232 			int err;
5233 
5234 			/*
5235 			 * Truncate failed, so fix up the in-memory size. We
5236 			 * adjusted disk_i_size down as we removed extents, so
5237 			 * wait for disk_i_size to be stable and then update the
5238 			 * in-memory size to match.
5239 			 */
5240 			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5241 			if (err)
5242 				return err;
5243 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5244 		}
5245 	}
5246 
5247 	return ret;
5248 }
5249 
5250 static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
5251 			 struct iattr *attr)
5252 {
5253 	struct inode *inode = d_inode(dentry);
5254 	struct btrfs_root *root = BTRFS_I(inode)->root;
5255 	int err;
5256 
5257 	if (btrfs_root_readonly(root))
5258 		return -EROFS;
5259 
5260 	err = setattr_prepare(mnt_userns, dentry, attr);
5261 	if (err)
5262 		return err;
5263 
5264 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5265 		err = btrfs_setsize(inode, attr);
5266 		if (err)
5267 			return err;
5268 	}
5269 
5270 	if (attr->ia_valid) {
5271 		setattr_copy(mnt_userns, inode, attr);
5272 		inode_inc_iversion(inode);
5273 		err = btrfs_dirty_inode(inode);
5274 
5275 		if (!err && attr->ia_valid & ATTR_MODE)
5276 			err = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
5277 	}
5278 
5279 	return err;
5280 }
5281 
5282 /*
5283  * While truncating the inode pages during eviction, we get the VFS
5284  * calling btrfs_invalidate_folio() against each folio of the inode. This
5285  * is slow because the calls to btrfs_invalidate_folio() result in a
5286  * huge amount of calls to lock_extent_bits() and clear_extent_bit(),
5287  * which keep merging and splitting extent_state structures over and over,
5288  * wasting lots of time.
5289  *
5290  * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5291  * skip all those expensive operations on a per folio basis and do only
5292  * the ordered io finishing, while we release here the extent_map and
5293  * extent_state structures, without the excessive merging and splitting.
5294  */
5295 static void evict_inode_truncate_pages(struct inode *inode)
5296 {
5297 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5298 	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5299 	struct rb_node *node;
5300 
5301 	ASSERT(inode->i_state & I_FREEING);
5302 	truncate_inode_pages_final(&inode->i_data);
5303 
5304 	write_lock(&map_tree->lock);
5305 	while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
5306 		struct extent_map *em;
5307 
5308 		node = rb_first_cached(&map_tree->map);
5309 		em = rb_entry(node, struct extent_map, rb_node);
5310 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5311 		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5312 		remove_extent_mapping(map_tree, em);
5313 		free_extent_map(em);
5314 		if (need_resched()) {
5315 			write_unlock(&map_tree->lock);
5316 			cond_resched();
5317 			write_lock(&map_tree->lock);
5318 		}
5319 	}
5320 	write_unlock(&map_tree->lock);
5321 
5322 	/*
5323 	 * Keep looping until we have no more ranges in the io tree.
5324 	 * We can have ongoing bios started by readahead that have
5325 	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5326 	 * still in progress (they unlocked the pages in the bio but have not
5327 	 * yet unlocked the ranges in the io tree). This means some
5328 	 * ranges can still be locked and eviction started because before
5329 	 * submitting those bios, which are executed by a separate task (work
5330 	 * queue kthread), inode references (inode->i_count) were not taken
5331 	 * (which would be dropped in the end io callback of each bio).
5332 	 * Therefore here we effectively end up waiting for those bios and
5333 	 * anyone else holding locked ranges without having bumped the inode's
5334 	 * reference count - if we don't do it, when they access the inode's
5335 	 * io_tree to unlock a range it may be too late, leading to a
5336 	 * use-after-free issue.
5337 	 */
5338 	spin_lock(&io_tree->lock);
5339 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5340 		struct extent_state *state;
5341 		struct extent_state *cached_state = NULL;
5342 		u64 start;
5343 		u64 end;
5344 		unsigned state_flags;
5345 
5346 		node = rb_first(&io_tree->state);
5347 		state = rb_entry(node, struct extent_state, rb_node);
5348 		start = state->start;
5349 		end = state->end;
5350 		state_flags = state->state;
5351 		spin_unlock(&io_tree->lock);
5352 
5353 		lock_extent_bits(io_tree, start, end, &cached_state);
5354 
5355 		/*
5356 		 * If still has DELALLOC flag, the extent didn't reach disk,
5357 		 * and its reserved space won't be freed by delayed_ref.
5358 		 * So we need to free its reserved space here.
5359 		 * (Refer to comment in btrfs_invalidate_folio, case 2)
5360 		 *
5361 		 * Note, end is the bytenr of last byte, so we need + 1 here.
5362 		 */
5363 		if (state_flags & EXTENT_DELALLOC)
5364 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5365 					       end - start + 1);
5366 
5367 		clear_extent_bit(io_tree, start, end,
5368 				 EXTENT_LOCKED | EXTENT_DELALLOC |
5369 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
5370 				 &cached_state);
5371 
5372 		cond_resched();
5373 		spin_lock(&io_tree->lock);
5374 	}
5375 	spin_unlock(&io_tree->lock);
5376 }
5377 
5378 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5379 							struct btrfs_block_rsv *rsv)
5380 {
5381 	struct btrfs_fs_info *fs_info = root->fs_info;
5382 	struct btrfs_trans_handle *trans;
5383 	u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
5384 	int ret;
5385 
5386 	/*
5387 	 * Eviction should be taking place at some place safe because of our
5388 	 * delayed iputs.  However the normal flushing code will run delayed
5389 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5390 	 *
5391 	 * We reserve the delayed_refs_extra here again because we can't use
5392 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5393 	 * above.  We reserve our extra bit here because we generate a ton of
5394 	 * delayed refs activity by truncating.
5395 	 *
5396 	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5397 	 * if we fail to make this reservation we can re-try without the
5398 	 * delayed_refs_extra so we can make some forward progress.
5399 	 */
5400 	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5401 				     BTRFS_RESERVE_FLUSH_EVICT);
5402 	if (ret) {
5403 		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5404 					     BTRFS_RESERVE_FLUSH_EVICT);
5405 		if (ret) {
5406 			btrfs_warn(fs_info,
5407 				   "could not allocate space for delete; will truncate on mount");
5408 			return ERR_PTR(-ENOSPC);
5409 		}
5410 		delayed_refs_extra = 0;
5411 	}
5412 
5413 	trans = btrfs_join_transaction(root);
5414 	if (IS_ERR(trans))
5415 		return trans;
5416 
5417 	if (delayed_refs_extra) {
5418 		trans->block_rsv = &fs_info->trans_block_rsv;
5419 		trans->bytes_reserved = delayed_refs_extra;
5420 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5421 					delayed_refs_extra, 1);
5422 	}
5423 	return trans;
5424 }
5425 
5426 void btrfs_evict_inode(struct inode *inode)
5427 {
5428 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5429 	struct btrfs_trans_handle *trans;
5430 	struct btrfs_root *root = BTRFS_I(inode)->root;
5431 	struct btrfs_block_rsv *rsv;
5432 	int ret;
5433 
5434 	trace_btrfs_inode_evict(inode);
5435 
5436 	if (!root) {
5437 		fsverity_cleanup_inode(inode);
5438 		clear_inode(inode);
5439 		return;
5440 	}
5441 
5442 	evict_inode_truncate_pages(inode);
5443 
5444 	if (inode->i_nlink &&
5445 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5446 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5447 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5448 		goto no_delete;
5449 
5450 	if (is_bad_inode(inode))
5451 		goto no_delete;
5452 
5453 	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5454 
5455 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5456 		goto no_delete;
5457 
5458 	if (inode->i_nlink > 0) {
5459 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5460 		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5461 		goto no_delete;
5462 	}
5463 
5464 	/*
5465 	 * This makes sure the inode item in tree is uptodate and the space for
5466 	 * the inode update is released.
5467 	 */
5468 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5469 	if (ret)
5470 		goto no_delete;
5471 
5472 	/*
5473 	 * This drops any pending insert or delete operations we have for this
5474 	 * inode.  We could have a delayed dir index deletion queued up, but
5475 	 * we're removing the inode completely so that'll be taken care of in
5476 	 * the truncate.
5477 	 */
5478 	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5479 
5480 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5481 	if (!rsv)
5482 		goto no_delete;
5483 	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5484 	rsv->failfast = 1;
5485 
5486 	btrfs_i_size_write(BTRFS_I(inode), 0);
5487 
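	/*
	 * Truncate everything away in bounded steps: each pass gets a fresh
	 * reservation, and -ENOSPC/-EAGAIN from btrfs_truncate_inode_items()
	 * means partial progress was made, so loop and retry with a new
	 * transaction.
	 */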
5488 	while (1) {
5489 		struct btrfs_truncate_control control = {
5490 			.inode = BTRFS_I(inode),
5491 			.ino = btrfs_ino(BTRFS_I(inode)),
5492 			.new_size = 0,
5493 			.min_type = 0,
5494 		};
5495 
5496 		trans = evict_refill_and_join(root, rsv);
5497 		if (IS_ERR(trans))
5498 			goto free_rsv;
5499 
5500 		trans->block_rsv = rsv;
5501 
5502 		ret = btrfs_truncate_inode_items(trans, root, &control);
5503 		trans->block_rsv = &fs_info->trans_block_rsv;
5504 		btrfs_end_transaction(trans);
5505 		btrfs_btree_balance_dirty(fs_info);
5506 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5507 			goto free_rsv;
5508 		else if (!ret)
5509 			break;
5510 	}
5511 
5512 	/*
5513 	 * Errors here aren't a big deal, it just means we leave orphan items in
5514 	 * the tree. They will be cleaned up on the next mount. If the inode
5515 	 * number gets reused, cleanup deletes the orphan item without doing
5516 	 * anything, and unlink reuses the existing orphan item.
5517 	 *
5518 	 * If it turns out that we are dropping too many of these, we might want
5519 	 * to add a mechanism for retrying these after a commit.
5520 	 */
5521 	trans = evict_refill_and_join(root, rsv);
5522 	if (!IS_ERR(trans)) {
5523 		trans->block_rsv = rsv;
5524 		btrfs_orphan_del(trans, BTRFS_I(inode));
5525 		trans->block_rsv = &fs_info->trans_block_rsv;
5526 		btrfs_end_transaction(trans);
5527 	}
5528 
5529 free_rsv:
5530 	btrfs_free_block_rsv(fs_info, rsv);
5531 no_delete:
5532 	/*
5533 	 * If we didn't successfully delete, the orphan item will still be in
5534 	 * the tree and we'll retry on the next mount. Again, we might also want
5535 	 * to retry these periodically in the future.
5536 	 */
5537 	btrfs_remove_delayed_node(BTRFS_I(inode));
5538 	fsverity_cleanup_inode(inode);
5539 	clear_inode(inode);
5540 }
5541 
5542 /*
5543  * Return the key found in the dir entry in the location pointer, fill @type
5544  * with BTRFS_FT_*, and return 0.
5545  *
5546  * If no dir entries were found, returns -ENOENT.
5547  * If found a corrupted location in dir entry, returns -EUCLEAN.
5548  */
5549 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5550 			       struct btrfs_key *location, u8 *type)
5551 {
5552 	const char *name = dentry->d_name.name;
5553 	int namelen = dentry->d_name.len;
5554 	struct btrfs_dir_item *di;
5555 	struct btrfs_path *path;
5556 	struct btrfs_root *root = BTRFS_I(dir)->root;
5557 	int ret = 0;
5558 
5559 	path = btrfs_alloc_path();
5560 	if (!path)
5561 		return -ENOMEM;
5562 
5563 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5564 			name, namelen, 0);
5565 	if (IS_ERR_OR_NULL(di)) {
5566 		ret = di ? PTR_ERR(di) : -ENOENT;
5567 		goto out;
5568 	}
5569 
5570 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5571 	if (location->type != BTRFS_INODE_ITEM_KEY &&
5572 	    location->type != BTRFS_ROOT_ITEM_KEY) {
5573 		ret = -EUCLEAN;
5574 		btrfs_warn(root->fs_info,
5575 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5576 			   __func__, name, btrfs_ino(BTRFS_I(dir)),
5577 			   location->objectid, location->type, location->offset);
5578 	}
5579 	if (!ret)
5580 		*type = btrfs_dir_type(path->nodes[0], di);
5581 out:
5582 	btrfs_free_path(path);
5583 	return ret;
5584 }
5585 
5586 /*
5587  * when we hit a tree root in a directory, the btrfs part of the inode
5588  * needs to be changed to reflect the root directory of the tree root.  This
5589  * is kind of like crossing a mount point.
5590  */
5591 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5592 				    struct inode *dir,
5593 				    struct dentry *dentry,
5594 				    struct btrfs_key *location,
5595 				    struct btrfs_root **sub_root)
5596 {
5597 	struct btrfs_path *path;
5598 	struct btrfs_root *new_root;
5599 	struct btrfs_root_ref *ref;
5600 	struct extent_buffer *leaf;
5601 	struct btrfs_key key;
5602 	int ret;
5603 	int err = 0;
5604 
5605 	path = btrfs_alloc_path();
5606 	if (!path) {
5607 		err = -ENOMEM;
5608 		goto out;
5609 	}
5610 
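	/*
	 * Root refs live in the root tree, keyed as (parent root id,
	 * BTRFS_ROOT_REF_KEY, subvolume root id); the directory inode number
	 * and the entry name are stored in the item payload and verified
	 * below.
	 */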
5611 	err = -ENOENT;
5612 	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5613 	key.type = BTRFS_ROOT_REF_KEY;
5614 	key.offset = location->objectid;
5615 
5616 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5617 	if (ret) {
5618 		if (ret < 0)
5619 			err = ret;
5620 		goto out;
5621 	}
5622 
5623 	leaf = path->nodes[0];
5624 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5625 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5626 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5627 		goto out;
5628 
5629 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5630 				   (unsigned long)(ref + 1),
5631 				   dentry->d_name.len);
5632 	if (ret)
5633 		goto out;
5634 
5635 	btrfs_release_path(path);
5636 
5637 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5638 	if (IS_ERR(new_root)) {
5639 		err = PTR_ERR(new_root);
5640 		goto out;
5641 	}
5642 
5643 	*sub_root = new_root;
5644 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5645 	location->type = BTRFS_INODE_ITEM_KEY;
5646 	location->offset = 0;
5647 	err = 0;
5648 out:
5649 	btrfs_free_path(path);
5650 	return err;
5651 }
5652 
5653 static void inode_tree_add(struct inode *inode)
5654 {
5655 	struct btrfs_root *root = BTRFS_I(inode)->root;
5656 	struct btrfs_inode *entry;
5657 	struct rb_node **p;
5658 	struct rb_node *parent;
5659 	struct rb_node *new = &BTRFS_I(inode)->rb_node;
5660 	u64 ino = btrfs_ino(BTRFS_I(inode));
5661 
5662 	if (inode_unhashed(inode))
5663 		return;
5664 	parent = NULL;
5665 	spin_lock(&root->inode_lock);
5666 	p = &root->inode_tree.rb_node;
5667 	while (*p) {
5668 		parent = *p;
5669 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5670 
5671 		if (ino < btrfs_ino(entry))
5672 			p = &parent->rb_left;
5673 		else if (ino > btrfs_ino(entry))
5674 			p = &parent->rb_right;
5675 		else {
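			/*
			 * A second inode with the same number can only show
			 * up here while the old one is being freed; take
			 * over its slot in the tree.
			 */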
5676 			WARN_ON(!(entry->vfs_inode.i_state &
5677 				  (I_WILL_FREE | I_FREEING)));
5678 			rb_replace_node(parent, new, &root->inode_tree);
5679 			RB_CLEAR_NODE(parent);
5680 			spin_unlock(&root->inode_lock);
5681 			return;
5682 		}
5683 	}
5684 	rb_link_node(new, parent, p);
5685 	rb_insert_color(new, &root->inode_tree);
5686 	spin_unlock(&root->inode_lock);
5687 }
5688 
5689 static void inode_tree_del(struct btrfs_inode *inode)
5690 {
5691 	struct btrfs_root *root = inode->root;
5692 	int empty = 0;
5693 
5694 	spin_lock(&root->inode_lock);
5695 	if (!RB_EMPTY_NODE(&inode->rb_node)) {
5696 		rb_erase(&inode->rb_node, &root->inode_tree);
5697 		RB_CLEAR_NODE(&inode->rb_node);
5698 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5699 	}
5700 	spin_unlock(&root->inode_lock);
5701 
5702 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5703 		spin_lock(&root->inode_lock);
5704 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5705 		spin_unlock(&root->inode_lock);
5706 		if (empty)
5707 			btrfs_add_dead_root(root);
5708 	}
5709 }
5710 
5711 
5712 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5713 {
5714 	struct btrfs_iget_args *args = p;
5715 
5716 	inode->i_ino = args->ino;
5717 	BTRFS_I(inode)->location.objectid = args->ino;
5718 	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5719 	BTRFS_I(inode)->location.offset = 0;
5720 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5721 	BUG_ON(args->root && !BTRFS_I(inode)->root);
5722 	return 0;
5723 }
5724 
5725 static int btrfs_find_actor(struct inode *inode, void *opaque)
5726 {
5727 	struct btrfs_iget_args *args = opaque;
5728 
5729 	return args->ino == BTRFS_I(inode)->location.objectid &&
5730 		args->root == BTRFS_I(inode)->root;
5731 }
5732 
5733 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5734 				       struct btrfs_root *root)
5735 {
5736 	struct inode *inode;
5737 	struct btrfs_iget_args args;
5738 	unsigned long hashval = btrfs_inode_hash(ino, root);
5739 
5740 	args.ino = ino;
5741 	args.root = root;
5742 
5743 	inode = iget5_locked(s, hashval, btrfs_find_actor,
5744 			     btrfs_init_locked_inode,
5745 			     (void *)&args);
5746 	return inode;
5747 }
5748 
5749 /*
5750  * Get an inode object given its inode number and corresponding root.
5751  * Path can be preallocated to prevent recursing back to iget through
5752  * allocator. NULL is also valid but may require an additional allocation
5753  * later.
5754  */
5755 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5756 			      struct btrfs_root *root, struct btrfs_path *path)
5757 {
5758 	struct inode *inode;
5759 
5760 	inode = btrfs_iget_locked(s, ino, root);
5761 	if (!inode)
5762 		return ERR_PTR(-ENOMEM);
5763 
5764 	if (inode->i_state & I_NEW) {
5765 		int ret;
5766 
5767 		ret = btrfs_read_locked_inode(inode, path);
5768 		if (!ret) {
5769 			inode_tree_add(inode);
5770 			unlock_new_inode(inode);
5771 		} else {
5772 			iget_failed(inode);
5773 			/*
5774 			 * ret > 0 can come from btrfs_search_slot called by
5775 			 * btrfs_read_locked_inode, this means the inode item
5776 			 * was not found.
5777 			 */
5778 			if (ret > 0)
5779 				ret = -ENOENT;
5780 			inode = ERR_PTR(ret);
5781 		}
5782 	}
5783 
5784 	return inode;
5785 }
5786 
5787 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5788 {
5789 	return btrfs_iget_path(s, ino, root, NULL);
5790 }
5791 
5792 static struct inode *new_simple_dir(struct super_block *s,
5793 				    struct btrfs_key *key,
5794 				    struct btrfs_root *root)
5795 {
5796 	struct inode *inode = new_inode(s);
5797 
5798 	if (!inode)
5799 		return ERR_PTR(-ENOMEM);
5800 
5801 	BTRFS_I(inode)->root = btrfs_grab_root(root);
5802 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5803 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5804 
5805 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5806 	/*
5807 	 * We only need lookup, the rest is read-only and there's no inode
5808 	 * associated with the dentry
5809 	 */
5810 	inode->i_op = &simple_dir_inode_operations;
5811 	inode->i_opflags &= ~IOP_XATTR;
5812 	inode->i_fop = &simple_dir_operations;
5813 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5814 	inode->i_mtime = current_time(inode);
5815 	inode->i_atime = inode->i_mtime;
5816 	inode->i_ctime = inode->i_mtime;
5817 	BTRFS_I(inode)->i_otime = inode->i_mtime;
5818 
5819 	return inode;
5820 }
5821 
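/*
 * The on-disk BTRFS_FT_* values must match the generic FT_* values so the
 * VFS helpers fs_umode_to_ftype() and fs_ftype_to_dtype() can be used on
 * them directly.
 */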
5822 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5823 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5824 static_assert(BTRFS_FT_DIR == FT_DIR);
5825 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5826 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5827 static_assert(BTRFS_FT_FIFO == FT_FIFO);
5828 static_assert(BTRFS_FT_SOCK == FT_SOCK);
5829 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5830 
5831 static inline u8 btrfs_inode_type(struct inode *inode)
5832 {
5833 	return fs_umode_to_ftype(inode->i_mode);
5834 }
5835 
5836 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5837 {
5838 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5839 	struct inode *inode;
5840 	struct btrfs_root *root = BTRFS_I(dir)->root;
5841 	struct btrfs_root *sub_root = root;
5842 	struct btrfs_key location;
5843 	u8 di_type = 0;
5844 	int ret = 0;
5845 
5846 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5847 		return ERR_PTR(-ENAMETOOLONG);
5848 
5849 	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
5850 	if (ret < 0)
5851 		return ERR_PTR(ret);
5852 
5853 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5854 		inode = btrfs_iget(dir->i_sb, location.objectid, root);
5855 		if (IS_ERR(inode))
5856 			return inode;
5857 
5858 		/* Do extra check against inode mode with di_type */
5859 		if (btrfs_inode_type(inode) != di_type) {
5860 			btrfs_crit(fs_info,
5861 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5862 				  inode->i_mode, btrfs_inode_type(inode),
5863 				  di_type);
5864 			iput(inode);
5865 			return ERR_PTR(-EUCLEAN);
5866 		}
5867 		return inode;
5868 	}
5869 
5870 	ret = fixup_tree_root_location(fs_info, dir, dentry,
5871 				       &location, &sub_root);
5872 	if (ret < 0) {
5873 		if (ret != -ENOENT)
5874 			inode = ERR_PTR(ret);
5875 		else
5876 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
5877 	} else {
5878 		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5879 	}
5880 	if (root != sub_root)
5881 		btrfs_put_root(sub_root);
5882 
5883 	if (!IS_ERR(inode) && root != sub_root) {
5884 		down_read(&fs_info->cleanup_work_sem);
5885 		if (!sb_rdonly(inode->i_sb))
5886 			ret = btrfs_orphan_cleanup(sub_root);
5887 		up_read(&fs_info->cleanup_work_sem);
5888 		if (ret) {
5889 			iput(inode);
5890 			inode = ERR_PTR(ret);
5891 		}
5892 	}
5893 
5894 	return inode;
5895 }
5896 
5897 static int btrfs_dentry_delete(const struct dentry *dentry)
5898 {
5899 	struct btrfs_root *root;
5900 	struct inode *inode = d_inode(dentry);
5901 
5902 	if (!inode && !IS_ROOT(dentry))
5903 		inode = d_inode(dentry->d_parent);
5904 
5905 	if (inode) {
5906 		root = BTRFS_I(inode)->root;
5907 		if (btrfs_root_refs(&root->root_item) == 0)
5908 			return 1;
5909 
5910 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5911 			return 1;
5912 	}
5913 	return 0;
5914 }
5915 
5916 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5917 				   unsigned int flags)
5918 {
5919 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5920 
5921 	if (inode == ERR_PTR(-ENOENT))
5922 		inode = NULL;
5923 	return d_splice_alias(inode, dentry);
5924 }
5925 
5926 /*
5927  * All this infrastructure exists because dir_emit can fault, and we are holding
5928  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5929  * our information into that, and then dir_emit from the buffer.  This is
5930  * similar to what NFS does, only we don't keep the buffer around in pagecache
5931  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5932  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5933  * tree lock.
5934  */
5935 static int btrfs_opendir(struct inode *inode, struct file *file)
5936 {
5937 	struct btrfs_file_private *private;
5938 
5939 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5940 	if (!private)
5941 		return -ENOMEM;
5942 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5943 	if (!private->filldir_buf) {
5944 		kfree(private);
5945 		return -ENOMEM;
5946 	}
5947 	file->private_data = private;
5948 	return 0;
5949 }
5950 
5951 struct dir_entry {
5952 	u64 ino;
5953 	u64 offset;
5954 	unsigned type;
5955 	int name_len;
5956 };
5957 
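/*
 * The filldir buffer packs each entry as a struct dir_entry immediately
 * followed by name_len bytes of name; get/put_unaligned() is used because
 * consecutive entries are not naturally aligned.
 */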
5958 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5959 {
5960 	while (entries--) {
5961 		struct dir_entry *entry = addr;
5962 		char *name = (char *)(entry + 1);
5963 
5964 		ctx->pos = get_unaligned(&entry->offset);
5965 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5966 					 get_unaligned(&entry->ino),
5967 					 get_unaligned(&entry->type)))
5968 			return 1;
5969 		addr += sizeof(struct dir_entry) +
5970 			get_unaligned(&entry->name_len);
5971 		ctx->pos++;
5972 	}
5973 	return 0;
5974 }
5975 
5976 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5977 {
5978 	struct inode *inode = file_inode(file);
5979 	struct btrfs_root *root = BTRFS_I(inode)->root;
5980 	struct btrfs_file_private *private = file->private_data;
5981 	struct btrfs_dir_item *di;
5982 	struct btrfs_key key;
5983 	struct btrfs_key found_key;
5984 	struct btrfs_path *path;
5985 	void *addr;
5986 	struct list_head ins_list;
5987 	struct list_head del_list;
5988 	int ret;
5989 	char *name_ptr;
5990 	int name_len;
5991 	int entries = 0;
5992 	int total_len = 0;
5993 	bool put = false;
5994 	struct btrfs_key location;
5995 
5996 	if (!dir_emit_dots(file, ctx))
5997 		return 0;
5998 
5999 	path = btrfs_alloc_path();
6000 	if (!path)
6001 		return -ENOMEM;
6002 
6003 	addr = private->filldir_buf;
6004 	path->reada = READA_FORWARD;
6005 
6006 	INIT_LIST_HEAD(&ins_list);
6007 	INIT_LIST_HEAD(&del_list);
6008 	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
6009 
6010 again:
6011 	key.type = BTRFS_DIR_INDEX_KEY;
6012 	key.offset = ctx->pos;
6013 	key.objectid = btrfs_ino(BTRFS_I(inode));
6014 
6015 	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
6016 		struct dir_entry *entry;
6017 		struct extent_buffer *leaf = path->nodes[0];
6018 
6019 		if (found_key.objectid != key.objectid)
6020 			break;
6021 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
6022 			break;
6023 		if (found_key.offset < ctx->pos)
6024 			continue;
6025 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6026 			continue;
6027 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
6028 		name_len = btrfs_dir_name_len(leaf, di);
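		/*
		 * If the next entry would overflow the PAGE_SIZE buffer,
		 * flush what we have to userspace and restart the search at
		 * the updated ctx->pos.
		 */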
6029 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
6030 		    PAGE_SIZE) {
6031 			btrfs_release_path(path);
6032 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6033 			if (ret)
6034 				goto nopos;
6035 			addr = private->filldir_buf;
6036 			entries = 0;
6037 			total_len = 0;
6038 			goto again;
6039 		}
6040 
6041 		entry = addr;
6042 		put_unaligned(name_len, &entry->name_len);
6043 		name_ptr = (char *)(entry + 1);
6044 		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6045 				   name_len);
6046 		put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
6047 				&entry->type);
6048 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
6049 		put_unaligned(location.objectid, &entry->ino);
6050 		put_unaligned(found_key.offset, &entry->offset);
6051 		entries++;
6052 		addr += sizeof(struct dir_entry) + name_len;
6053 		total_len += sizeof(struct dir_entry) + name_len;
6054 	}
6055 	/* Catch error encountered during iteration */
6056 	if (ret < 0)
6057 		goto err;
6058 
6059 	btrfs_release_path(path);
6060 
6061 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6062 	if (ret)
6063 		goto nopos;
6064 
6065 	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6066 	if (ret)
6067 		goto nopos;
6068 
6069 	/*
6070 	 * Stop new entries from being returned after we return the last
6071 	 * entry.
6072 	 *
6073 	 * New directory entries are assigned a strictly increasing
6074 	 * offset.  This means that new entries created during readdir
6075 	 * are *guaranteed* to be seen in the future by that readdir.
6076 	 * This has broken buggy programs which operate on names as
6077 	 * they're returned by readdir.  Until we re-use freed offsets
6078 	 * we have this hack to stop new entries from being returned
6079 	 * under the assumption that they'll never reach this huge
6080 	 * offset.
6081 	 *
6082 	 * This is being careful not to overflow 32bit loff_t unless the
6083 	 * last entry requires it because doing so has broken 32bit apps
6084 	 * in the past.
6085 	 */
6086 	if (ctx->pos >= INT_MAX)
6087 		ctx->pos = LLONG_MAX;
6088 	else
6089 		ctx->pos = INT_MAX;
6090 nopos:
6091 	ret = 0;
6092 err:
6093 	if (put)
6094 		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6095 	btrfs_free_path(path);
6096 	return ret;
6097 }
6098 
6099 /*
6100  * This is somewhat expensive, updating the tree every time the
6101  * inode changes.  But, it is most likely to find the inode in cache.
6102  * inode changes.  But it is most likely to find the inode in cache.
6103  * to keep or drop this code.
6104  */
6105 static int btrfs_dirty_inode(struct inode *inode)
6106 {
6107 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6108 	struct btrfs_root *root = BTRFS_I(inode)->root;
6109 	struct btrfs_trans_handle *trans;
6110 	int ret;
6111 
6112 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6113 		return 0;
6114 
6115 	trans = btrfs_join_transaction(root);
6116 	if (IS_ERR(trans))
6117 		return PTR_ERR(trans);
6118 
6119 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6120 	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
6121 		/* whoops, lets try again with the full transaction */
6122 		btrfs_end_transaction(trans);
6123 		trans = btrfs_start_transaction(root, 1);
6124 		if (IS_ERR(trans))
6125 			return PTR_ERR(trans);
6126 
6127 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6128 	}
6129 	btrfs_end_transaction(trans);
6130 	if (BTRFS_I(inode)->delayed_node)
6131 		btrfs_balance_delayed_items(fs_info);
6132 
6133 	return ret;
6134 }
6135 
6136 /*
6137  * This is a copy of file_update_time.  We need this so we can return error on
6138  * ENOSPC for updating the inode in the case of file write and mmap writes.
6139  */
6140 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6141 			     int flags)
6142 {
6143 	struct btrfs_root *root = BTRFS_I(inode)->root;
6144 	bool dirty = flags & ~S_VERSION;
6145 
6146 	if (btrfs_root_readonly(root))
6147 		return -EROFS;
6148 
6149 	if (flags & S_VERSION)
6150 		dirty |= inode_maybe_inc_iversion(inode, dirty);
6151 	if (flags & S_CTIME)
6152 		inode->i_ctime = *now;
6153 	if (flags & S_MTIME)
6154 		inode->i_mtime = *now;
6155 	if (flags & S_ATIME)
6156 		inode->i_atime = *now;
6157 	return dirty ? btrfs_dirty_inode(inode) : 0;
6158 }
6159 
6160 /*
6161  * find the highest existing sequence number in a directory
6162  * and then set the in-memory index_cnt variable to reflect
6163  * free sequence numbers
6164  */
6165 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6166 {
6167 	struct btrfs_root *root = inode->root;
6168 	struct btrfs_key key, found_key;
6169 	struct btrfs_path *path;
6170 	struct extent_buffer *leaf;
6171 	int ret;
6172 
6173 	key.objectid = btrfs_ino(inode);
6174 	key.type = BTRFS_DIR_INDEX_KEY;
6175 	key.offset = (u64)-1;
6176 
6177 	path = btrfs_alloc_path();
6178 	if (!path)
6179 		return -ENOMEM;
6180 
6181 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6182 	if (ret < 0)
6183 		goto out;
6184 	/* FIXME: we should be able to handle this */
6185 	if (ret == 0)
6186 		goto out;
6187 	ret = 0;
6188 
6189 	if (path->slots[0] == 0) {
6190 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6191 		goto out;
6192 	}
6193 
6194 	path->slots[0]--;
6195 
6196 	leaf = path->nodes[0];
6197 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6198 
6199 	if (found_key.objectid != btrfs_ino(inode) ||
6200 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
6201 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6202 		goto out;
6203 	}
6204 
6205 	inode->index_cnt = found_key.offset + 1;
6206 out:
6207 	btrfs_free_path(path);
6208 	return ret;
6209 }
6210 
6211 /*
6212  * helper to find a free sequence number in a given directory.  The current
6213  * code is very simple; later versions will do smarter things in the btree
6214  */
6215 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6216 {
6217 	int ret = 0;
6218 
6219 	if (dir->index_cnt == (u64)-1) {
6220 		ret = btrfs_inode_delayed_dir_index_count(dir);
6221 		if (ret) {
6222 			ret = btrfs_set_inode_index_count(dir);
6223 			if (ret)
6224 				return ret;
6225 		}
6226 	}
6227 
6228 	*index = dir->index_cnt;
6229 	dir->index_cnt++;
6230 
6231 	return ret;
6232 }
6233 
6234 static int btrfs_insert_inode_locked(struct inode *inode)
6235 {
6236 	struct btrfs_iget_args args;
6237 
6238 	args.ino = BTRFS_I(inode)->location.objectid;
6239 	args.root = BTRFS_I(inode)->root;
6240 
6241 	return insert_inode_locked4(inode,
6242 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6243 		   btrfs_find_actor, &args);
6244 }
6245 
6246 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6247 			    unsigned int *trans_num_items)
6248 {
6249 	struct inode *dir = args->dir;
6250 	struct inode *inode = args->inode;
6251 	int ret;
6252 
6253 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6254 	if (ret)
6255 		return ret;
6256 
6257 	/* 1 to add inode item */
6258 	*trans_num_items = 1;
6259 	/* 1 to add compression property */
6260 	if (BTRFS_I(dir)->prop_compress)
6261 		(*trans_num_items)++;
6262 	/* 1 to add default ACL xattr */
6263 	if (args->default_acl)
6264 		(*trans_num_items)++;
6265 	/* 1 to add access ACL xattr */
6266 	if (args->acl)
6267 		(*trans_num_items)++;
6268 #ifdef CONFIG_SECURITY
6269 	/* 1 to add LSM xattr */
6270 	if (dir->i_security)
6271 		(*trans_num_items)++;
6272 #endif
6273 	if (args->orphan) {
6274 		/* 1 to add orphan item */
6275 		(*trans_num_items)++;
6276 	} else {
6277 		/*
6278 		 * 1 to add dir item
6279 		 * 1 to add dir index
6280 		 * 1 to update parent inode item
6281 		 *
6282 		 * No need for 1 unit for the inode ref item because it is
6283 		 * inserted in a batch together with the inode item at
6284 		 * btrfs_create_new_inode().
6285 		 */
6286 		*trans_num_items += 3;
6287 	}
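	/*
	 * Worked example: a regular file created with both a default and an
	 * access ACL in a directory with a compression property needs
	 * 1 + 1 + 1 + 1 + 3 = 7 units (8 with an LSM xattr).
	 */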
6288 	return 0;
6289 }
6290 
6291 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6292 {
6293 	posix_acl_release(args->acl);
6294 	posix_acl_release(args->default_acl);
6295 }
6296 
6297 /*
6298  * Inherit flags from the parent inode.
6299  *
6300  * Currently only the compression flags and the cow flags are inherited.
6301  */
6302 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6303 {
6304 	unsigned int flags;
6305 
6306 	flags = BTRFS_I(dir)->flags;
6307 
6308 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6309 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6310 		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6311 	} else if (flags & BTRFS_INODE_COMPRESS) {
6312 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6313 		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6314 	}
6315 
6316 	if (flags & BTRFS_INODE_NODATACOW) {
6317 		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6318 		if (S_ISREG(inode->i_mode))
6319 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6320 	}
6321 
6322 	btrfs_sync_inode_flags_to_i_flags(inode);
6323 }
6324 
6325 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6326 			   struct btrfs_new_inode_args *args)
6327 {
6328 	struct inode *dir = args->dir;
6329 	struct inode *inode = args->inode;
6330 	const char *name = args->orphan ? NULL : args->dentry->d_name.name;
6331 	int name_len = args->orphan ? 0 : args->dentry->d_name.len;
6332 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6333 	struct btrfs_root *root;
6334 	struct btrfs_inode_item *inode_item;
6335 	struct btrfs_key *location;
6336 	struct btrfs_path *path;
6337 	u64 objectid;
6338 	struct btrfs_inode_ref *ref;
6339 	struct btrfs_key key[2];
6340 	u32 sizes[2];
6341 	struct btrfs_item_batch batch;
6342 	unsigned long ptr;
6343 	int ret;
6344 
6345 	path = btrfs_alloc_path();
6346 	if (!path)
6347 		return -ENOMEM;
6348 
6349 	if (!args->subvol)
6350 		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6351 	root = BTRFS_I(inode)->root;
6352 
6353 	ret = btrfs_get_free_objectid(root, &objectid);
6354 	if (ret)
6355 		goto out;
6356 	inode->i_ino = objectid;
6357 
6358 	if (args->orphan) {
6359 		/*
6360 		 * O_TMPFILE, set link count to 0, so that after this point, we
6361 		 * fill in an inode item with the correct link count.
6362 		 */
6363 		set_nlink(inode, 0);
6364 	} else {
6365 		trace_btrfs_inode_request(dir);
6366 
6367 		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6368 		if (ret)
6369 			goto out;
6370 	}
6371 	/* index_cnt is ignored for everything but a dir. */
6372 	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6373 	BTRFS_I(inode)->generation = trans->transid;
6374 	inode->i_generation = BTRFS_I(inode)->generation;
6375 
6376 	/*
6377 	 * Subvolumes don't inherit flags from their parent directory.
6378 	 * Originally this was probably by accident, but we probably can't
6379 	 * change it now without compatibility issues.
6380 	 */
6381 	if (!args->subvol)
6382 		btrfs_inherit_iflags(inode, dir);
6383 
6384 	if (S_ISREG(inode->i_mode)) {
6385 		if (btrfs_test_opt(fs_info, NODATASUM))
6386 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6387 		if (btrfs_test_opt(fs_info, NODATACOW))
6388 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6389 				BTRFS_INODE_NODATASUM;
6390 	}
6391 
6392 	location = &BTRFS_I(inode)->location;
6393 	location->objectid = objectid;
6394 	location->offset = 0;
6395 	location->type = BTRFS_INODE_ITEM_KEY;
6396 
6397 	ret = btrfs_insert_inode_locked(inode);
6398 	if (ret < 0) {
6399 		if (!args->orphan)
6400 			BTRFS_I(dir)->index_cnt--;
6401 		goto out;
6402 	}
6403 
6404 	/*
6405 	 * We could have gotten an inode number from somebody who was fsynced
6406 	 * and then removed in this same transaction, so let's just set full
6407 	 * sync since it will be a full sync anyway and this will blow away the
6408 	 * old info in the log.
6409 	 */
6410 	btrfs_set_inode_full_sync(BTRFS_I(inode));
6411 
6412 	key[0].objectid = objectid;
6413 	key[0].type = BTRFS_INODE_ITEM_KEY;
6414 	key[0].offset = 0;
6415 
6416 	sizes[0] = sizeof(struct btrfs_inode_item);
6417 
6418 	if (!args->orphan) {
6419 		/*
6420 		 * Start new inodes with an inode_ref. This is slightly more
6421 		 * efficient for small numbers of hard links since they will
6422 		 * be packed into one item. Extended refs will kick in if we
6423 		 * add more hard links than can fit in the ref item.
6424 		 */
6425 		key[1].objectid = objectid;
6426 		key[1].type = BTRFS_INODE_REF_KEY;
6427 		if (args->subvol) {
6428 			key[1].offset = objectid;
6429 			sizes[1] = 2 + sizeof(*ref);
6430 		} else {
6431 			key[1].offset = btrfs_ino(BTRFS_I(dir));
6432 			sizes[1] = name_len + sizeof(*ref);
6433 		}
6434 	}
6435 
6436 	batch.keys = &key[0];
6437 	batch.data_sizes = &sizes[0];
6438 	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6439 	batch.nr = args->orphan ? 1 : 2;
6440 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6441 	if (ret != 0) {
6442 		btrfs_abort_transaction(trans, ret);
6443 		goto discard;
6444 	}
6445 
6446 	inode->i_mtime = current_time(inode);
6447 	inode->i_atime = inode->i_mtime;
6448 	inode->i_ctime = inode->i_mtime;
6449 	BTRFS_I(inode)->i_otime = inode->i_mtime;
6450 
6451 	/*
6452 	 * We're going to fill the inode item now, so at this point the inode
6453 	 * must be fully initialized.
6454 	 */
6455 
6456 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6457 				  struct btrfs_inode_item);
6458 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6459 			     sizeof(*inode_item));
6460 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6461 
6462 	if (!args->orphan) {
6463 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6464 				     struct btrfs_inode_ref);
6465 		ptr = (unsigned long)(ref + 1);
6466 		if (args->subvol) {
6467 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6468 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6469 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6470 		} else {
6471 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6472 			btrfs_set_inode_ref_index(path->nodes[0], ref,
6473 						  BTRFS_I(inode)->dir_index);
6474 			write_extent_buffer(path->nodes[0], name, ptr, name_len);
6475 		}
6476 	}
6477 
6478 	btrfs_mark_buffer_dirty(path->nodes[0]);
6479 	btrfs_release_path(path);
6480 
6481 	if (args->subvol) {
6482 		struct inode *parent;
6483 
6484 		/*
6485 		 * Subvolumes inherit properties from their parent subvolume,
6486 		 * not the directory they were created in.
6487 		 */
6488 		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
6489 				    BTRFS_I(dir)->root);
6490 		if (IS_ERR(parent)) {
6491 			ret = PTR_ERR(parent);
6492 		} else {
6493 			ret = btrfs_inode_inherit_props(trans, inode, parent);
6494 			iput(parent);
6495 		}
6496 	} else {
6497 		ret = btrfs_inode_inherit_props(trans, inode, dir);
6498 	}
6499 	if (ret) {
6500 		btrfs_err(fs_info,
6501 			  "error inheriting props for ino %llu (root %llu): %d",
6502 			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
6503 			  ret);
6504 	}
6505 
6506 	/*
6507 	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6508 	 * probably a bug.
6509 	 */
6510 	if (!args->subvol) {
6511 		ret = btrfs_init_inode_security(trans, args);
6512 		if (ret) {
6513 			btrfs_abort_transaction(trans, ret);
6514 			goto discard;
6515 		}
6516 	}
6517 
6518 	inode_tree_add(inode);
6519 
6520 	trace_btrfs_inode_new(inode);
6521 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6522 
6523 	btrfs_update_root_times(trans, root);
6524 
6525 	if (args->orphan) {
6526 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6527 	} else {
6528 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6529 				     name_len, 0, BTRFS_I(inode)->dir_index);
6530 	}
6531 	if (ret) {
6532 		btrfs_abort_transaction(trans, ret);
6533 		goto discard;
6534 	}
6535 
6536 	ret = 0;
6537 	goto out;
6538 
6539 discard:
6540 	/*
6541 	 * discard_new_inode() calls iput(), but the caller owns the reference
6542 	 * to the inode.
6543 	 */
6544 	ihold(inode);
6545 	discard_new_inode(inode);
6546 out:
6547 	btrfs_free_path(path);
6548 	return ret;
6549 }
6550 
/*
 * Utility function to add 'inode' into 'parent_inode' with a given name and
 * a given sequence number. If 'add_backref' is true, also insert a backref
 * from the inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name_len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
		struct timespec64 now = current_time(&parent_inode->vfs_inode);

		parent_inode->vfs_inode.i_mtime = now;
		parent_inode->vfs_inode.i_ctime = now;
	}
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name, name_len);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}

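/*
 * Common helper for the create-style callbacks (mknod, create, mkdir): let
 * btrfs_new_inode_prepare() size the transaction, create the new inode within
 * a single transaction and instantiate the dentry. Consumes the caller's
 * inode reference on failure.
 */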
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.inode = inode,
	};
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int err;

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}

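/*
 * The mknod/create/mkdir callbacks below only differ in how they initialize
 * the new VFS inode (mode, ops, address space operations); the on-disk work
 * is all done by btrfs_create_common().
 */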
static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(mnt_userns, inode, dir, mode);
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);
	return btrfs_create_common(dir, dentry, inode);
}

static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(mnt_userns, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	return btrfs_create_common(dir, dentry, inode);
}

static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 index;
	int err;
	int drop_inode = 0;

	/* Do not allow hard links across subvolumes of the same device. */
	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     dentry->d_name.name, dentry->d_name.len, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If the new hard link count is 1, it's a file created
			 * with the open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
	}

fail:
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}

static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(mnt_userns, inode, dir, S_IFDIR | mode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	return btrfs_create_common(dir, dentry, inode);
}

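/*
 * Copy the compressed data of an inline extent out of the leaf into a
 * temporary buffer, decompress it into @page and zero whatever is left of
 * the page past the end of the uncompressed data.
 */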
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);

	/*
	 * The decompression code contains a memset to fill in any space
	 * between the end of the uncompressed data and the end of max_size,
	 * in case the decompressed data ends up shorter than ram_bytes. That
	 * doesn't cover the hole between the end of an inline extent and the
	 * beginning of the next block, so we cover that region here.
	 */
	if (max_size + pg_offset < PAGE_SIZE)
		memzero_page(page, pg_offset + max_size,
			     PAGE_SIZE - max_size - pg_offset);
	kfree(tmp);
	return ret;
}

/**
 * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
 * @inode:	file to search in
 * @page:	page to read extent data into if the extent is inline
 * @pg_offset:	offset into @page to copy to
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * This returns the first &struct extent_map which overlaps with the given
 * range, reading it from the B-tree and caching it if necessary. Note that
 * there may be more extents which overlap the given range after the returned
 * extent_map.
 *
 * If @page is not NULL and the extent is inline, this also reads the extent
 * data directly into the page and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_io_tree *io_tree = &inode->io_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we back up past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only a regular file can have regular/prealloc extents. */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		em->block_start = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, !page, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		if (!page)
			goto out;

		size = btrfs_file_extent_ram_bytes(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
				  size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = ALIGN(copy_size, fs_info->sectorsize);
		em->orig_block_len = em->len;
		em->orig_start = em->start;
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;

		if (!PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, page, pg_offset,
							extent_offset, item);
				if (ret)
					goto out;
			} else {
				map = kmap_local_page(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap_local(map);
			}
			flush_dcache_page(page);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
	em->block_start = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}

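/*
 * Like btrfs_get_extent(), but when the found mapping is a hole or a prealloc
 * extent, also check the io tree for delalloc behind it and return a mapping
 * reflecting the delalloc range (EXTENT_MAP_DELALLOC) instead, so that fiemap
 * can report dirty, not-yet-allocated data.
 */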
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
					   u64 start, u64 len)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 delalloc_start = start;
	u64 end;
	u64 delalloc_len;
	u64 delalloc_end;
	int err = 0;

	em = btrfs_get_extent(inode, NULL, 0, start, len);
	if (IS_ERR(em))
		return em;
	/*
	 * If our em maps to:
	 * - a hole or
	 * - a pre-alloc extent,
	 * there might actually be delalloc bytes behind it.
	 */
	if (em->block_start != EXTENT_MAP_HOLE &&
	    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		return em;
	else
		hole_em = em;

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, let's look for delalloc */
	delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
				 end, len, EXTENT_DELALLOC, 1);
	delalloc_end = delalloc_start + delalloc_len;
	if (delalloc_end < delalloc_start)
		delalloc_end = (u64)-1;

	/*
	 * We didn't find anything useful, return the original results from
	 * get_extent()
	 */
	if (delalloc_start > end || delalloc_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/*
	 * Adjust the delalloc_start to make sure it doesn't go backwards from
	 * the start they passed in
	 */
	delalloc_start = max(start, delalloc_start);
	delalloc_len = delalloc_end - delalloc_start;

	if (delalloc_len > 0) {
		u64 hole_start;
		u64 hole_len;
		const u64 hole_end = extent_map_end(hole_em);

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}

		ASSERT(hole_em);
		/*
		 * When btrfs_get_extent can't find anything it returns one
		 * huge hole.
		 *
		 * Make sure what it found really fits our range, and adjust to
		 * make sure it is based on the start from the caller.
		 */
		if (hole_end <= start || hole_em->start > end) {
			free_extent_map(hole_em);
			hole_em = NULL;
		} else {
			hole_start = max(hole_em->start, start);
			hole_len = hole_end - hole_start;
		}

		if (hole_em && delalloc_start > hole_start) {
			/*
			 * Our hole starts before our delalloc, so we have to
			 * return just the parts of the hole that go until the
			 * delalloc starts
			 */
			em->len = min(hole_len, delalloc_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * Don't adjust block start at all, it is fixed at
			 * EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		} else {
			/*
			 * Hole is out of passed range or it starts after
			 * delalloc range
			 */
			em->start = delalloc_start;
			em->len = delalloc_len;
			em->orig_start = delalloc_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = delalloc_len;
		}
	} else {
		return hole_em;
	}
out:
	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

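/*
 * Set up the extent map (except for NOCOW writes, which reuse the existing
 * extent) and the ordered extent for a direct IO write covering
 * [@start, @start + @len).
 */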
static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	int ret;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start, block_start,
				  block_len, orig_block_len, ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ret = btrfs_add_ordered_extent(inode, start, len, len, block_start,
				       block_len, 0,
				       (1 << type) |
				       (1 << BTRFS_ORDERED_DIRECT),
				       BTRFS_COMPRESS_NONE);
	if (ret) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
		}
		em = ERR_PTR(ret);
	}
out:
	return em;
}

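/*
 * Reserve a data extent for a direct IO COW write and hook it up with an
 * extent map and an ordered extent. The reserved extent is released again if
 * setting those up fails.
 */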
static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
						  u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
					   1);

	return em;
}

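/* Return true if the block group containing @bytenr is read-only or missing. */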
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @orig_start:	(optional) Return the original file offset of the file extent
 * @orig_block_len: (optional) Return the original on-disk length of the file
 *		extent
 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
 * @strict:	If true, omit optimizations that might force us into unnecessary
 *		COW, e.g. don't trust the generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool strict)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	if (ret == 1) {
		if (path->slots[0] == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		path->slots[0]--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	if (btrfs_file_extent_end(path) <= offset)
		goto out;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (ram_bytes)
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

	nocow_args.start = offset;
	nocow_args.end = offset + *len - 1;
	nocow_args.strict = strict;
	nocow_args.free_path = true;

	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
	/* can_nocow_file_extent() has freed the path. */
	path = NULL;

	if (ret != 1) {
		/* Treat errors as not being able to NOCOW. */
		ret = 0;
		goto out;
	}

	ret = 0;
	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
		goto out;

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + nocow_args.num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit(io_tree, offset, range_end,
				     EXTENT_DELALLOC, 0, NULL);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (orig_start)
		*orig_start = key.offset - nocow_args.extent_offset;
	if (orig_block_len)
		*orig_block_len = nocow_args.disk_num_bytes;

	*len = nocow_args.num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

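/*
 * Lock the file range in the io tree for direct IO, making sure there are no
 * ordered extents and (for writes) no buffered pages left in the range. For
 * NOWAIT requests, return -EAGAIN instead of blocking; return -ENOTBLK when
 * the caller must fall back to buffered IO.
 */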
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state,
			      unsigned int iomap_flags)
{
	const bool writing = (iomap_flags & IOMAP_WRITE);
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		if (nowait) {
			if (!try_lock_extent(io_tree, lockstart, lockend))
				return -EAGAIN;
		} else {
			lock_extent_bits(io_tree, lockstart, lockend, cached_state);
		}
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing || !filemap_range_has_page(inode->i_mapping,
							 lockstart, lockend)))
			break;

		unlock_extent_cached(io_tree, lockstart, lockend, cached_state);

		if (ordered) {
			if (nowait) {
				btrfs_put_ordered_extent(ordered);
				ret = -EAGAIN;
				break;
			}
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we cannot wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks. This happens only if our DIO read covers more
			 * than one extent map: at this point we have already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range has started (we unlock the
			 * ranges in the io tree only when the bios complete,
			 * and buffered writes always lock pages before
			 * attempting to lock a range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(ordered, 1);
			else
				ret = nowait ? -EAGAIN : -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readahead (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created earlier but for which
			 * we have not yet submitted a corresponding bio (hence
			 * it cannot complete), which makes readahead wait for
			 * that ordered extent to complete while holding a lock
			 * on that page.
			 */
			ret = nowait ? -EAGAIN : -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}

/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em_tree = &inode->extent_tree;
	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC) {
		set_bit(EXTENT_FLAG_FILLING, &em->flags);
	} else if (type == BTRFS_ORDERED_COMPRESSED) {
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->compress_type = compress_type;
	}

	do {
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
		/*
		 * The caller has taken lock_extent(), so who could race with
		 * us to add the em?
		 */
	} while (ret == -EEXIST);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* The em has 2 refs now; the caller needs to do free_extent_map() once. */
	return em;
}


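/*
 * Prepare the extent map for a direct IO write: NOCOW into the existing
 * extent when possible (reserving only metadata), otherwise allocate a new
 * extent using the data space reserved earlier by the iomap_begin hook. Also
 * pushes i_size forward under the extent lock so concurrent buffered readers
 * see the new size.
 */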
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 len,
					 unsigned int iomap_flags)
{
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int type;
	u64 block_start, orig_start, orig_block_len, ram_bytes;
	struct btrfs_block_group *bg;
	bool can_nocow = false;
	bool space_reserved = false;
	u64 prev_len;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases:
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 *    existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 *    just use the extent.
	 */
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false) == 1) {
			bg = btrfs_inc_nocow_writers(fs_info, block_start);
			if (bg)
				can_nocow = true;
		}
	}

	prev_len = len;
	if (can_nocow) {
		struct extent_map *em2;

		/* We can NOCOW, so only need to reserve metadata space. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      nowait);
		if (ret < 0) {
			/* Our caller expects us to free the input extent map. */
			free_extent_map(em);
			*map = NULL;
			btrfs_dec_nocow_writers(bg);
			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
				ret = -EAGAIN;
			goto out;
		}
		space_reserved = true;

		em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len,
					      orig_start, block_start,
					      len, orig_block_len,
					      ram_bytes, type);
		btrfs_dec_nocow_writers(bg);
		if (type == BTRFS_ORDERED_PREALLOC) {
			free_extent_map(em);
			*map = em = em2;
		}

		if (IS_ERR(em2)) {
			ret = PTR_ERR(em2);
			goto out;
		}

		dio_data->nocow_done = true;
	} else {
		/* Our caller expects us to free the input extent map. */
		free_extent_map(em);
		*map = NULL;

		if (nowait)
			return -EAGAIN;

		/*
		 * If we could not allocate data space before locking the file
		 * range and we can't do a NOCOW write, then we have to fail.
		 */
		if (!dio_data->data_space_reserved)
			return -ENOSPC;

		/*
		 * We have to COW and we have already reserved data space before,
		 * so now we reserve only metadata.
		 */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      false);
		if (ret < 0)
			goto out;
		space_reserved = true;

		em = btrfs_new_extent_direct(BTRFS_I(inode), start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		*map = em;
		len = min(len, em->len - (start - em->start));
		if (len < prev_len)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							prev_len - len, true);
	}

	/*
	 * We have created our ordered extent, so we can now release our reservation
	 * for an outstanding extent.
	 */
	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);
out:
	if (ret && space_reserved) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
	}
	return ret;
}

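/*
 * iomap_begin hook for direct IO: flush any compressed writeback, reserve
 * data space for writes, lock the file range, look up (or create, for writes)
 * the extent covering [start, start + length) and translate it into the iomap
 * that the generic DIO code will submit bios against.
 */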
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
		loff_t length, unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = iter->private;
	u64 lockstart, lockend;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;
	u64 len = length;
	const u64 data_alloc_len = length;
	bool unlock_extents = false;

	if (!write)
		len = min_t(u64, len, fs_info->sectorsize);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
	 * enough if we've written compressed pages to this area, so we need to
	 * flush the dirty pages again to make absolutely sure that any
	 * outstanding dirty pages are on disk - the first flush only starts
	 * compression on the data, while keeping the pages locked, so by the
	 * time the second flush returns we know bios for the compressed pages
	 * were submitted and finished, and the pages are no longer under
	 * writeback.
	 *
	 * If we have a NOWAIT request and we have any pages in the range that
	 * are locked, likely due to compression still in progress, we don't want
	 * to block on page locks. We also don't want to block on pages marked as
	 * dirty or under writeback (same as for the non-compression case).
	 * iomap_dio_rw() did the same check, but after that and before we got
	 * here, mmap'ed writes may have happened or buffered reads started
	 * (readpage() and readahead(), which lock pages), as we haven't locked
	 * the file range yet.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		if (flags & IOMAP_NOWAIT) {
			if (filemap_range_needs_writeback(inode->i_mapping,
							  lockstart, lockend))
				return -EAGAIN;
		} else {
			ret = filemap_fdatawrite_range(inode->i_mapping, start,
						       start + length - 1);
			if (ret)
				return ret;
		}
	}

	memset(dio_data, 0, sizeof(*dio_data));

	/*
	 * We always try to allocate data space and must do it before locking
	 * the file range, to avoid deadlocks with concurrent writes to the same
	 * range if the range has several extents and the writes don't expand the
	 * current i_size (the inode lock is taken in shared mode). If we fail to
	 * allocate data space here we continue and later, after locking the
	 * file range, we fail with ENOSPC only if we figure out we cannot do a
	 * NOCOW write.
	 */
	if (write && !(flags & IOMAP_NOWAIT)) {
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &dio_data->data_reserved,
						  start, data_alloc_len);
		if (!ret)
			dio_data->data_space_reserved = true;
		else if (ret && !(BTRFS_I(inode)->flags &
				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
			goto err;
	}

	/*
	 * If this errors out it's because we couldn't invalidate the page cache
	 * for this range and we need to fall back to buffered IO, or we are
	 * doing a NOWAIT read/write and we need to block.
	 */
	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
	if (ret < 0)
		goto err;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok, for INLINE and COMPRESSED extents we need to fall back on
	 * buffered IO. INLINE is special, and we could probably kludge it in
	 * here, but it's still buffered so for safety let's just fall back to
	 * the generic buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fall back to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		/*
		 * If we are in a NOWAIT context, return -EAGAIN in order to
		 * fall back to buffered IO. This is not only because we can
		 * block with buffered IO (no support for NOWAIT semantics at
		 * the moment) but also to avoid returning short reads to user
		 * space - this happens if we were able to read some data from
		 * previous non-compressed extents and then when we fall back to
		 * buffered IO, at btrfs_file_read_iter() by calling
		 * filemap_read(), we fail to fault in pages for the read buffer,
		 * in which case filemap_read() returns a short read (the number
		 * of bytes previously read is > 0, so it does not return -EFAULT).
		 */
		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
		goto unlock_err;
	}

	len = min(len, em->len - (start - em->start));

	/*
	 * If we have a NOWAIT request and the range contains multiple extents
	 * (or a mix of extents and holes), then we return -EAGAIN to make the
	 * caller fall back to a context where it can do a blocking (without
	 * NOWAIT) request. This way we avoid doing partial IO and returning
	 * success to the caller, which is not optimal for writes and for reads
	 * it can result in unexpected behaviour for an application.
	 *
	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
	 * iomap_dio_rw(), we can end up returning less data than what the caller
	 * asked for, resulting in an unexpected, and incorrect, short read.
	 * That is, the caller asked to read N bytes and we return less than that,
	 * which is wrong unless we are crossing EOF. This happens if we get a
	 * page fault error when trying to fault in pages for the buffer that is
	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
	 * have previously submitted bios for other extents in the range, in
	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
	 * those bios have completed by the time we get the page fault error,
	 * which we return back to our caller - we should only return EIOCBQUEUED
	 * after we have submitted bios for all the extents in the range.
	 */
	if ((flags & IOMAP_NOWAIT) && len < length) {
		free_extent_map(em);
		ret = -EAGAIN;
		goto unlock_err;
	}

	if (write) {
		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
						    start, len, flags);
		if (ret < 0)
			goto unlock_err;
		unlock_extents = true;
		/* Recalc len in case the new em is smaller than requested */
		len = min(len, em->len - (start - em->start));
		if (dio_data->data_space_reserved) {
			u64 release_offset;
			u64 release_len = 0;

			if (dio_data->nocow_done) {
				release_offset = start;
				release_len = data_alloc_len;
			} else if (len < data_alloc_len) {
				release_offset = start + len;
				release_len = data_alloc_len - len;
			}

			if (release_len > 0)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       dio_data->data_reserved,
							       release_offset,
							       release_len);
		}
	} else {
		/*
		 * We need to unlock only the end area that we aren't using.
		 * The rest is going to be unlocked by the endio routine.
		 */
		lockstart = start + len;
		if (lockstart < lockend)
			unlock_extents = true;
	}

	if (unlock_extents)
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     lockstart, lockend, &cached_state);
	else
		free_extent_state(cached_state);

	/*
	 * Translate extent map information to iomap.
	 * We trim the extents (and move the addr) even though iomap code does
	 * that, since we have locked only the parts we are performing I/O in.
	 */
	if ((em->block_start == EXTENT_MAP_HOLE) ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
	iomap->length = len;

	if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
		iomap->flags |= IOMAP_F_ZONE_APPEND;

	free_extent_map(em);

	return 0;

unlock_err:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
err:
	if (dio_data->data_space_reserved) {
		btrfs_free_reserved_data_space(BTRFS_I(inode),
					       dio_data->data_reserved,
					       start, data_alloc_len);
		extent_changeset_free(dio_data->data_reserved);
	}

	return ret;
}

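/*
 * iomap_end hook for direct IO: unlock whatever part of the range did not get
 * submitted and, for writes, finish the ordered extents covering it. Returns
 * -ENOTBLK when only part of the range was submitted, so the caller can fall
 * back to buffered IO for the rest.
 */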
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_dio_data *dio_data = iter->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
		return 0;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			__endio_write_update_ordered(BTRFS_I(inode), pos,
					length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1);
		ret = -ENOTBLK;
	}

	if (write)
		extent_changeset_free(dio_data->data_reserved);
	return ret;
}

static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
{
	/*
	 * This implies a barrier so that stores to dio_bio->bi_status before
	 * this and loads of dio_bio->bi_status after this are fully ordered.
	 */
	if (!refcount_dec_and_test(&dip->refs))
		return;

	if (btrfs_op(&dip->bio) == BTRFS_MAP_WRITE) {
		__endio_write_update_ordered(BTRFS_I(dip->inode),
					     dip->file_offset,
					     dip->bytes,
					     !dip->bio.bi_status);
	} else {
		unlock_extent(&BTRFS_I(dip->inode)->io_tree,
			      dip->file_offset,
			      dip->file_offset + dip->bytes - 1);
	}

	kfree(dip->csums);
	bio_endio(&dip->bio);
}

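/*
 * Submit a repair bio for a bad sector of a direct IO read, taking an extra
 * reference on the dio_private so it stays alive until the repair completes.
 */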
static void submit_dio_repair_bio(struct inode *inode, struct bio *bio,
				  int mirror_num,
				  enum btrfs_compression_type compress_type)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	BUG_ON(bio_op(bio) == REQ_OP_WRITE);

	if (btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA))
		return;

	refcount_inc(&dip->refs);
	if (btrfs_map_bio(fs_info, bio, mirror_num))
		refcount_dec(&dip->refs);
}

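/*
 * Verify the checksum of every sector of a completed direct IO read bio and
 * submit repair bios (reading from another mirror) for any sector that failed
 * verification or was not read successfully.
 */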
static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
					     struct btrfs_bio *bbio,
					     const bool uptodate)
{
	struct inode *inode = dip->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 bio_offset = 0;
	blk_status_t err = BLK_STS_OK;

	__bio_for_each_segment(bvec, &bbio->bio, iter, bbio->iter) {
		unsigned int i, nr_sectors, pgoff;

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
		pgoff = bvec.bv_offset;
		for (i = 0; i < nr_sectors; i++) {
			u64 start = bbio->file_offset + bio_offset;

			ASSERT(pgoff < PAGE_SIZE);
			if (uptodate &&
			    (!csum || !check_data_csum(inode, bbio,
						       bio_offset, bvec.bv_page,
						       pgoff, start))) {
				clean_io_failure(fs_info, failure_tree, io_tree,
						 start, bvec.bv_page,
						 btrfs_ino(BTRFS_I(inode)),
						 pgoff);
			} else {
				int ret;

				ret = btrfs_repair_one_sector(inode, &bbio->bio,
						bio_offset, bvec.bv_page, pgoff,
						start, bbio->mirror_num,
						submit_dio_repair_bio);
				if (ret)
					err = errno_to_blk_status(ret);
			}
			ASSERT(bio_offset + sectorsize > bio_offset);
			bio_offset += sectorsize;
			pgoff += sectorsize;
		}
	}
	return err;
}

8041 static void __endio_write_update_ordered(struct btrfs_inode *inode,
8042 					 const u64 offset, const u64 bytes,
8043 					 const bool uptodate)
8044 {
8045 	btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes,
8046 				       finish_ordered_fn, uptodate);
8047 }
8048 
8049 static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
8050 						     struct bio *bio,
8051 						     u64 dio_file_offset)
8052 {
8053 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false);
8054 }
8055 
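/*
 * Endio for one cloned piece of a direct IO bio.  Reads get their checksums
 * verified (and bad sectors repaired) before the status is propagated to
 * the parent dio_private.
 */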
8056 static void btrfs_end_dio_bio(struct bio *bio)
8057 {
8058 	struct btrfs_dio_private *dip = bio->bi_private;
8059 	struct btrfs_bio *bbio = btrfs_bio(bio);
8060 	blk_status_t err = bio->bi_status;
8061 
8062 	if (err)
8063 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8064 			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8065 			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
8066 			   bio->bi_opf, bio->bi_iter.bi_sector,
8067 			   bio->bi_iter.bi_size, err);
8068 
8069 	if (bio_op(bio) == REQ_OP_READ)
8070 		err = btrfs_check_read_dio_bio(dip, bbio, !err);
8071 
8072 	if (err)
8073 		dip->bio.bi_status = err;
8074 
8075 	btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio);
8076 
8077 	bio_put(bio);
8078 	btrfs_dio_private_put(dip);
8079 }
8080 
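/*
 * Submit one cloned direct IO bio.  Writes have their checksums computed
 * here or in a worker thread (async submit); reads are pointed at the
 * checksum array that was looked up up front in btrfs_submit_direct().
 */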
8081 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
8082 		struct inode *inode, u64 file_offset, int async_submit)
8083 {
8084 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8085 	struct btrfs_dio_private *dip = bio->bi_private;
8086 	bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
8087 	blk_status_t ret;
8088 
8089 	/* Check btrfs_submit_bio_hook() for rules about async submit. */
8090 	if (async_submit)
8091 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8092 
8093 	if (!write) {
8094 		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
8095 		if (ret)
8096 			goto err;
8097 	}
8098 
8099 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
8100 		goto map;
8101 
8102 	if (write && async_submit) {
8103 		ret = btrfs_wq_submit_bio(inode, bio, 0, file_offset,
8104 					  btrfs_submit_bio_start_direct_io);
8105 		goto err;
8106 	} else if (write) {
8107 		/*
8108 		 * If we aren't doing async submit, calculate the csum of the
8109 		 * bio now.
8110 		 */
8111 		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false);
8112 		if (ret)
8113 			goto err;
8114 	} else {
8115 		u64 csum_offset;
8116 
8117 		csum_offset = file_offset - dip->file_offset;
8118 		csum_offset >>= fs_info->sectorsize_bits;
8119 		csum_offset *= fs_info->csum_size;
8120 		btrfs_bio(bio)->csum = dip->csums + csum_offset;
8121 	}
8122 map:
8123 	ret = btrfs_map_bio(fs_info, bio, 0);
8124 err:
8125 	return ret;
8126 }
8127 
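/*
 * Split the iomap dio bio into clones that each fit within a single chunk
 * stripe, and submit them.  The dio_private holds one reference per
 * in-flight clone; the initial reference is transferred to the last clone,
 * so that one doesn't need an extra increment.
 */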
8128 static void btrfs_submit_direct(const struct iomap_iter *iter,
8129 		struct bio *dio_bio, loff_t file_offset)
8130 {
8131 	struct btrfs_dio_private *dip =
8132 		container_of(dio_bio, struct btrfs_dio_private, bio);
8133 	struct inode *inode = iter->inode;
8134 	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
8135 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8136 	const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
8137 			     BTRFS_BLOCK_GROUP_RAID56_MASK);
8138 	struct bio *bio;
8139 	u64 start_sector;
8140 	int async_submit = 0;
8141 	u64 submit_len;
8142 	u64 clone_offset = 0;
8143 	u64 clone_len;
8144 	u64 logical;
8145 	int ret;
8146 	blk_status_t status;
8147 	struct btrfs_io_geometry geom;
8148 	struct btrfs_dio_data *dio_data = iter->private;
8149 	struct extent_map *em = NULL;
8150 
8151 	dip->inode = inode;
8152 	dip->file_offset = file_offset;
8153 	dip->bytes = dio_bio->bi_iter.bi_size;
8154 	refcount_set(&dip->refs, 1);
8155 	dip->csums = NULL;
8156 
8157 	if (!write && !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
8158 		unsigned int nr_sectors =
8159 			(dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
8160 
8161 		/*
8162 		 * Load the csums up front to reduce csum tree searches and
8163 		 * contention when submitting bios.
8164 		 */
8165 		status = BLK_STS_RESOURCE;
8166 		dip->csums = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS);
8167 		if (!dip->csums)
8168 			goto out_err;
8169 
8170 		status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
8171 		if (status != BLK_STS_OK)
8172 			goto out_err;
8173 	}
8174 
8175 	start_sector = dio_bio->bi_iter.bi_sector;
8176 	submit_len = dio_bio->bi_iter.bi_size;
8177 
8178 	do {
8179 		logical = start_sector << 9;
8180 		em = btrfs_get_chunk_map(fs_info, logical, submit_len);
8181 		if (IS_ERR(em)) {
8182 			status = errno_to_blk_status(PTR_ERR(em));
8183 			em = NULL;
8184 			goto out_err_em;
8185 		}
8186 		ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
8187 					    logical, &geom);
8188 		if (ret) {
8189 			status = errno_to_blk_status(ret);
8190 			goto out_err_em;
8191 		}
8192 
8193 		clone_len = min(submit_len, geom.len);
8194 		ASSERT(clone_len <= UINT_MAX);
8195 
8196 		/*
8197 		 * This will never fail as it's passing GFP_NOFS and
8198 		 * the allocation is backed by btrfs_bioset.
8199 		 */
8200 		bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
8201 		bio->bi_private = dip;
8202 		bio->bi_end_io = btrfs_end_dio_bio;
8203 		btrfs_bio(bio)->file_offset = file_offset;
8204 
8205 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
8206 			status = extract_ordered_extent(BTRFS_I(inode), bio,
8207 							file_offset);
8208 			if (status) {
8209 				bio_put(bio);
8210 				goto out_err;
8211 			}
8212 		}
8213 
8214 		ASSERT(submit_len >= clone_len);
8215 		submit_len -= clone_len;
8216 
8217 		/*
8218 		 * Increase the count before we submit the bio so we know
8219 		 * the end IO handler won't happen before we increase the
8220 		 * count. Otherwise, the dip might get freed before we're
8221 		 * done setting it up.
8222 		 *
8223 		 * We transfer the initial reference to the last bio, so we
8224 		 * don't need to increment the reference count for the last one.
8225 		 */
8226 		if (submit_len > 0) {
8227 			refcount_inc(&dip->refs);
8228 			/*
8229 			 * If we are submitting more than one bio, submit them
8230 			 * all asynchronously. The exception is RAID 5 or 6, as
8231 			 * asynchronous checksums make it difficult to collect
8232 			 * full stripe writes.
8233 			 */
8234 			if (!raid56)
8235 				async_submit = 1;
8236 		}
8237 
8238 		status = btrfs_submit_dio_bio(bio, inode, file_offset,
8239 						async_submit);
8240 		if (status) {
8241 			bio_put(bio);
8242 			if (submit_len > 0)
8243 				refcount_dec(&dip->refs);
8244 			goto out_err_em;
8245 		}
8246 
8247 		dio_data->submitted += clone_len;
8248 		clone_offset += clone_len;
8249 		start_sector += clone_len >> 9;
8250 		file_offset += clone_len;
8251 
8252 		free_extent_map(em);
8253 	} while (submit_len > 0);
8254 	return;
8255 
8256 out_err_em:
8257 	free_extent_map(em);
8258 out_err:
8259 	dio_bio->bi_status = status;
8260 	btrfs_dio_private_put(dip);
8261 }
8262 
8263 static const struct iomap_ops btrfs_dio_iomap_ops = {
8264 	.iomap_begin            = btrfs_dio_iomap_begin,
8265 	.iomap_end              = btrfs_dio_iomap_end,
8266 };
8267 
8268 static const struct iomap_dio_ops btrfs_dio_ops = {
8269 	.submit_io		= btrfs_submit_direct,
8270 	.bio_set		= &btrfs_dio_bioset,
8271 };
8272 
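/*
 * Entry point for direct IO reads and writes, wiring the btrfs iomap and
 * dio ops into iomap_dio_rw().  IOMAP_DIO_PARTIAL lets the caller fault in
 * pages and retry, passing in the number of bytes already done.
 */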
8273 ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
8274 {
8275 	struct btrfs_dio_data data;
8276 
8277 	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
8278 			    IOMAP_DIO_PARTIAL, &data, done_before);
8279 }
8280 
8281 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8282 			u64 start, u64 len)
8283 {
8284 	int	ret;
8285 
8286 	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
8287 	if (ret)
8288 		return ret;
8289 
8290 	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
8291 }
8292 
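/*
 * Write back a single dirty page.  When called from direct reclaim
 * (PF_MEMALLOC) we only redirty the page; otherwise we grab an inode
 * reference for the ordered extent work and release it as a delayed iput.
 */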
8293 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8294 {
8295 	struct inode *inode = page->mapping->host;
8296 	int ret;
8297 
8298 	if (current->flags & PF_MEMALLOC) {
8299 		redirty_page_for_writepage(wbc, page);
8300 		unlock_page(page);
8301 		return 0;
8302 	}
8303 
8304 	/*
8305 	 * If we are under memory pressure, the VM may call this directly, so
8306 	 * we need to make sure we have the inode referenced for the ordered
8307 	 * extent.  If we can't get a reference, just return as if we did nothing.
8308 	 */
8309 	if (!igrab(inode)) {
8310 		redirty_page_for_writepage(wbc, page);
8311 		return AOP_WRITEPAGE_ACTIVATE;
8312 	}
8313 	ret = extent_write_full_page(page, wbc);
8314 	btrfs_add_delayed_iput(inode);
8315 	return ret;
8316 }
8317 
8318 static int btrfs_writepages(struct address_space *mapping,
8319 			    struct writeback_control *wbc)
8320 {
8321 	return extent_writepages(mapping, wbc);
8322 }
8323 
8324 static void btrfs_readahead(struct readahead_control *rac)
8325 {
8326 	extent_readahead(rac);
8327 }
8328 
8329 /*
8330  * For release_folio() and invalidate_folio() we have a race window where
8331  * folio_end_writeback() is called but the subpage spinlock is not yet released.
8332  * If we continue to release/invalidate the page, we could cause use-after-free
8333  * for the subpage spinlock.  So this function spins and waits for the
8334  * subpage spinlock to be released.
8335  */
8336 static void wait_subpage_spinlock(struct page *page)
8337 {
8338 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
8339 	struct btrfs_subpage *subpage;
8340 
8341 	if (!btrfs_is_subpage(fs_info, page))
8342 		return;
8343 
8344 	ASSERT(PagePrivate(page) && page->private);
8345 	subpage = (struct btrfs_subpage *)page->private;
8346 
8347 	/*
8348 	 * This may look insane as we just acquire the spinlock and release it,
8349 	 * without doing anything.  But we just want to make sure no one is
8350 	 * still holding the subpage spinlock.
8351 	 * And since the page is neither dirty nor under writeback, and we have
8352 	 * the page locked, the only possible way to hold a spinlock is from the endio
8353 	 * function to clear page writeback.
8354 	 *
8355 	 * Here we just acquire the spinlock so that all existing callers
8356 	 * should exit and we're safe to release/invalidate the page.
8357 	 */
8358 	spin_lock_irq(&subpage->lock);
8359 	spin_unlock_irq(&subpage->lock);
8360 }
8361 
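/*
 * Try to drop the extent mappings attached to a folio.  On success, also
 * detach the folio's private state, after making sure no endio handler is
 * still holding the subpage spinlock.
 */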
8362 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8363 {
8364 	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
8365 
8366 	if (ret == 1) {
8367 		wait_subpage_spinlock(&folio->page);
8368 		clear_page_extent_mapped(&folio->page);
8369 	}
8370 	return ret;
8371 }
8372 
8373 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8374 {
8375 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
8376 		return false;
8377 	return __btrfs_release_folio(folio, gfp_flags);
8378 }
8379 
8380 #ifdef CONFIG_MIGRATION
8381 static int btrfs_migratepage(struct address_space *mapping,
8382 			     struct page *newpage, struct page *page,
8383 			     enum migrate_mode mode)
8384 {
8385 	int ret;
8386 
8387 	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
8388 	if (ret != MIGRATEPAGE_SUCCESS)
8389 		return ret;
8390 
8391 	if (page_has_private(page))
8392 		attach_page_private(newpage, detach_page_private(page));
8393 
8394 	if (PageOrdered(page)) {
8395 		ClearPageOrdered(page);
8396 		SetPageOrdered(newpage);
8397 	}
8398 
8399 	if (mode != MIGRATE_SYNC_NO_COPY)
8400 		migrate_page_copy(newpage, page);
8401 	else
8402 		migrate_page_states(newpage, page);
8403 	return MIGRATEPAGE_SUCCESS;
8404 }
8405 #endif
8406 
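/*
 * Invalidate a folio, either fully or partially.  A full invalidation walks
 * every ordered extent overlapping the folio, accounts it as finished or
 * truncated, releases qgroup reservations and clears the extent states; a
 * partial invalidation only attempts to release the extent mappings.
 */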
8407 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
8408 				 size_t length)
8409 {
8410 	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
8411 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
8412 	struct extent_io_tree *tree = &inode->io_tree;
8413 	struct extent_state *cached_state = NULL;
8414 	u64 page_start = folio_pos(folio);
8415 	u64 page_end = page_start + folio_size(folio) - 1;
8416 	u64 cur;
8417 	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
8418 
8419 	/*
8420 	 * We have the folio locked, so no new ordered extent can be created on
8421 	 * it, nor can any bio be submitted for this folio.
8422 	 *
8423 	 * But an already submitted bio can still be finished on this folio.
8424 	 * Furthermore, the endio function won't skip a folio that has Ordered
8425 	 * (Private2) already cleared, so it's possible for endio and
8426 	 * invalidate_folio to do the same ordered extent accounting twice
8427 	 * on one folio.
8428 	 *
8429 	 * So here we wait for any submitted bios to finish, so that we won't
8430 	 * do double ordered extent accounting on the same folio.
8431 	 */
8432 	folio_wait_writeback(folio);
8433 	wait_subpage_spinlock(&folio->page);
8434 
8435 	/*
8436 	 * For the subpage case, we have call sites like
8437 	 * btrfs_punch_hole_lock_range() which pass a range not aligned to the
8438 	 * sectorsize.
8439 	 * If the range doesn't cover the full folio, we don't need to and
8440 	 * shouldn't clear page extent mapped, as folio->private can still
8441 	 * record subpage dirty bits for other part of the range.
8442 	 *
8443 	 * For cases that invalidate the full folio even though the range
8444 	 * doesn't cover the full folio, like invalidating the last folio, we're
8445 	 * still safe to wait for the ordered extent to finish.
8446 	 */
8447 	if (!(offset == 0 && length == folio_size(folio))) {
8448 		btrfs_release_folio(folio, GFP_NOFS);
8449 		return;
8450 	}
8451 
8452 	if (!inode_evicting)
8453 		lock_extent_bits(tree, page_start, page_end, &cached_state);
8454 
8455 	cur = page_start;
8456 	while (cur < page_end) {
8457 		struct btrfs_ordered_extent *ordered;
8458 		bool delete_states;
8459 		u64 range_end;
8460 		u32 range_len;
8461 
8462 		ordered = btrfs_lookup_first_ordered_range(inode, cur,
8463 							   page_end + 1 - cur);
8464 		if (!ordered) {
8465 			range_end = page_end;
8466 			/*
8467 			 * No ordered extent covering this range, we are safe
8468 			 * to delete all extent states in the range.
8469 			 */
8470 			delete_states = true;
8471 			goto next;
8472 		}
8473 		if (ordered->file_offset > cur) {
8474 			/*
8475 			 * There is a range between [cur, oe->file_offset) not
8476 			 * covered by any ordered extent.
8477 			 * We are safe to delete all extent states, and handle
8478 			 * the ordered extent in the next iteration.
8479 			 */
8480 			range_end = ordered->file_offset - 1;
8481 			delete_states = true;
8482 			goto next;
8483 		}
8484 
8485 		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
8486 				page_end);
8487 		ASSERT(range_end + 1 - cur < U32_MAX);
8488 		range_len = range_end + 1 - cur;
8489 		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
8490 			/*
8491 			 * If Ordered (Private2) is cleared, it means endio has
8492 			 * already been executed for the range.
8493 			 * We can't delete the extent states as
8494 			 * btrfs_finish_ordered_io() may still use some of them.
8495 			 */
8496 			delete_states = false;
8497 			goto next;
8498 		}
8499 		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
8500 
8501 		/*
8502 		 * IO on this page will never be started, so we need to account
8503 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8504 		 * here; we must leave that up to the ordered extent completion.
8505 		 *
8506 		 * This will also unlock the range for incoming
8507 		 * btrfs_finish_ordered_io().
8508 		 */
8509 		if (!inode_evicting)
8510 			clear_extent_bit(tree, cur, range_end,
8511 					 EXTENT_DELALLOC |
8512 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8513 					 EXTENT_DEFRAG, 1, 0, &cached_state);
8514 
8515 		spin_lock_irq(&inode->ordered_tree.lock);
8516 		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8517 		ordered->truncated_len = min(ordered->truncated_len,
8518 					     cur - ordered->file_offset);
8519 		spin_unlock_irq(&inode->ordered_tree.lock);
8520 
8521 		if (btrfs_dec_test_ordered_pending(inode, &ordered,
8522 						   cur, range_end + 1 - cur)) {
8523 			btrfs_finish_ordered_io(ordered);
8524 			/*
8525 			 * The ordered extent has finished, now we're again
8526 			 * safe to delete all extent states of the range.
8527 			 */
8528 			delete_states = true;
8529 		} else {
8530 			/*
8531 			 * btrfs_finish_ordered_io() will get executed by endio
8532 			 * of other pages, thus we can't delete extent states
8533 			 * anymore
8534 			 */
8535 			delete_states = false;
8536 		}
8537 next:
8538 		if (ordered)
8539 			btrfs_put_ordered_extent(ordered);
8540 		/*
8541 		 * Qgroup reserved space handler
8542 		 * Sector(s) here will be either:
8543 		 *
8544 		 * 1) Already written to disk or bio already finished
8545 		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
8546 		 *    Qgroup will be handled by its qgroup_record then.
8547 		 *    btrfs_qgroup_free_data() call will do nothing here.
8548 		 *
8549 		 * 2) Not written to disk yet
8550 		 *    Then the btrfs_qgroup_free_data() call will clear the
8551 		 *    QGROUP_RESERVED bit of its io_tree and free the qgroup
8552 		 *    reserved data space, since the IO will never happen for
8553 		 *    this page.
8554 		 */
8555 		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
8556 		if (!inode_evicting) {
8557 			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
8558 				 EXTENT_DELALLOC | EXTENT_UPTODATE |
8559 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
8560 				 delete_states, &cached_state);
8561 		}
8562 		cur = range_end + 1;
8563 	}
8564 	/*
8565 	 * We have iterated through all ordered extents of the page, the page
8566 	 * should not have Ordered (Private2) anymore, or the above iteration
8567 	 * did something wrong.
8568 	 */
8569 	ASSERT(!folio_test_ordered(folio));
8570 	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
8571 	if (!inode_evicting)
8572 		__btrfs_release_folio(folio, GFP_NOFS);
8573 	clear_page_extent_mapped(&folio->page);
8574 }
8575 
8576 /*
8577  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8578  * called from a page fault handler when a page is first dirtied. Hence we must
8579  * be careful to check for EOF conditions here. We set the page up correctly
8580  * for a written page which means we get ENOSPC checking when writing into
8581  * holes and correct delalloc and unwritten extent mapping on filesystems that
8582  * support these features.
8583  *
8584  * We are not allowed to take the i_mutex here so we have to play games to
8585  * protect against truncate races as the page could now be beyond EOF.  Because
8586  * truncate_setsize() writes the inode size before removing pages, once we have
8587  * the page lock we can determine safely if the page is beyond EOF. If it is not
8588  * beyond EOF, then the page is guaranteed safe against truncation until we
8589  * unlock the page.
8590  */
8591 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8592 {
8593 	struct page *page = vmf->page;
8594 	struct inode *inode = file_inode(vmf->vma->vm_file);
8595 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8596 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8597 	struct btrfs_ordered_extent *ordered;
8598 	struct extent_state *cached_state = NULL;
8599 	struct extent_changeset *data_reserved = NULL;
8600 	unsigned long zero_start;
8601 	loff_t size;
8602 	vm_fault_t ret;
8603 	int ret2;
8604 	int reserved = 0;
8605 	u64 reserved_space;
8606 	u64 page_start;
8607 	u64 page_end;
8608 	u64 end;
8609 
8610 	reserved_space = PAGE_SIZE;
8611 
8612 	sb_start_pagefault(inode->i_sb);
8613 	page_start = page_offset(page);
8614 	page_end = page_start + PAGE_SIZE - 1;
8615 	end = page_end;
8616 
8617 	/*
8618 	 * Reserving delalloc space after obtaining the page lock can lead to
8619 	 * deadlock. For example, if a dirty page is locked by this function
8620 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8621 	 * dirty page write out, then the btrfs_writepage() function could
8622 	 * end up waiting indefinitely to get a lock on the page currently
8623 	 * being processed by btrfs_page_mkwrite() function.
8624 	 */
8625 	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8626 					    page_start, reserved_space);
8627 	if (!ret2) {
8628 		ret2 = file_update_time(vmf->vma->vm_file);
8629 		reserved = 1;
8630 	}
8631 	if (ret2) {
8632 		ret = vmf_error(ret2);
8633 		if (reserved)
8634 			goto out;
8635 		goto out_noreserve;
8636 	}
8637 
8638 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8639 again:
8640 	down_read(&BTRFS_I(inode)->i_mmap_lock);
8641 	lock_page(page);
8642 	size = i_size_read(inode);
8643 
8644 	if ((page->mapping != inode->i_mapping) ||
8645 	    (page_start >= size)) {
8646 		/* page got truncated out from underneath us */
8647 		goto out_unlock;
8648 	}
8649 	wait_on_page_writeback(page);
8650 
8651 	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8652 	ret2 = set_page_extent_mapped(page);
8653 	if (ret2 < 0) {
8654 		ret = vmf_error(ret2);
8655 		unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
8656 		goto out_unlock;
8657 	}
8658 
8659 	/*
8660 	 * We can't set the delalloc bits if there are pending ordered
8661 	 * extents.  Drop our locks and wait for them to finish.
8662 	 */
8663 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8664 			PAGE_SIZE);
8665 	if (ordered) {
8666 		unlock_extent_cached(io_tree, page_start, page_end,
8667 				     &cached_state);
8668 		unlock_page(page);
8669 		up_read(&BTRFS_I(inode)->i_mmap_lock);
8670 		btrfs_start_ordered_extent(ordered, 1);
8671 		btrfs_put_ordered_extent(ordered);
8672 		goto again;
8673 	}
8674 
8675 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8676 		reserved_space = round_up(size - page_start,
8677 					  fs_info->sectorsize);
8678 		if (reserved_space < PAGE_SIZE) {
8679 			end = page_start + reserved_space - 1;
8680 			btrfs_delalloc_release_space(BTRFS_I(inode),
8681 					data_reserved, page_start,
8682 					PAGE_SIZE - reserved_space, true);
8683 		}
8684 	}
8685 
8686 	/*
8687 	 * page_mkwrite gets called when the page is first dirtied after it's
8688 	 * faulted in, but write(2) could also dirty a page and set delalloc
8689 	 * bits, so in this case, for space accounting reasons, we still need to
8690 	 * clear any delalloc bits within this page range since we have to
8691 	 * reserve data&meta space before lock_page() (see above comments).
8692 	 */
8693 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8694 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8695 			  EXTENT_DEFRAG, 0, 0, &cached_state);
8696 
8697 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8698 					&cached_state);
8699 	if (ret2) {
8700 		unlock_extent_cached(io_tree, page_start, page_end,
8701 				     &cached_state);
8702 		ret = VM_FAULT_SIGBUS;
8703 		goto out_unlock;
8704 	}
8705 
8706 	/* page is wholly or partially inside EOF */
8707 	if (page_start + PAGE_SIZE > size)
8708 		zero_start = offset_in_page(size);
8709 	else
8710 		zero_start = PAGE_SIZE;
8711 
8712 	if (zero_start != PAGE_SIZE) {
8713 		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8714 		flush_dcache_page(page);
8715 	}
8716 	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
8717 	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
8718 	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
8719 
8720 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8721 
8722 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
8723 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8724 
8725 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8726 	sb_end_pagefault(inode->i_sb);
8727 	extent_changeset_free(data_reserved);
8728 	return VM_FAULT_LOCKED;
8729 
8730 out_unlock:
8731 	unlock_page(page);
8732 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8733 out:
8734 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8735 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8736 				     reserved_space, (ret != 0));
8737 out_noreserve:
8738 	sb_end_pagefault(inode->i_sb);
8739 	extent_changeset_free(data_reserved);
8740 	return ret;
8741 }
8742 
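/*
 * Truncate the inode down to its current i_size, dropping file extent items
 * in batches and restarting the transaction whenever the reservation runs
 * out (-ENOSPC/-EAGAIN), so we never need one huge reservation for the
 * whole operation.
 */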
8743 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
8744 {
8745 	struct btrfs_truncate_control control = {
8746 		.inode = BTRFS_I(inode),
8747 		.ino = btrfs_ino(BTRFS_I(inode)),
8748 		.min_type = BTRFS_EXTENT_DATA_KEY,
8749 		.clear_extent_range = true,
8750 	};
8751 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8752 	struct btrfs_root *root = BTRFS_I(inode)->root;
8753 	struct btrfs_block_rsv *rsv;
8754 	int ret;
8755 	struct btrfs_trans_handle *trans;
8756 	u64 mask = fs_info->sectorsize - 1;
8757 	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8758 
8759 	if (!skip_writeback) {
8760 		ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8761 					       (u64)-1);
8762 		if (ret)
8763 			return ret;
8764 	}
8765 
8766 	/*
8767 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8768 	 * things going on here:
8769 	 *
8770 	 * 1) We need to reserve space to update our inode.
8771 	 *
8772 	 * 2) We need to have something to cache all the space that is going to
8773 	 * be freed up by the truncate operation, but also have some slack
8774 	 * space reserved in case it uses space during the truncate (thank you
8775 	 * very much snapshotting).
8776 	 *
8777 	 * And we need these to be separate.  The fact is we can use a lot of
8778 	 * space doing the truncate, and we have no earthly idea how much space
8779 	 * we will use, so we need the truncate reservation to be separate so it
8780 	 * doesn't end up using space reserved for updating the inode.  We also
8781 	 * need to be able to stop the transaction and start a new one, which
8782 	 * means we need to be able to update the inode several times, and we
8783 	 * have no way of knowing how many times that will be, so we can't just
8784 	 * reserve 1 item for the entirety of the operation, so that has to be
8785 	 * done separately as well.
8786 	 *
8787 	 * So that leaves us with
8788 	 *
8789 	 * 1) rsv - for the truncate reservation, which we will steal from the
8790 	 * transaction reservation.
8791 	 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
8792 	 * updating the inode.
8793 	 */
8794 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8795 	if (!rsv)
8796 		return -ENOMEM;
8797 	rsv->size = min_size;
8798 	rsv->failfast = 1;
8799 
8800 	/*
8801 	 * 1 for the truncate slack space
8802 	 * 1 for updating the inode.
8803 	 */
8804 	trans = btrfs_start_transaction(root, 2);
8805 	if (IS_ERR(trans)) {
8806 		ret = PTR_ERR(trans);
8807 		goto out;
8808 	}
8809 
8810 	/* Migrate the slack space for the truncate to our reserve */
8811 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8812 				      min_size, false);
8813 	BUG_ON(ret);
8814 
8815 	trans->block_rsv = rsv;
8816 
8817 	while (1) {
8818 		struct extent_state *cached_state = NULL;
8819 		const u64 new_size = inode->i_size;
8820 		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8821 
8822 		control.new_size = new_size;
8823 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
8824 				 &cached_state);
8825 		/*
8826 		 * We want to drop from the next block forward in case this new
8827 		 * size is not block aligned since we will be keeping the last
8828 		 * block of the extent just the way it is.
8829 		 */
8830 		btrfs_drop_extent_cache(BTRFS_I(inode),
8831 					ALIGN(new_size, fs_info->sectorsize),
8832 					(u64)-1, 0);
8833 
8834 		ret = btrfs_truncate_inode_items(trans, root, &control);
8835 
8836 		inode_sub_bytes(inode, control.sub_bytes);
8837 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), control.last_size);
8838 
8839 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
8840 				     (u64)-1, &cached_state);
8841 
8842 		trans->block_rsv = &fs_info->trans_block_rsv;
8843 		if (ret != -ENOSPC && ret != -EAGAIN)
8844 			break;
8845 
8846 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
8847 		if (ret)
8848 			break;
8849 
8850 		btrfs_end_transaction(trans);
8851 		btrfs_btree_balance_dirty(fs_info);
8852 
8853 		trans = btrfs_start_transaction(root, 2);
8854 		if (IS_ERR(trans)) {
8855 			ret = PTR_ERR(trans);
8856 			trans = NULL;
8857 			break;
8858 		}
8859 
8860 		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8861 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8862 					      rsv, min_size, false);
8863 		BUG_ON(ret);	/* shouldn't happen */
8864 		trans->block_rsv = rsv;
8865 	}
8866 
8867 	/*
8868 	 * We can't call btrfs_truncate_block inside a trans handle as we could
8869 	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
8870 	 * know we've truncated everything except the last little bit, and can
8871 	 * do btrfs_truncate_block and then update the disk_i_size.
8872 	 */
8873 	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
8874 		btrfs_end_transaction(trans);
8875 		btrfs_btree_balance_dirty(fs_info);
8876 
8877 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
8878 		if (ret)
8879 			goto out;
8880 		trans = btrfs_start_transaction(root, 1);
8881 		if (IS_ERR(trans)) {
8882 			ret = PTR_ERR(trans);
8883 			goto out;
8884 		}
8885 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
8886 	}
8887 
8888 	if (trans) {
8889 		int ret2;
8890 
8891 		trans->block_rsv = &fs_info->trans_block_rsv;
8892 		ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
8893 		if (ret2 && !ret)
8894 			ret = ret2;
8895 
8896 		ret2 = btrfs_end_transaction(trans);
8897 		if (ret2 && !ret)
8898 			ret = ret2;
8899 		btrfs_btree_balance_dirty(fs_info);
8900 	}
8901 out:
8902 	btrfs_free_block_rsv(fs_info, rsv);
8903 	/*
8904 	 * So if we truncate and then write and fsync we normally would just
8905 	 * write the extents that changed, which is a problem if we need to
8906 	 * first truncate that entire inode.  So set this flag so we write out
8907 	 * all of the extents in the inode to the sync log so we're completely
8908 	 * safe.
8909 	 *
8910 	 * If no extents were dropped or trimmed we don't need to force the next
8911 	 * fsync to truncate all the inode's items from the log and re-log them
8912 	 * all. This means the truncate operation did not change the file size,
8913 	 * or changed it to a smaller size but there was only an implicit hole
8914 	 * between the old i_size and the new i_size, and there were no prealloc
8915 	 * extents beyond i_size to drop.
8916 	 */
8917 	if (control.extents_found > 0)
8918 		btrfs_set_inode_full_sync(BTRFS_I(inode));
8919 
8920 	return ret;
8921 }
8922 
8923 struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
8924 				     struct inode *dir)
8925 {
8926 	struct inode *inode;
8927 
8928 	inode = new_inode(dir->i_sb);
8929 	if (inode) {
8930 		/*
8931 		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8932 		 * the parent's sgid bit is set. This is probably a bug.
8933 		 */
8934 		inode_init_owner(mnt_userns, inode, NULL,
8935 				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8936 		inode->i_op = &btrfs_dir_inode_operations;
8937 		inode->i_fop = &btrfs_dir_file_operations;
8938 	}
8939 	return inode;
8940 }
8941 
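/*
 * Allocate an in-memory btrfs inode and initialize its runtime state:
 * counters, block reservation, extent map and io trees, ordered extent
 * tree, locks and list heads.
 */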
8942 struct inode *btrfs_alloc_inode(struct super_block *sb)
8943 {
8944 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8945 	struct btrfs_inode *ei;
8946 	struct inode *inode;
8947 
8948 	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8949 	if (!ei)
8950 		return NULL;
8951 
8952 	ei->root = NULL;
8953 	ei->generation = 0;
8954 	ei->last_trans = 0;
8955 	ei->last_sub_trans = 0;
8956 	ei->logged_trans = 0;
8957 	ei->delalloc_bytes = 0;
8958 	ei->new_delalloc_bytes = 0;
8959 	ei->defrag_bytes = 0;
8960 	ei->disk_i_size = 0;
8961 	ei->flags = 0;
8962 	ei->ro_flags = 0;
8963 	ei->csum_bytes = 0;
8964 	ei->index_cnt = (u64)-1;
8965 	ei->dir_index = 0;
8966 	ei->last_unlink_trans = 0;
8967 	ei->last_reflink_trans = 0;
8968 	ei->last_log_commit = 0;
8969 
8970 	spin_lock_init(&ei->lock);
8971 	ei->outstanding_extents = 0;
8972 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8973 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8974 					      BTRFS_BLOCK_RSV_DELALLOC);
8975 	ei->runtime_flags = 0;
8976 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8977 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8978 
8979 	ei->delayed_node = NULL;
8980 
8981 	ei->i_otime.tv_sec = 0;
8982 	ei->i_otime.tv_nsec = 0;
8983 
8984 	inode = &ei->vfs_inode;
8985 	extent_map_tree_init(&ei->extent_tree);
8986 	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
8987 	extent_io_tree_init(fs_info, &ei->io_failure_tree,
8988 			    IO_TREE_INODE_IO_FAILURE, inode);
8989 	extent_io_tree_init(fs_info, &ei->file_extent_tree,
8990 			    IO_TREE_INODE_FILE_EXTENT, inode);
8991 	ei->io_tree.track_uptodate = true;
8992 	ei->io_failure_tree.track_uptodate = true;
8993 	atomic_set(&ei->sync_writers, 0);
8994 	mutex_init(&ei->log_mutex);
8995 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8996 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8997 	INIT_LIST_HEAD(&ei->delayed_iput);
8998 	RB_CLEAR_NODE(&ei->rb_node);
8999 	init_rwsem(&ei->i_mmap_lock);
9000 
9001 	return inode;
9002 }
9003 
9004 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9005 void btrfs_test_destroy_inode(struct inode *inode)
9006 {
9007 	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9008 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9009 }
9010 #endif
9011 
9012 void btrfs_free_inode(struct inode *inode)
9013 {
9014 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9015 }
9016 
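/*
 * Final teardown of an in-memory inode.  The WARN_ONs flag accounting that
 * should have reached zero by now; any ordered extents still attached at
 * this point indicate an earlier error and are cleaned up here.
 */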
9017 void btrfs_destroy_inode(struct inode *vfs_inode)
9018 {
9019 	struct btrfs_ordered_extent *ordered;
9020 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
9021 	struct btrfs_root *root = inode->root;
9022 
9023 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
9024 	WARN_ON(vfs_inode->i_data.nrpages);
9025 	WARN_ON(inode->block_rsv.reserved);
9026 	WARN_ON(inode->block_rsv.size);
9027 	WARN_ON(inode->outstanding_extents);
9028 	if (!S_ISDIR(vfs_inode->i_mode)) {
9029 		WARN_ON(inode->delalloc_bytes);
9030 		WARN_ON(inode->new_delalloc_bytes);
9031 	}
9032 	WARN_ON(inode->csum_bytes);
9033 	WARN_ON(inode->defrag_bytes);
9034 
9035 	/*
9036 	 * This can happen when we create an inode, but somebody else also
9037 	 * created the same inode and we need to destroy the one we already
9038 	 * created.
9039 	 */
9040 	if (!root)
9041 		return;
9042 
9043 	while (1) {
9044 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9045 		if (!ordered)
9046 			break;
9047 
9048 		btrfs_err(root->fs_info,
9049 			  "found ordered extent %llu %llu on inode cleanup",
9050 			  ordered->file_offset, ordered->num_bytes);
9051 		btrfs_remove_ordered_extent(inode, ordered);
9052 		/* One put for the lookup ref, one for the ordered extent's own ref. */
9053 		btrfs_put_ordered_extent(ordered);
9054 		btrfs_put_ordered_extent(ordered);
9055 	}
9056 	btrfs_qgroup_check_reserved_leak(inode);
9057 	inode_tree_del(inode);
9058 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9059 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
9060 	btrfs_put_root(inode->root);
9061 }
9062 
9063 int btrfs_drop_inode(struct inode *inode)
9064 {
9065 	struct btrfs_root *root = BTRFS_I(inode)->root;
9066 
9067 	if (root == NULL)
9068 		return 1;
9069 
9070 	/* The snapshot/subvolume tree is being deleted. */
9071 	if (btrfs_root_refs(&root->root_item) == 0)
9072 		return 1;
9073 	else
9074 		return generic_drop_inode(inode);
9075 }
9076 
9077 static void init_once(void *foo)
9078 {
9079 	struct btrfs_inode *ei = foo;
9080 
9081 	inode_init_once(&ei->vfs_inode);
9082 }
9083 
9084 void __cold btrfs_destroy_cachep(void)
9085 {
9086 	/*
9087 	 * Make sure all delayed rcu free inodes are flushed before we
9088 	 * destroy the caches.
9089 	 */
9090 	rcu_barrier();
9091 	bioset_exit(&btrfs_dio_bioset);
9092 	kmem_cache_destroy(btrfs_inode_cachep);
9093 	kmem_cache_destroy(btrfs_trans_handle_cachep);
9094 	kmem_cache_destroy(btrfs_path_cachep);
9095 	kmem_cache_destroy(btrfs_free_space_cachep);
9096 	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
9097 }
9098 
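/*
 * Create the slab caches and the direct IO bioset used by the inode code.
 * On any failure, tear down whatever was already created.
 */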
9099 int __init btrfs_init_cachep(void)
9100 {
9101 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9102 			sizeof(struct btrfs_inode), 0,
9103 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
9104 			init_once);
9105 	if (!btrfs_inode_cachep)
9106 		goto fail;
9107 
9108 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9109 			sizeof(struct btrfs_trans_handle), 0,
9110 			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9111 	if (!btrfs_trans_handle_cachep)
9112 		goto fail;
9113 
9114 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
9115 			sizeof(struct btrfs_path), 0,
9116 			SLAB_MEM_SPREAD, NULL);
9117 	if (!btrfs_path_cachep)
9118 		goto fail;
9119 
9120 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9121 			sizeof(struct btrfs_free_space), 0,
9122 			SLAB_MEM_SPREAD, NULL);
9123 	if (!btrfs_free_space_cachep)
9124 		goto fail;
9125 
9126 	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
9127 							PAGE_SIZE, PAGE_SIZE,
9128 							SLAB_MEM_SPREAD, NULL);
9129 	if (!btrfs_free_space_bitmap_cachep)
9130 		goto fail;
9131 
9132 	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
9133 			offsetof(struct btrfs_dio_private, bio),
9134 			BIOSET_NEED_BVECS))
9135 		goto fail;
9136 
9137 	return 0;
9138 fail:
9139 	btrfs_destroy_cachep();
9140 	return -ENOMEM;
9141 }
9142 
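/*
 * getattr for btrfs: reports the birth time, btrfs-specific attribute
 * flags, the per-subvolume anonymous device, and a block count that also
 * includes not-yet-flushed delalloc bytes.
 */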
9143 static int btrfs_getattr(struct user_namespace *mnt_userns,
9144 			 const struct path *path, struct kstat *stat,
9145 			 u32 request_mask, unsigned int flags)
9146 {
9147 	u64 delalloc_bytes;
9148 	u64 inode_bytes;
9149 	struct inode *inode = d_inode(path->dentry);
9150 	u32 blocksize = inode->i_sb->s_blocksize;
9151 	u32 bi_flags = BTRFS_I(inode)->flags;
9152 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
9153 
9154 	stat->result_mask |= STATX_BTIME;
9155 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9156 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9157 	if (bi_flags & BTRFS_INODE_APPEND)
9158 		stat->attributes |= STATX_ATTR_APPEND;
9159 	if (bi_flags & BTRFS_INODE_COMPRESS)
9160 		stat->attributes |= STATX_ATTR_COMPRESSED;
9161 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
9162 		stat->attributes |= STATX_ATTR_IMMUTABLE;
9163 	if (bi_flags & BTRFS_INODE_NODUMP)
9164 		stat->attributes |= STATX_ATTR_NODUMP;
9165 	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
9166 		stat->attributes |= STATX_ATTR_VERITY;
9167 
9168 	stat->attributes_mask |= (STATX_ATTR_APPEND |
9169 				  STATX_ATTR_COMPRESSED |
9170 				  STATX_ATTR_IMMUTABLE |
9171 				  STATX_ATTR_NODUMP);
9172 
9173 	generic_fillattr(mnt_userns, inode, stat);
9174 	stat->dev = BTRFS_I(inode)->root->anon_dev;
9175 
9176 	spin_lock(&BTRFS_I(inode)->lock);
9177 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9178 	inode_bytes = inode_get_bytes(inode);
9179 	spin_unlock(&BTRFS_I(inode)->lock);
9180 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
9181 			ALIGN(delalloc_bytes, blocksize)) >> 9;
9182 	return 0;
9183 }
9184 
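/*
 * Atomically exchange two directory entries (RENAME_EXCHANGE): both names
 * are unlinked and re-added within one transaction, with the log trees
 * pinned so a concurrent log sync can't record a half-done rename.
 */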
9185 static int btrfs_rename_exchange(struct inode *old_dir,
9186 			      struct dentry *old_dentry,
9187 			      struct inode *new_dir,
9188 			      struct dentry *new_dentry)
9189 {
9190 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9191 	struct btrfs_trans_handle *trans;
9192 	unsigned int trans_num_items;
9193 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9194 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9195 	struct inode *new_inode = new_dentry->d_inode;
9196 	struct inode *old_inode = old_dentry->d_inode;
9197 	struct timespec64 ctime = current_time(old_inode);
9198 	struct btrfs_rename_ctx old_rename_ctx;
9199 	struct btrfs_rename_ctx new_rename_ctx;
9200 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9201 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9202 	u64 old_idx = 0;
9203 	u64 new_idx = 0;
9204 	int ret;
9205 	int ret2;
9206 	bool need_abort = false;
9207 
9208 	/*
9209 	 * For non-subvolumes allow exchange only within one subvolume, in the
9210 	 * same inode namespace. Two subvolumes (each represented as a directory)
9211 	 * can be exchanged as they're logical links and have fixed inode numbers.
9212 	 */
9213 	if (root != dest &&
9214 	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
9215 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
9216 		return -EXDEV;
9217 
9218 	/* close the race window with snapshot create/destroy ioctl */
9219 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
9220 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
9221 		down_read(&fs_info->subvol_sem);
9222 
9223 	/*
9224 	 * For each inode:
9225 	 * 1 to remove old dir item
9226 	 * 1 to remove old dir index
9227 	 * 1 to add new dir item
9228 	 * 1 to add new dir index
9229 	 * 1 to update parent inode
9230 	 *
9231 	 * If the parents are the same, we only need to account for one
9232 	 */
9233 	trans_num_items = (old_dir == new_dir ? 9 : 10);
9234 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9235 		/*
9236 		 * 1 to remove old root ref
9237 		 * 1 to remove old root backref
9238 		 * 1 to add new root ref
9239 		 * 1 to add new root backref
9240 		 */
9241 		trans_num_items += 4;
9242 	} else {
9243 		/*
9244 		 * 1 to update inode item
9245 		 * 1 to remove old inode ref
9246 		 * 1 to add new inode ref
9247 		 */
9248 		trans_num_items += 3;
9249 	}
9250 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9251 		trans_num_items += 4;
9252 	else
9253 		trans_num_items += 3;
9254 	trans = btrfs_start_transaction(root, trans_num_items);
9255 	if (IS_ERR(trans)) {
9256 		ret = PTR_ERR(trans);
9257 		goto out_notrans;
9258 	}
9259 
9260 	if (dest != root) {
9261 		ret = btrfs_record_root_in_trans(trans, dest);
9262 		if (ret)
9263 			goto out_fail;
9264 	}
9265 
9266 	/*
9267 	 * We need to find a free sequence number both in the source and
9268 	 * in the destination directory for the exchange.
9269 	 */
9270 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9271 	if (ret)
9272 		goto out_fail;
9273 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9274 	if (ret)
9275 		goto out_fail;
9276 
9277 	BTRFS_I(old_inode)->dir_index = 0ULL;
9278 	BTRFS_I(new_inode)->dir_index = 0ULL;
9279 
9280 	/* Reference for the source. */
9281 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9282 		/* force full log commit if subvolume involved. */
9283 		btrfs_set_log_full_commit(trans);
9284 	} else {
9285 		ret = btrfs_insert_inode_ref(trans, dest,
9286 					     new_dentry->d_name.name,
9287 					     new_dentry->d_name.len,
9288 					     old_ino,
9289 					     btrfs_ino(BTRFS_I(new_dir)),
9290 					     old_idx);
9291 		if (ret)
9292 			goto out_fail;
9293 		need_abort = true;
9294 	}
9295 
9296 	/* And now for the dest. */
9297 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9298 		/* force full log commit if subvolume involved. */
9299 		btrfs_set_log_full_commit(trans);
9300 	} else {
9301 		ret = btrfs_insert_inode_ref(trans, root,
9302 					     old_dentry->d_name.name,
9303 					     old_dentry->d_name.len,
9304 					     new_ino,
9305 					     btrfs_ino(BTRFS_I(old_dir)),
9306 					     new_idx);
9307 		if (ret) {
9308 			if (need_abort)
9309 				btrfs_abort_transaction(trans, ret);
9310 			goto out_fail;
9311 		}
9312 	}
9313 
9314 	/* Update inode version and ctime/mtime. */
9315 	inode_inc_iversion(old_dir);
9316 	inode_inc_iversion(new_dir);
9317 	inode_inc_iversion(old_inode);
9318 	inode_inc_iversion(new_inode);
9319 	old_dir->i_ctime = old_dir->i_mtime = ctime;
9320 	new_dir->i_ctime = new_dir->i_mtime = ctime;
9321 	old_inode->i_ctime = ctime;
9322 	new_inode->i_ctime = ctime;
9323 
9324 	if (old_dentry->d_parent != new_dentry->d_parent) {
9325 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9326 				BTRFS_I(old_inode), 1);
9327 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9328 				BTRFS_I(new_inode), 1);
9329 	}
9330 
9331 	/* src is a subvolume */
9332 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9333 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9334 	} else { /* src is an inode */
9335 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9336 					   BTRFS_I(old_dentry->d_inode),
9337 					   old_dentry->d_name.name,
9338 					   old_dentry->d_name.len,
9339 					   &old_rename_ctx);
9340 		if (!ret)
9341 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9342 	}
9343 	if (ret) {
9344 		btrfs_abort_transaction(trans, ret);
9345 		goto out_fail;
9346 	}
9347 
9348 	/* dest is a subvolume */
9349 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9350 		ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9351 	} else { /* dest is an inode */
9352 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9353 					   BTRFS_I(new_dentry->d_inode),
9354 					   new_dentry->d_name.name,
9355 					   new_dentry->d_name.len,
9356 					   &new_rename_ctx);
9357 		if (!ret)
9358 			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
9359 	}
9360 	if (ret) {
9361 		btrfs_abort_transaction(trans, ret);
9362 		goto out_fail;
9363 	}
9364 
9365 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9366 			     new_dentry->d_name.name,
9367 			     new_dentry->d_name.len, 0, old_idx);
9368 	if (ret) {
9369 		btrfs_abort_transaction(trans, ret);
9370 		goto out_fail;
9371 	}
9372 
9373 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9374 			     old_dentry->d_name.name,
9375 			     old_dentry->d_name.len, 0, new_idx);
9376 	if (ret) {
9377 		btrfs_abort_transaction(trans, ret);
9378 		goto out_fail;
9379 	}
9380 
9381 	if (old_inode->i_nlink == 1)
9382 		BTRFS_I(old_inode)->dir_index = old_idx;
9383 	if (new_inode->i_nlink == 1)
9384 		BTRFS_I(new_inode)->dir_index = new_idx;
9385 
9386 	/*
9387 	 * Now pin the logs of the roots. We do it to ensure that no other task
9388 	 * can sync the logs while we are in progress with the rename, because
9389 	 * that could result in an inconsistency in case any of the inodes that
9390 	 * are part of this rename operation were logged before.
9391 	 */
9392 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9393 		btrfs_pin_log_trans(root);
9394 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9395 		btrfs_pin_log_trans(dest);
9396 
9397 	/* Do the log updates for all inodes. */
9398 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9399 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9400 				   old_rename_ctx.index, new_dentry->d_parent);
9401 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9402 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
9403 				   new_rename_ctx.index, old_dentry->d_parent);
9404 
9405 	/* Now unpin the logs. */
9406 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9407 		btrfs_end_log_trans(root);
9408 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9409 		btrfs_end_log_trans(dest);
9410 out_fail:
9411 	ret2 = btrfs_end_transaction(trans);
9412 	ret = ret ? ret : ret2;
9413 out_notrans:
9414 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9415 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
9416 		up_read(&fs_info->subvol_sem);
9417 
9418 	return ret;
9419 }
9420 
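/*
 * Allocate the whiteout inode (a character device with WHITEOUT_DEV) that
 * replaces the old name in a RENAME_WHITEOUT rename.
 */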
9421 static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns,
9422 					struct inode *dir)
9423 {
9424 	struct inode *inode;
9425 
9426 	inode = new_inode(dir->i_sb);
9427 	if (inode) {
9428 		inode_init_owner(mnt_userns, inode, dir,
9429 				 S_IFCHR | WHITEOUT_MODE);
9430 		inode->i_op = &btrfs_special_inode_operations;
9431 		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
9432 	}
9433 	return inode;
9434 }
9435 
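/*
 * Plain rename: unlink the old entry (and the existing target, if any),
 * add the new entry, and for RENAME_WHITEOUT also create a whiteout inode
 * in place of the old name.
 */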
9436 static int btrfs_rename(struct user_namespace *mnt_userns,
9437 			struct inode *old_dir, struct dentry *old_dentry,
9438 			struct inode *new_dir, struct dentry *new_dentry,
9439 			unsigned int flags)
9440 {
9441 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9442 	struct btrfs_new_inode_args whiteout_args = {
9443 		.dir = old_dir,
9444 		.dentry = old_dentry,
9445 	};
9446 	struct btrfs_trans_handle *trans;
9447 	unsigned int trans_num_items;
9448 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9449 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9450 	struct inode *new_inode = d_inode(new_dentry);
9451 	struct inode *old_inode = d_inode(old_dentry);
9452 	struct btrfs_rename_ctx rename_ctx;
9453 	u64 index = 0;
9454 	int ret;
9455 	int ret2;
9456 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9457 
9458 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9459 		return -EPERM;
9460 
9461 	/* We only allow renaming subvolume links between subvolumes. */
9462 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9463 		return -EXDEV;
9464 
9465 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9466 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9467 		return -ENOTEMPTY;
9468 
9469 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9470 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9471 		return -ENOTEMPTY;
9472 
9473 
9474 	/* Check for collisions, even if the name isn't there. */
9475 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9476 			     new_dentry->d_name.name,
9477 			     new_dentry->d_name.len);
9478 
9479 	if (ret) {
9480 		if (ret == -EEXIST) {
9481 			/*
9482 			 * We shouldn't get -EEXIST without a new_inode.
9483 			 */
9484 			if (WARN_ON(!new_inode))
9485 				return ret;
9486 		} else {
9487 			/* maybe -EOVERFLOW */
9488 			return ret;
9489 		}
9490 	}
9491 	ret = 0;
9492 
9493 	/*
9494 	 * We're using rename to replace one file with another.  Start IO on it
9495 	 * now so we don't add too much work to the end of the transaction.
9496 	 */
9497 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9498 		filemap_flush(old_inode->i_mapping);
9499 
9500 	if (flags & RENAME_WHITEOUT) {
9501 		whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir);
9502 		if (!whiteout_args.inode)
9503 			return -ENOMEM;
9504 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
9505 		if (ret)
9506 			goto out_whiteout_inode;
9507 	} else {
9508 		/* 1 to update the old parent inode. */
9509 		trans_num_items = 1;
9510 	}
9511 
9512 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9513 		/* Close the race window with snapshot create/destroy ioctl */
9514 		down_read(&fs_info->subvol_sem);
9515 		/*
9516 		 * 1 to remove old root ref
9517 		 * 1 to remove old root backref
9518 		 * 1 to add new root ref
9519 		 * 1 to add new root backref
9520 		 */
9521 		trans_num_items += 4;
9522 	} else {
9523 		/*
9524 		 * 1 to update inode
9525 		 * 1 to remove old inode ref
9526 		 * 1 to add new inode ref
9527 		 */
9528 		trans_num_items += 3;
9529 	}
9530 	/*
9531 	 * 1 to remove old dir item
9532 	 * 1 to remove old dir index
9533 	 * 1 to add new dir item
9534 	 * 1 to add new dir index
9535 	 */
9536 	trans_num_items += 4;
9537 	/* 1 to update new parent inode if it's not the same as the old parent */
9538 	if (new_dir != old_dir)
9539 		trans_num_items++;
9540 	if (new_inode) {
9541 		/*
9542 		 * 1 to update inode
9543 		 * 1 to remove inode ref
9544 		 * 1 to remove dir item
9545 		 * 1 to remove dir index
9546 		 * 1 to possibly add orphan item
9547 		 */
9548 		trans_num_items += 5;
9549 	}
9550 	trans = btrfs_start_transaction(root, trans_num_items);
9551 	if (IS_ERR(trans)) {
9552 		ret = PTR_ERR(trans);
9553 		goto out_notrans;
9554 	}
9555 
9556 	if (dest != root) {
9557 		ret = btrfs_record_root_in_trans(trans, dest);
9558 		if (ret)
9559 			goto out_fail;
9560 	}
9561 
9562 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9563 	if (ret)
9564 		goto out_fail;
9565 
9566 	BTRFS_I(old_inode)->dir_index = 0ULL;
9567 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9568 		/* force full log commit if subvolume involved. */
9569 		btrfs_set_log_full_commit(trans);
9570 	} else {
9571 		ret = btrfs_insert_inode_ref(trans, dest,
9572 					     new_dentry->d_name.name,
9573 					     new_dentry->d_name.len,
9574 					     old_ino,
9575 					     btrfs_ino(BTRFS_I(new_dir)), index);
9576 		if (ret)
9577 			goto out_fail;
9578 	}
9579 
9580 	inode_inc_iversion(old_dir);
9581 	inode_inc_iversion(new_dir);
9582 	inode_inc_iversion(old_inode);
9583 	old_dir->i_ctime = old_dir->i_mtime =
9584 	new_dir->i_ctime = new_dir->i_mtime =
9585 	old_inode->i_ctime = current_time(old_dir);
9586 
9587 	if (old_dentry->d_parent != new_dentry->d_parent)
9588 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9589 				BTRFS_I(old_inode), 1);
9590 
9591 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9592 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9593 	} else {
9594 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9595 					BTRFS_I(d_inode(old_dentry)),
9596 					old_dentry->d_name.name,
9597 					old_dentry->d_name.len,
9598 					&rename_ctx);
9599 		if (!ret)
9600 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9601 	}
9602 	if (ret) {
9603 		btrfs_abort_transaction(trans, ret);
9604 		goto out_fail;
9605 	}
9606 
9607 	if (new_inode) {
9608 		inode_inc_iversion(new_inode);
9609 		new_inode->i_ctime = current_time(new_inode);
9610 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9611 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9612 			ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9613 			BUG_ON(new_inode->i_nlink == 0);
9614 		} else {
9615 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9616 						 BTRFS_I(d_inode(new_dentry)),
9617 						 new_dentry->d_name.name,
9618 						 new_dentry->d_name.len);
9619 		}
9620 		if (!ret && new_inode->i_nlink == 0)
9621 			ret = btrfs_orphan_add(trans,
9622 					BTRFS_I(d_inode(new_dentry)));
9623 		if (ret) {
9624 			btrfs_abort_transaction(trans, ret);
9625 			goto out_fail;
9626 		}
9627 	}
9628 
9629 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9630 			     new_dentry->d_name.name,
9631 			     new_dentry->d_name.len, 0, index);
9632 	if (ret) {
9633 		btrfs_abort_transaction(trans, ret);
9634 		goto out_fail;
9635 	}
9636 
9637 	if (old_inode->i_nlink == 1)
9638 		BTRFS_I(old_inode)->dir_index = index;
9639 
9640 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9641 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9642 				   rename_ctx.index, new_dentry->d_parent);
9643 
9644 	if (flags & RENAME_WHITEOUT) {
9645 		ret = btrfs_create_new_inode(trans, &whiteout_args);
9646 		if (ret) {
9647 			btrfs_abort_transaction(trans, ret);
9648 			goto out_fail;
9649 		} else {
9650 			unlock_new_inode(whiteout_args.inode);
9651 			iput(whiteout_args.inode);
9652 			whiteout_args.inode = NULL;
9653 		}
9654 	}
9655 out_fail:
9656 	ret2 = btrfs_end_transaction(trans);
9657 	ret = ret ? ret : ret2;
9658 out_notrans:
9659 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9660 		up_read(&fs_info->subvol_sem);
9661 	if (flags & RENAME_WHITEOUT)
9662 		btrfs_new_inode_args_destroy(&whiteout_args);
9663 out_whiteout_inode:
9664 	if (flags & RENAME_WHITEOUT)
9665 		iput(whiteout_args.inode);
9666 	return ret;
9667 }
9668 
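/*
 * VFS ->rename() entry point: reject unsupported flags, send
 * RENAME_EXCHANGE to btrfs_rename_exchange(), and handle plain renames
 * (including RENAME_NOREPLACE and RENAME_WHITEOUT) in btrfs_rename().
 */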
9669 static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
9670 			 struct dentry *old_dentry, struct inode *new_dir,
9671 			 struct dentry *new_dentry, unsigned int flags)
9672 {
9673 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9674 		return -EINVAL;
9675 
9676 	if (flags & RENAME_EXCHANGE)
9677 		return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9678 					  new_dentry);
9679 
9680 	return btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir,
9681 			    new_dentry, flags);
9682 }
9683 
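/*
 * Work item used to flush the delalloc of a single inode on the
 * fs_info->flush_workers queue.
 */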
9684 struct btrfs_delalloc_work {
9685 	struct inode *inode;
9686 	struct completion completion;
9687 	struct list_head list;
9688 	struct btrfs_work work;
9689 };
9690 
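/*
 * Worker callback: flush the inode's dirty pages, flushing a second time
 * if async (compressed) extents are pending, then drop the inode
 * reference and signal completion.
 */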
9691 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9692 {
9693 	struct btrfs_delalloc_work *delalloc_work;
9694 	struct inode *inode;
9695 
9696 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9697 				     work);
9698 	inode = delalloc_work->inode;
9699 	filemap_flush(inode->i_mapping);
9700 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9701 				&BTRFS_I(inode)->runtime_flags))
9702 		filemap_flush(inode->i_mapping);
9703 
9704 	iput(inode);
9705 	complete(&delalloc_work->completion);
9706 }
9707 
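/*
 * Allocate and initialize a delalloc work item for @inode; the caller
 * queues it and waits on work->completion.
 */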
9708 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9709 {
9710 	struct btrfs_delalloc_work *work;
9711 
9712 	work = kmalloc(sizeof(*work), GFP_NOFS);
9713 	if (!work)
9714 		return NULL;
9715 
9716 	init_completion(&work->completion);
9717 	INIT_LIST_HEAD(&work->list);
9718 	work->inode = inode;
9719 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
9720 
9721 	return work;
9722 }
9723 
9724 /*
9725  * Some fairly slow code that needs optimization. This walks the list
9726  * of all the inodes with pending delalloc and forces them to disk.
9727  */
9728 static int start_delalloc_inodes(struct btrfs_root *root,
9729 				 struct writeback_control *wbc, bool snapshot,
9730 				 bool in_reclaim_context)
9731 {
9732 	struct btrfs_inode *binode;
9733 	struct inode *inode;
9734 	struct btrfs_delalloc_work *work, *next;
9735 	struct list_head works;
9736 	struct list_head splice;
9737 	int ret = 0;
9738 	bool full_flush = wbc->nr_to_write == LONG_MAX;
9739 
9740 	INIT_LIST_HEAD(&works);
9741 	INIT_LIST_HEAD(&splice);
9742 
9743 	mutex_lock(&root->delalloc_mutex);
9744 	spin_lock(&root->delalloc_lock);
9745 	list_splice_init(&root->delalloc_inodes, &splice);
9746 	while (!list_empty(&splice)) {
9747 		binode = list_entry(splice.next, struct btrfs_inode,
9748 				    delalloc_inodes);
9749 
9750 		list_move_tail(&binode->delalloc_inodes,
9751 			       &root->delalloc_inodes);
9752 
9753 		if (in_reclaim_context &&
9754 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9755 			continue;
9756 
9757 		inode = igrab(&binode->vfs_inode);
9758 		if (!inode) {
9759 			cond_resched_lock(&root->delalloc_lock);
9760 			continue;
9761 		}
9762 		spin_unlock(&root->delalloc_lock);
9763 
9764 		if (snapshot)
9765 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9766 				&binode->runtime_flags);
9767 		if (full_flush) {
9768 			work = btrfs_alloc_delalloc_work(inode);
9769 			if (!work) {
9770 				iput(inode);
9771 				ret = -ENOMEM;
9772 				goto out;
9773 			}
9774 			list_add_tail(&work->list, &works);
9775 			btrfs_queue_work(root->fs_info->flush_workers,
9776 					 &work->work);
9777 		} else {
9778 			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
9779 			btrfs_add_delayed_iput(inode);
9780 			if (ret || wbc->nr_to_write <= 0)
9781 				goto out;
9782 		}
9783 		cond_resched();
9784 		spin_lock(&root->delalloc_lock);
9785 	}
9786 	spin_unlock(&root->delalloc_lock);
9787 
9788 out:
9789 	list_for_each_entry_safe(work, next, &works, list) {
9790 		list_del_init(&work->list);
9791 		wait_for_completion(&work->completion);
9792 		kfree(work);
9793 	}
9794 
9795 	if (!list_empty(&splice)) {
9796 		spin_lock(&root->delalloc_lock);
9797 		list_splice_tail(&splice, &root->delalloc_inodes);
9798 		spin_unlock(&root->delalloc_lock);
9799 	}
9800 	mutex_unlock(&root->delalloc_mutex);
9801 	return ret;
9802 }
9803 
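/*
 * Flush all delalloc inodes of @root prior to snapshot creation: a full
 * flush (nr_to_write == LONG_MAX) with the inodes tagged
 * BTRFS_INODE_SNAPSHOT_FLUSH.
 */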
9804 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9805 {
9806 	struct writeback_control wbc = {
9807 		.nr_to_write = LONG_MAX,
9808 		.sync_mode = WB_SYNC_NONE,
9809 		.range_start = 0,
9810 		.range_end = LLONG_MAX,
9811 	};
9812 	struct btrfs_fs_info *fs_info = root->fs_info;
9813 
9814 	if (BTRFS_FS_ERROR(fs_info))
9815 		return -EROFS;
9816 
9817 	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9818 }
9819 
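/*
 * Flush up to @nr delalloc pages across all roots, rotating each root to
 * the tail of fs_info->delalloc_roots as it is processed.
 */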
9820 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9821 			       bool in_reclaim_context)
9822 {
9823 	struct writeback_control wbc = {
9824 		.nr_to_write = nr,
9825 		.sync_mode = WB_SYNC_NONE,
9826 		.range_start = 0,
9827 		.range_end = LLONG_MAX,
9828 	};
9829 	struct btrfs_root *root;
9830 	struct list_head splice;
9831 	int ret;
9832 
9833 	if (BTRFS_FS_ERROR(fs_info))
9834 		return -EROFS;
9835 
9836 	INIT_LIST_HEAD(&splice);
9837 
9838 	mutex_lock(&fs_info->delalloc_root_mutex);
9839 	spin_lock(&fs_info->delalloc_root_lock);
9840 	list_splice_init(&fs_info->delalloc_roots, &splice);
9841 	while (!list_empty(&splice)) {
9842 		/*
9843 		 * Reset nr_to_write here so we know that we're doing a full
9844 		 * flush.
9845 		 */
9846 		if (nr == LONG_MAX)
9847 			wbc.nr_to_write = LONG_MAX;
9848 
9849 		root = list_first_entry(&splice, struct btrfs_root,
9850 					delalloc_root);
9851 		root = btrfs_grab_root(root);
9852 		BUG_ON(!root);
9853 		list_move_tail(&root->delalloc_root,
9854 			       &fs_info->delalloc_roots);
9855 		spin_unlock(&fs_info->delalloc_root_lock);
9856 
9857 		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9858 		btrfs_put_root(root);
9859 		if (ret < 0 || wbc.nr_to_write <= 0)
9860 			goto out;
9861 		spin_lock(&fs_info->delalloc_root_lock);
9862 	}
9863 	spin_unlock(&fs_info->delalloc_root_lock);
9864 
9865 	ret = 0;
9866 out:
9867 	if (!list_empty(&splice)) {
9868 		spin_lock(&fs_info->delalloc_root_lock);
9869 		list_splice_tail(&splice, &fs_info->delalloc_roots);
9870 		spin_unlock(&fs_info->delalloc_root_lock);
9871 	}
9872 	mutex_unlock(&fs_info->delalloc_root_mutex);
9873 	return ret;
9874 }
9875 
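/*
 * Create a symlink. The target string is stored as an inline file extent,
 * which is why targets longer than BTRFS_MAX_INLINE_DATA_SIZE() are
 * rejected with -ENAMETOOLONG.
 */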
9876 static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
9877 			 struct dentry *dentry, const char *symname)
9878 {
9879 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9880 	struct btrfs_trans_handle *trans;
9881 	struct btrfs_root *root = BTRFS_I(dir)->root;
9882 	struct btrfs_path *path;
9883 	struct btrfs_key key;
9884 	struct inode *inode;
9885 	struct btrfs_new_inode_args new_inode_args = {
9886 		.dir = dir,
9887 		.dentry = dentry,
9888 	};
9889 	unsigned int trans_num_items;
9890 	int err;
9891 	int name_len;
9892 	int datasize;
9893 	unsigned long ptr;
9894 	struct btrfs_file_extent_item *ei;
9895 	struct extent_buffer *leaf;
9896 
9897 	name_len = strlen(symname);
9898 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9899 		return -ENAMETOOLONG;
9900 
9901 	inode = new_inode(dir->i_sb);
9902 	if (!inode)
9903 		return -ENOMEM;
9904 	inode_init_owner(mnt_userns, inode, dir, S_IFLNK | S_IRWXUGO);
9905 	inode->i_op = &btrfs_symlink_inode_operations;
9906 	inode_nohighmem(inode);
9907 	inode->i_mapping->a_ops = &btrfs_aops;
9908 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9909 	inode_set_bytes(inode, name_len);
9910 
9911 	new_inode_args.inode = inode;
9912 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9913 	if (err)
9914 		goto out_inode;
9915 	/* 1 additional item for the inline extent */
9916 	trans_num_items++;
9917 
9918 	trans = btrfs_start_transaction(root, trans_num_items);
9919 	if (IS_ERR(trans)) {
9920 		err = PTR_ERR(trans);
9921 		goto out_new_inode_args;
9922 	}
9923 
9924 	err = btrfs_create_new_inode(trans, &new_inode_args);
9925 	if (err)
9926 		goto out;
9927 
9928 	path = btrfs_alloc_path();
9929 	if (!path) {
9930 		err = -ENOMEM;
9931 		btrfs_abort_transaction(trans, err);
9932 		discard_new_inode(inode);
9933 		inode = NULL;
9934 		goto out;
9935 	}
9936 	key.objectid = btrfs_ino(BTRFS_I(inode));
9937 	key.offset = 0;
9938 	key.type = BTRFS_EXTENT_DATA_KEY;
9939 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9940 	err = btrfs_insert_empty_item(trans, root, path, &key,
9941 				      datasize);
9942 	if (err) {
9943 		btrfs_abort_transaction(trans, err);
9944 		btrfs_free_path(path);
9945 		discard_new_inode(inode);
9946 		inode = NULL;
9947 		goto out;
9948 	}
9949 	leaf = path->nodes[0];
9950 	ei = btrfs_item_ptr(leaf, path->slots[0],
9951 			    struct btrfs_file_extent_item);
9952 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9953 	btrfs_set_file_extent_type(leaf, ei,
9954 				   BTRFS_FILE_EXTENT_INLINE);
9955 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9956 	btrfs_set_file_extent_compression(leaf, ei, 0);
9957 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9958 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9959 
9960 	ptr = btrfs_file_extent_inline_start(ei);
9961 	write_extent_buffer(leaf, symname, ptr, name_len);
9962 	btrfs_mark_buffer_dirty(leaf);
9963 	btrfs_free_path(path);
9964 
9965 	d_instantiate_new(dentry, inode);
9966 	err = 0;
9967 out:
9968 	btrfs_end_transaction(trans);
9969 	btrfs_btree_balance_dirty(fs_info);
9970 out_new_inode_args:
9971 	btrfs_new_inode_args_destroy(&new_inode_args);
9972 out_inode:
9973 	if (err)
9974 		iput(inode);
9975 	return err;
9976 }
9977 
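/*
 * Insert a prealloc file extent for the reserved range @ins at
 * @file_offset. If @trans_in is NULL, btrfs_replace_file_extents() starts
 * a transaction for us; the (possibly new) handle is returned. On error
 * the qgroup data reservation released at the start is freed again.
 */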
9978 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9979 				       struct btrfs_trans_handle *trans_in,
9980 				       struct btrfs_inode *inode,
9981 				       struct btrfs_key *ins,
9982 				       u64 file_offset)
9983 {
9984 	struct btrfs_file_extent_item stack_fi;
9985 	struct btrfs_replace_extent_info extent_info;
9986 	struct btrfs_trans_handle *trans = trans_in;
9987 	struct btrfs_path *path;
9988 	u64 start = ins->objectid;
9989 	u64 len = ins->offset;
9990 	int qgroup_released;
9991 	int ret;
9992 
9993 	memset(&stack_fi, 0, sizeof(stack_fi));
9994 
9995 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9996 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9997 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9998 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9999 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
10000 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
10001 	/* Encryption and other encoding are reserved and all 0 */
10002 
10003 	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
10004 	if (qgroup_released < 0)
10005 		return ERR_PTR(qgroup_released);
10006 
10007 	if (trans) {
10008 		ret = insert_reserved_file_extent(trans, inode,
10009 						  file_offset, &stack_fi,
10010 						  true, qgroup_released);
10011 		if (ret)
10012 			goto free_qgroup;
10013 		return trans;
10014 	}
10015 
10016 	extent_info.disk_offset = start;
10017 	extent_info.disk_len = len;
10018 	extent_info.data_offset = 0;
10019 	extent_info.data_len = len;
10020 	extent_info.file_offset = file_offset;
10021 	extent_info.extent_buf = (char *)&stack_fi;
10022 	extent_info.is_new_extent = true;
10023 	extent_info.update_times = true;
10024 	extent_info.qgroup_reserved = qgroup_released;
10025 	extent_info.insertions = 0;
10026 
10027 	path = btrfs_alloc_path();
10028 	if (!path) {
10029 		ret = -ENOMEM;
10030 		goto free_qgroup;
10031 	}
10032 
10033 	ret = btrfs_replace_file_extents(inode, path, file_offset,
10034 				     file_offset + len - 1, &extent_info,
10035 				     &trans);
10036 	btrfs_free_path(path);
10037 	if (ret)
10038 		goto free_qgroup;
10039 	return trans;
10040 
10041 free_qgroup:
10042 	/*
10043 	 * We released the qgroup data range at the beginning of the function,
10044 	 * and normally the qgroup_released bytes will be freed when the
10045 	 * transaction commits.
10046 	 * But if we error out early, we have to free what we released here,
10047 	 * or we will leak the qgroup data reservation.
10048 	 */
10049 	btrfs_qgroup_free_refroot(inode->root->fs_info,
10050 			inode->root->root_key.objectid, qgroup_released,
10051 			BTRFS_QGROUP_RSV_DATA);
10052 	return ERR_PTR(ret);
10053 }
10054 
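/*
 * Preallocate extents for [@start, @start + @num_bytes), allocating at
 * most 256M at a time and inserting a prealloc file extent item plus a
 * matching extent map for each allocation. Uses the caller's transaction
 * if @trans is given, otherwise each iteration runs in its own.
 */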
10055 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10056 				       u64 start, u64 num_bytes, u64 min_size,
10057 				       loff_t actual_len, u64 *alloc_hint,
10058 				       struct btrfs_trans_handle *trans)
10059 {
10060 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
10061 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
10062 	struct extent_map *em;
10063 	struct btrfs_root *root = BTRFS_I(inode)->root;
10064 	struct btrfs_key ins;
10065 	u64 cur_offset = start;
10066 	u64 clear_offset = start;
10067 	u64 i_size;
10068 	u64 cur_bytes;
10069 	u64 last_alloc = (u64)-1;
10070 	int ret = 0;
10071 	bool own_trans = true;
10072 	u64 end = start + num_bytes - 1;
10073 
10074 	if (trans)
10075 		own_trans = false;
10076 	while (num_bytes > 0) {
10077 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
10078 		cur_bytes = max(cur_bytes, min_size);
10079 		/*
10080 		 * If we are severely fragmented we could end up with really
10081 		 * small allocations, so if the allocator is returning small
10082 		 * chunks lets make its job easier by only searching for those
10083 		 * sized chunks.
10084 		 */
10085 		cur_bytes = min(cur_bytes, last_alloc);
10086 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
10087 				min_size, 0, *alloc_hint, &ins, 1, 0);
10088 		if (ret)
10089 			break;
10090 
10091 		/*
10092 		 * We've reserved this space, and thus converted it from
10093 		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
10094 		 * from here on out we will only need to clear our reservation
10095 		 * for the remaining unreserved area, so advance our
10096 		 * clear_offset by our extent size.
10097 		 */
10098 		clear_offset += ins.offset;
10099 
10100 		last_alloc = ins.offset;
10101 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
10102 						    &ins, cur_offset);
10103 		/*
10104 		 * Now that we inserted the prealloc extent we can finally
10105 		 * decrement the number of reservations in the block group.
10106 		 * If we did it before, we could race with relocation and have
10107 		 * relocation miss the reserved extent, making it fail later.
10108 		 */
10109 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10110 		if (IS_ERR(trans)) {
10111 			ret = PTR_ERR(trans);
10112 			btrfs_free_reserved_extent(fs_info, ins.objectid,
10113 						   ins.offset, 0);
10114 			break;
10115 		}
10116 
10117 		btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10118 					cur_offset + ins.offset - 1, 0);
10119 
10120 		em = alloc_extent_map();
10121 		if (!em) {
10122 			btrfs_set_inode_full_sync(BTRFS_I(inode));
10123 			goto next;
10124 		}
10125 
10126 		em->start = cur_offset;
10127 		em->orig_start = cur_offset;
10128 		em->len = ins.offset;
10129 		em->block_start = ins.objectid;
10130 		em->block_len = ins.offset;
10131 		em->orig_block_len = ins.offset;
10132 		em->ram_bytes = ins.offset;
10133 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10134 		em->generation = trans->transid;
10135 
10136 		while (1) {
10137 			write_lock(&em_tree->lock);
10138 			ret = add_extent_mapping(em_tree, em, 1);
10139 			write_unlock(&em_tree->lock);
10140 			if (ret != -EEXIST)
10141 				break;
10142 			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10143 						cur_offset + ins.offset - 1,
10144 						0);
10145 		}
10146 		free_extent_map(em);
10147 next:
10148 		num_bytes -= ins.offset;
10149 		cur_offset += ins.offset;
10150 		*alloc_hint = ins.objectid + ins.offset;
10151 
10152 		inode_inc_iversion(inode);
10153 		inode->i_ctime = current_time(inode);
10154 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10155 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10156 		    (actual_len > inode->i_size) &&
10157 		    (cur_offset > inode->i_size)) {
10158 			if (cur_offset > actual_len)
10159 				i_size = actual_len;
10160 			else
10161 				i_size = cur_offset;
10162 			i_size_write(inode, i_size);
10163 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
10164 		}
10165 
10166 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
10167 
10168 		if (ret) {
10169 			btrfs_abort_transaction(trans, ret);
10170 			if (own_trans)
10171 				btrfs_end_transaction(trans);
10172 			break;
10173 		}
10174 
10175 		if (own_trans) {
10176 			btrfs_end_transaction(trans);
10177 			trans = NULL;
10178 		}
10179 	}
10180 	if (clear_offset < end)
10181 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
10182 			end - clear_offset + 1);
10183 	return ret;
10184 }
10185 
10186 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10187 			      u64 start, u64 num_bytes, u64 min_size,
10188 			      loff_t actual_len, u64 *alloc_hint)
10189 {
10190 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10191 					   min_size, actual_len, alloc_hint,
10192 					   NULL);
10193 }
10194 
10195 int btrfs_prealloc_file_range_trans(struct inode *inode,
10196 				    struct btrfs_trans_handle *trans, int mode,
10197 				    u64 start, u64 num_bytes, u64 min_size,
10198 				    loff_t actual_len, u64 *alloc_hint)
10199 {
10200 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10201 					   min_size, actual_len, alloc_hint, trans);
10202 }
10203 
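/*
 * ->permission() hook: deny write access to read-only subvolumes and to
 * inodes flagged BTRFS_INODE_READONLY, then fall back to
 * generic_permission().
 */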
10204 static int btrfs_permission(struct user_namespace *mnt_userns,
10205 			    struct inode *inode, int mask)
10206 {
10207 	struct btrfs_root *root = BTRFS_I(inode)->root;
10208 	umode_t mode = inode->i_mode;
10209 
10210 	if (mask & MAY_WRITE &&
10211 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10212 		if (btrfs_root_readonly(root))
10213 			return -EROFS;
10214 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10215 			return -EACCES;
10216 	}
10217 	return generic_permission(mnt_userns, inode, mask);
10218 }
10219 
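/*
 * ->tmpfile() hook: create an unlinked temporary file. new_inode_args.orphan
 * is set so the new inode starts out on the orphan list.
 */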
10220 static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
10221 			 struct dentry *dentry, umode_t mode)
10222 {
10223 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10224 	struct btrfs_trans_handle *trans;
10225 	struct btrfs_root *root = BTRFS_I(dir)->root;
10226 	struct inode *inode;
10227 	struct btrfs_new_inode_args new_inode_args = {
10228 		.dir = dir,
10229 		.dentry = dentry,
10230 		.orphan = true,
10231 	};
10232 	unsigned int trans_num_items;
10233 	int ret;
10234 
10235 	inode = new_inode(dir->i_sb);
10236 	if (!inode)
10237 		return -ENOMEM;
10238 	inode_init_owner(mnt_userns, inode, dir, mode);
10239 	inode->i_fop = &btrfs_file_operations;
10240 	inode->i_op = &btrfs_file_inode_operations;
10241 	inode->i_mapping->a_ops = &btrfs_aops;
10242 
10243 	new_inode_args.inode = inode;
10244 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
10245 	if (ret)
10246 		goto out_inode;
10247 
10248 	trans = btrfs_start_transaction(root, trans_num_items);
10249 	if (IS_ERR(trans)) {
10250 		ret = PTR_ERR(trans);
10251 		goto out_new_inode_args;
10252 	}
10253 
10254 	ret = btrfs_create_new_inode(trans, &new_inode_args);
10255 
10256 	/*
10257 	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
10258 	 * set it to 1 because d_tmpfile() will issue a warning if the count is
10259 	 * 0, through:
10260 	 *
10261 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10262 	 */
10263 	set_nlink(inode, 1);
10264 
10265 	if (!ret) {
10266 		d_tmpfile(dentry, inode);
10267 		unlock_new_inode(inode);
10268 		mark_inode_dirty(inode);
10269 	}
10270 
10271 	btrfs_end_transaction(trans);
10272 	btrfs_btree_balance_dirty(fs_info);
10273 out_new_inode_args:
10274 	btrfs_new_inode_args_destroy(&new_inode_args);
10275 out_inode:
10276 	if (ret)
10277 		iput(inode);
10278 	return ret;
10279 }
10280 
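/*
 * Mark the range [@start, @end] as under writeback, using the subpage
 * helper so that sectorsize < PAGE_SIZE cases are handled too.
 */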
10281 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
10282 {
10283 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10284 	unsigned long index = start >> PAGE_SHIFT;
10285 	unsigned long end_index = end >> PAGE_SHIFT;
10286 	struct page *page;
10287 	u32 len;
10288 
10289 	ASSERT(end + 1 - start <= U32_MAX);
10290 	len = end + 1 - start;
10291 	while (index <= end_index) {
10292 		page = find_get_page(inode->vfs_inode.i_mapping, index);
10293 		ASSERT(page); /* Pages should be in the extent_io_tree */
10294 
10295 		btrfs_page_set_writeback(fs_info, page, start, len);
10296 		put_page(page);
10297 		index++;
10298 	}
10299 }
10300 
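/*
 * Map an on-disk compression type to the BTRFS_ENCODED_IO_COMPRESSION_*
 * value reported to userspace. For LZO the value encodes the sector size:
 * for example, sectorsize_bits == 12 (4K sectors) maps to
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K.
 */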
10301 static int btrfs_encoded_io_compression_from_extent(
10302 				struct btrfs_fs_info *fs_info,
10303 				int compress_type)
10304 {
10305 	switch (compress_type) {
10306 	case BTRFS_COMPRESS_NONE:
10307 		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
10308 	case BTRFS_COMPRESS_ZLIB:
10309 		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
10310 	case BTRFS_COMPRESS_LZO:
10311 		/*
10312 		 * The LZO format depends on the sector size. 64K is the maximum
10313 		 * sector size that we support.
10314 		 */
10315 		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
10316 			return -EINVAL;
10317 		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
10318 		       (fs_info->sectorsize_bits - 12);
10319 	case BTRFS_COMPRESS_ZSTD:
10320 		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
10321 	default:
10322 		return -EUCLEAN;
10323 	}
10324 }
10325 
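/*
 * Encoded read of an inline extent: copy the (possibly compressed) data
 * straight out of the extent item. The extent and inode locks are dropped
 * before copy_to_iter() and *unlocked is set so the caller won't unlock
 * again.
 */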
10326 static ssize_t btrfs_encoded_read_inline(
10327 				struct kiocb *iocb,
10328 				struct iov_iter *iter, u64 start,
10329 				u64 lockend,
10330 				struct extent_state **cached_state,
10331 				u64 extent_start, size_t count,
10332 				struct btrfs_ioctl_encoded_io_args *encoded,
10333 				bool *unlocked)
10334 {
10335 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10336 	struct btrfs_root *root = inode->root;
10337 	struct btrfs_fs_info *fs_info = root->fs_info;
10338 	struct extent_io_tree *io_tree = &inode->io_tree;
10339 	struct btrfs_path *path;
10340 	struct extent_buffer *leaf;
10341 	struct btrfs_file_extent_item *item;
10342 	u64 ram_bytes;
10343 	unsigned long ptr;
10344 	void *tmp;
10345 	ssize_t ret;
10346 
10347 	path = btrfs_alloc_path();
10348 	if (!path) {
10349 		ret = -ENOMEM;
10350 		goto out;
10351 	}
10352 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
10353 				       extent_start, 0);
10354 	if (ret) {
10355 		if (ret > 0) {
10356 			/* The extent item disappeared? */
10357 			ret = -EIO;
10358 		}
10359 		goto out;
10360 	}
10361 	leaf = path->nodes[0];
10362 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
10363 
10364 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
10365 	ptr = btrfs_file_extent_inline_start(item);
10366 
10367 	encoded->len = min_t(u64, extent_start + ram_bytes,
10368 			     inode->vfs_inode.i_size) - iocb->ki_pos;
10369 	ret = btrfs_encoded_io_compression_from_extent(fs_info,
10370 				 btrfs_file_extent_compression(leaf, item));
10371 	if (ret < 0)
10372 		goto out;
10373 	encoded->compression = ret;
10374 	if (encoded->compression) {
10375 		size_t inline_size;
10376 
10377 		inline_size = btrfs_file_extent_inline_item_len(leaf,
10378 								path->slots[0]);
10379 		if (inline_size > count) {
10380 			ret = -ENOBUFS;
10381 			goto out;
10382 		}
10383 		count = inline_size;
10384 		encoded->unencoded_len = ram_bytes;
10385 		encoded->unencoded_offset = iocb->ki_pos - extent_start;
10386 	} else {
10387 		count = min_t(u64, count, encoded->len);
10388 		encoded->len = count;
10389 		encoded->unencoded_len = count;
10390 		ptr += iocb->ki_pos - extent_start;
10391 	}
10392 
10393 	tmp = kmalloc(count, GFP_NOFS);
10394 	if (!tmp) {
10395 		ret = -ENOMEM;
10396 		goto out;
10397 	}
10398 	read_extent_buffer(leaf, tmp, ptr, count);
10399 	btrfs_release_path(path);
10400 	unlock_extent_cached(io_tree, start, lockend, cached_state);
10401 	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10402 	*unlocked = true;
10403 
10404 	ret = copy_to_iter(tmp, count, iter);
10405 	if (ret != count)
10406 		ret = -EFAULT;
10407 	kfree(tmp);
10408 out:
10409 	btrfs_free_path(path);
10410 	return ret;
10411 }
10412 
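/*
 * State shared between btrfs_encoded_read_regular_fill_pages() and the
 * bios it submits; @pending counts in-flight bios plus one reference held
 * by the submitter.
 */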
10413 struct btrfs_encoded_read_private {
10414 	struct btrfs_inode *inode;
10415 	u64 file_offset;
10416 	wait_queue_head_t wait;
10417 	atomic_t pending;
10418 	blk_status_t status;
10419 	bool skip_csum;
10420 };
10421 
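/*
 * Look up checksums for the bio (unless the inode is NODATASUM), hook up
 * the end-io workqueue, and submit it, holding a priv->pending reference
 * for the duration of the I/O.
 */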
10422 static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
10423 					    struct bio *bio, int mirror_num)
10424 {
10425 	struct btrfs_encoded_read_private *priv = bio->bi_private;
10426 	struct btrfs_bio *bbio = btrfs_bio(bio);
10427 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10428 	blk_status_t ret;
10429 
10430 	if (!priv->skip_csum) {
10431 		ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL);
10432 		if (ret)
10433 			return ret;
10434 	}
10435 
10436 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
10437 	if (ret) {
10438 		btrfs_bio_free_csum(bbio);
10439 		return ret;
10440 	}
10441 
10442 	atomic_inc(&priv->pending);
10443 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
10444 	if (ret) {
10445 		atomic_dec(&priv->pending);
10446 		btrfs_bio_free_csum(bbio);
10447 	}
10448 	return ret;
10449 }
10450 
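/*
 * Verify the data checksum of every sector in a completed read bio.
 * Returns the bio's status if it already failed or csums are skipped,
 * BLK_STS_IOERR on a checksum mismatch, and BLK_STS_OK otherwise.
 */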
10451 static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio)
10452 {
10453 	const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK);
10454 	struct btrfs_encoded_read_private *priv = bbio->bio.bi_private;
10455 	struct btrfs_inode *inode = priv->inode;
10456 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10457 	u32 sectorsize = fs_info->sectorsize;
10458 	struct bio_vec *bvec;
10459 	struct bvec_iter_all iter_all;
10460 	u64 start = priv->file_offset;
10461 	u32 bio_offset = 0;
10462 
10463 	if (priv->skip_csum || !uptodate)
10464 		return bbio->bio.bi_status;
10465 
10466 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
10467 		unsigned int i, nr_sectors, pgoff;
10468 
10469 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
10470 		pgoff = bvec->bv_offset;
10471 		for (i = 0; i < nr_sectors; i++) {
10472 			ASSERT(pgoff < PAGE_SIZE);
10473 			if (check_data_csum(&inode->vfs_inode, bbio, bio_offset,
10474 					    bvec->bv_page, pgoff, start))
10475 				return BLK_STS_IOERR;
10476 			start += sectorsize;
10477 			bio_offset += sectorsize;
10478 			pgoff += sectorsize;
10479 		}
10480 	}
10481 	return BLK_STS_OK;
10482 }
10483 
10484 static void btrfs_encoded_read_endio(struct bio *bio)
10485 {
10486 	struct btrfs_encoded_read_private *priv = bio->bi_private;
10487 	struct btrfs_bio *bbio = btrfs_bio(bio);
10488 	blk_status_t status;
10489 
10490 	status = btrfs_encoded_read_verify_csum(bbio);
10491 	if (status) {
10492 		/*
10493 		 * The memory barrier implied by the atomic_dec_return() here
10494 		 * pairs with the memory barrier implied by the
10495 		 * atomic_dec_return() or io_wait_event() in
10496 		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
10497 		 * write is observed before the load of status in
10498 		 * btrfs_encoded_read_regular_fill_pages().
10499 		 */
10500 		WRITE_ONCE(priv->status, status);
10501 	}
10502 	if (!atomic_dec_return(&priv->pending))
10503 		wake_up(&priv->wait);
10504 	btrfs_bio_free_csum(bbio);
10505 	bio_put(bio);
10506 }
10507 
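/*
 * Read @disk_io_size bytes starting at @disk_bytenr into @pages, splitting
 * the I/O into bios along bio and stripe boundaries and waiting for all of
 * them to complete.
 */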
10508 static int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
10509 						 u64 file_offset,
10510 						 u64 disk_bytenr,
10511 						 u64 disk_io_size,
10512 						 struct page **pages)
10513 {
10514 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10515 	struct btrfs_encoded_read_private priv = {
10516 		.inode = inode,
10517 		.file_offset = file_offset,
10518 		.pending = ATOMIC_INIT(1),
10519 		.skip_csum = (inode->flags & BTRFS_INODE_NODATASUM),
10520 	};
10521 	unsigned long i = 0;
10522 	u64 cur = 0;
10523 	int ret;
10524 
10525 	init_waitqueue_head(&priv.wait);
10526 	/*
10527 	 * Submit bios for the extent, splitting due to bio or stripe limits as
10528 	 * necessary.
10529 	 */
10530 	while (cur < disk_io_size) {
10531 		struct extent_map *em;
10532 		struct btrfs_io_geometry geom;
10533 		struct bio *bio = NULL;
10534 		u64 remaining;
10535 
10536 		em = btrfs_get_chunk_map(fs_info, disk_bytenr + cur,
10537 					 disk_io_size - cur);
10538 		if (IS_ERR(em)) {
10539 			ret = PTR_ERR(em);
10540 		} else {
10541 			ret = btrfs_get_io_geometry(fs_info, em, BTRFS_MAP_READ,
10542 						    disk_bytenr + cur, &geom);
10543 			free_extent_map(em);
10544 		}
10545 		if (ret) {
10546 			WRITE_ONCE(priv.status, errno_to_blk_status(ret));
10547 			break;
10548 		}
10549 		remaining = min(geom.len, disk_io_size - cur);
10550 		while (bio || remaining) {
10551 			size_t bytes = min_t(u64, remaining, PAGE_SIZE);
10552 
10553 			if (!bio) {
10554 				bio = btrfs_bio_alloc(BIO_MAX_VECS);
10555 				bio->bi_iter.bi_sector =
10556 					(disk_bytenr + cur) >> SECTOR_SHIFT;
10557 				bio->bi_end_io = btrfs_encoded_read_endio;
10558 				bio->bi_private = &priv;
10559 				bio->bi_opf = REQ_OP_READ;
10560 			}
10561 
10562 			if (!bytes ||
10563 			    bio_add_page(bio, pages[i], bytes, 0) < bytes) {
10564 				blk_status_t status;
10565 
10566 				status = submit_encoded_read_bio(inode, bio, 0);
10567 				if (status) {
10568 					WRITE_ONCE(priv.status, status);
10569 					bio_put(bio);
10570 					goto out;
10571 				}
10572 				bio = NULL;
10573 				continue;
10574 			}
10575 
10576 			i++;
10577 			cur += bytes;
10578 			remaining -= bytes;
10579 		}
10580 	}
10581 
10582 out:
10583 	if (atomic_dec_return(&priv.pending))
10584 		io_wait_event(priv.wait, !atomic_read(&priv.pending));
10585 	/* See btrfs_encoded_read_endio() for ordering. */
10586 	return blk_status_to_errno(READ_ONCE(priv.status));
10587 }
10588 
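/*
 * Encoded read of a regular on-disk extent: read the raw extent data into
 * temporary pages, drop the locks, then copy the requested portion to the
 * iterator (the whole extent when it is compressed).
 */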
10589 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
10590 					  struct iov_iter *iter,
10591 					  u64 start, u64 lockend,
10592 					  struct extent_state **cached_state,
10593 					  u64 disk_bytenr, u64 disk_io_size,
10594 					  size_t count, bool compressed,
10595 					  bool *unlocked)
10596 {
10597 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10598 	struct extent_io_tree *io_tree = &inode->io_tree;
10599 	struct page **pages;
10600 	unsigned long nr_pages, i;
10601 	u64 cur;
10602 	size_t page_offset;
10603 	ssize_t ret;
10604 
10605 	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
10606 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10607 	if (!pages)
10608 		return -ENOMEM;
10609 	ret = btrfs_alloc_page_array(nr_pages, pages);
10610 	if (ret) {
10611 		ret = -ENOMEM;
10612 		goto out;
10613 	}
10614 
10615 	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
10616 						    disk_io_size, pages);
10617 	if (ret)
10618 		goto out;
10619 
10620 	unlock_extent_cached(io_tree, start, lockend, cached_state);
10621 	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10622 	*unlocked = true;
10623 
10624 	if (compressed) {
10625 		i = 0;
10626 		page_offset = 0;
10627 	} else {
10628 		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
10629 		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
10630 	}
10631 	cur = 0;
10632 	while (cur < count) {
10633 		size_t bytes = min_t(size_t, count - cur,
10634 				     PAGE_SIZE - page_offset);
10635 
10636 		if (copy_page_to_iter(pages[i], page_offset, bytes,
10637 				      iter) != bytes) {
10638 			ret = -EFAULT;
10639 			goto out;
10640 		}
10641 		i++;
10642 		cur += bytes;
10643 		page_offset = 0;
10644 	}
10645 	ret = count;
10646 out:
10647 	for (i = 0; i < nr_pages; i++) {
10648 		if (pages[i])
10649 			__free_page(pages[i]);
10650 	}
10651 	kfree(pages);
10652 	return ret;
10653 }
10654 
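/*
 * Entry point for the encoded read ioctl: wait for ordered extents, look
 * up the extent at iocb->ki_pos, and dispatch to the inline,
 * hole/prealloc (zero filling), or regular-extent path, filling in
 * @encoded for userspace.
 */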
10655 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
10656 			   struct btrfs_ioctl_encoded_io_args *encoded)
10657 {
10658 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10659 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10660 	struct extent_io_tree *io_tree = &inode->io_tree;
10661 	ssize_t ret;
10662 	size_t count = iov_iter_count(iter);
10663 	u64 start, lockend, disk_bytenr, disk_io_size;
10664 	struct extent_state *cached_state = NULL;
10665 	struct extent_map *em;
10666 	bool unlocked = false;
10667 
10668 	file_accessed(iocb->ki_filp);
10669 
10670 	btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10671 
10672 	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
10673 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10674 		return 0;
10675 	}
10676 	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
10677 	/*
10678 	 * We don't know how long the extent containing iocb->ki_pos is, but if
10679 	 * it's compressed we know that it won't be longer than this.
10680 	 */
10681 	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
10682 
10683 	for (;;) {
10684 		struct btrfs_ordered_extent *ordered;
10685 
10686 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
10687 					       lockend - start + 1);
10688 		if (ret)
10689 			goto out_unlock_inode;
10690 		lock_extent_bits(io_tree, start, lockend, &cached_state);
10691 		ordered = btrfs_lookup_ordered_range(inode, start,
10692 						     lockend - start + 1);
10693 		if (!ordered)
10694 			break;
10695 		btrfs_put_ordered_extent(ordered);
10696 		unlock_extent_cached(io_tree, start, lockend, &cached_state);
10697 		cond_resched();
10698 	}
10699 
10700 	em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
10701 	if (IS_ERR(em)) {
10702 		ret = PTR_ERR(em);
10703 		goto out_unlock_extent;
10704 	}
10705 
10706 	if (em->block_start == EXTENT_MAP_INLINE) {
10707 		u64 extent_start = em->start;
10708 
10709 		/*
10710 		 * For inline extents we get everything we need out of the
10711 		 * extent item.
10712 		 */
10713 		free_extent_map(em);
10714 		em = NULL;
10715 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
10716 						&cached_state, extent_start,
10717 						count, encoded, &unlocked);
10718 		goto out;
10719 	}
10720 
10721 	/*
10722 	 * We only want to return up to EOF even if the extent extends beyond
10723 	 * that.
10724 	 */
10725 	encoded->len = min_t(u64, extent_map_end(em),
10726 			     inode->vfs_inode.i_size) - iocb->ki_pos;
10727 	if (em->block_start == EXTENT_MAP_HOLE ||
10728 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
10729 		disk_bytenr = EXTENT_MAP_HOLE;
10730 		count = min_t(u64, count, encoded->len);
10731 		encoded->len = count;
10732 		encoded->unencoded_len = count;
10733 	} else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
10734 		disk_bytenr = em->block_start;
10735 		/*
10736 		 * Bail if the buffer isn't large enough to return the whole
10737 		 * compressed extent.
10738 		 */
10739 		if (em->block_len > count) {
10740 			ret = -ENOBUFS;
10741 			goto out_em;
10742 		}
10743 		disk_io_size = count = em->block_len;
10744 		encoded->unencoded_len = em->ram_bytes;
10745 		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
10746 		ret = btrfs_encoded_io_compression_from_extent(fs_info,
10747 							     em->compress_type);
10748 		if (ret < 0)
10749 			goto out_em;
10750 		encoded->compression = ret;
10751 	} else {
10752 		disk_bytenr = em->block_start + (start - em->start);
10753 		if (encoded->len > count)
10754 			encoded->len = count;
10755 		/*
10756 		 * Don't read beyond what we locked. This also limits the page
10757 		 * allocations that we'll do.
10758 		 */
10759 		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
10760 		count = start + disk_io_size - iocb->ki_pos;
10761 		encoded->len = count;
10762 		encoded->unencoded_len = count;
10763 		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
10764 	}
10765 	free_extent_map(em);
10766 	em = NULL;
10767 
10768 	if (disk_bytenr == EXTENT_MAP_HOLE) {
10769 		unlock_extent_cached(io_tree, start, lockend, &cached_state);
10770 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10771 		unlocked = true;
10772 		ret = iov_iter_zero(count, iter);
10773 		if (ret != count)
10774 			ret = -EFAULT;
10775 	} else {
10776 		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
10777 						 &cached_state, disk_bytenr,
10778 						 disk_io_size, count,
10779 						 encoded->compression,
10780 						 &unlocked);
10781 	}
10782 
10783 out:
10784 	if (ret >= 0)
10785 		iocb->ki_pos += encoded->len;
10786 out_em:
10787 	free_extent_map(em);
10788 out_unlock_extent:
10789 	if (!unlocked)
10790 		unlock_extent_cached(io_tree, start, lockend, &cached_state);
10791 out_unlock_inode:
10792 	if (!unlocked)
10793 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10794 	return ret;
10795 }
10796 
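/*
 * Entry point for the encoded write ioctl: validate the encoding and
 * alignment, copy the compressed payload into pages, reserve data and
 * metadata space, then either create an inline extent or allocate an
 * extent and submit a compressed write bio for it.
 */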
10797 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
10798 			       const struct btrfs_ioctl_encoded_io_args *encoded)
10799 {
10800 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10801 	struct btrfs_root *root = inode->root;
10802 	struct btrfs_fs_info *fs_info = root->fs_info;
10803 	struct extent_io_tree *io_tree = &inode->io_tree;
10804 	struct extent_changeset *data_reserved = NULL;
10805 	struct extent_state *cached_state = NULL;
10806 	int compression;
10807 	size_t orig_count;
10808 	u64 start, end;
10809 	u64 num_bytes, ram_bytes, disk_num_bytes;
10810 	unsigned long nr_pages, i;
10811 	struct page **pages;
10812 	struct btrfs_key ins;
10813 	bool extent_reserved = false;
10814 	struct extent_map *em;
10815 	ssize_t ret;
10816 
10817 	switch (encoded->compression) {
10818 	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
10819 		compression = BTRFS_COMPRESS_ZLIB;
10820 		break;
10821 	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
10822 		compression = BTRFS_COMPRESS_ZSTD;
10823 		break;
10824 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
10825 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
10826 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
10827 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
10828 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
10829 		/* The sector size must match for LZO. */
10830 		if (encoded->compression -
10831 		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
10832 		    fs_info->sectorsize_bits)
10833 			return -EINVAL;
10834 		compression = BTRFS_COMPRESS_LZO;
10835 		break;
10836 	default:
10837 		return -EINVAL;
10838 	}
10839 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
10840 		return -EINVAL;
10841 
10842 	orig_count = iov_iter_count(from);
10843 
10844 	/* The extent size must be sane. */
10845 	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
10846 	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
10847 		return -EINVAL;
10848 
10849 	/*
10850 	 * The compressed data must be smaller than the decompressed data.
10851 	 *
10852 	 * It's of course possible for data to compress to larger or the same
10853 	 * size, but the buffered I/O path falls back to no compression for such
10854 	 * data, and we don't want to break any assumptions by creating these
10855 	 * extents.
10856 	 *
10857 	 * Note that this is less strict than the current check we have that the
10858 	 * compressed data must be at least one sector smaller than the
10859 	 * decompressed data. We only want to enforce the weaker requirement
10860 	 * from old kernels that it is at least one byte smaller.
10861 	 */
10862 	if (orig_count >= encoded->unencoded_len)
10863 		return -EINVAL;
10864 
10865 	/* The extent must start on a sector boundary. */
10866 	start = iocb->ki_pos;
10867 	if (!IS_ALIGNED(start, fs_info->sectorsize))
10868 		return -EINVAL;
10869 
10870 	/*
10871 	 * The extent must end on a sector boundary. However, we allow a write
10872 	 * which ends at or extends i_size to have an unaligned length; we round
10873 	 * up the extent size and set i_size to the unaligned end.
10874 	 */
10875 	if (start + encoded->len < inode->vfs_inode.i_size &&
10876 	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
10877 		return -EINVAL;
10878 
10879 	/* Finally, the offset in the unencoded data must be sector-aligned. */
10880 	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
10881 		return -EINVAL;
10882 
10883 	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
10884 	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
10885 	end = start + num_bytes - 1;
10886 
10887 	/*
10888 	 * If the extent cannot be inline, the compressed data on disk must be
10889 	 * sector-aligned. For convenience, we extend it with zeroes if it
10890 	 * isn't.
10891 	 */
10892 	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
10893 	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
10894 	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
10895 	if (!pages)
10896 		return -ENOMEM;
10897 	for (i = 0; i < nr_pages; i++) {
10898 		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
10899 		char *kaddr;
10900 
10901 		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
10902 		if (!pages[i]) {
10903 			ret = -ENOMEM;
10904 			goto out_pages;
10905 		}
10906 		kaddr = kmap(pages[i]);
10907 		if (copy_from_iter(kaddr, bytes, from) != bytes) {
10908 			kunmap(pages[i]);
10909 			ret = -EFAULT;
10910 			goto out_pages;
10911 		}
10912 		if (bytes < PAGE_SIZE)
10913 			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
10914 		kunmap(pages[i]);
10915 	}
10916 
10917 	for (;;) {
10918 		struct btrfs_ordered_extent *ordered;
10919 
10920 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
10921 		if (ret)
10922 			goto out_pages;
10923 		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10924 						    start >> PAGE_SHIFT,
10925 						    end >> PAGE_SHIFT);
10926 		if (ret)
10927 			goto out_pages;
10928 		lock_extent_bits(io_tree, start, end, &cached_state);
10929 		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10930 		if (!ordered &&
10931 		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10932 			break;
10933 		if (ordered)
10934 			btrfs_put_ordered_extent(ordered);
10935 		unlock_extent_cached(io_tree, start, end, &cached_state);
10936 		cond_resched();
10937 	}
10938 
10939 	/*
10940 	 * We don't use the higher-level delalloc space functions because our
10941 	 * num_bytes and disk_num_bytes are different.
10942 	 */
10943 	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10944 	if (ret)
10945 		goto out_unlock;
10946 	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10947 	if (ret)
10948 		goto out_free_data_space;
10949 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10950 					      false);
10951 	if (ret)
10952 		goto out_qgroup_free_data;
10953 
10954 	/* Try an inline extent first. */
10955 	if (start == 0 && encoded->unencoded_len == encoded->len &&
10956 	    encoded->unencoded_offset == 0) {
10957 		ret = cow_file_range_inline(inode, encoded->len, orig_count,
10958 					    compression, pages, true);
10959 		if (ret <= 0) {
10960 			if (ret == 0)
10961 				ret = orig_count;
10962 			goto out_delalloc_release;
10963 		}
10964 	}
10965 
10966 	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10967 				   disk_num_bytes, 0, 0, &ins, 1, 1);
10968 	if (ret)
10969 		goto out_delalloc_release;
10970 	extent_reserved = true;
10971 
10972 	em = create_io_em(inode, start, num_bytes,
10973 			  start - encoded->unencoded_offset, ins.objectid,
10974 			  ins.offset, ins.offset, ram_bytes, compression,
10975 			  BTRFS_ORDERED_COMPRESSED);
10976 	if (IS_ERR(em)) {
10977 		ret = PTR_ERR(em);
10978 		goto out_free_reserved;
10979 	}
10980 	free_extent_map(em);
10981 
10982 	ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes,
10983 				       ins.objectid, ins.offset,
10984 				       encoded->unencoded_offset,
10985 				       (1 << BTRFS_ORDERED_ENCODED) |
10986 				       (1 << BTRFS_ORDERED_COMPRESSED),
10987 				       compression);
10988 	if (ret) {
10989 		btrfs_drop_extent_cache(inode, start, end, 0);
10990 		goto out_free_reserved;
10991 	}
10992 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10993 
10994 	if (start + encoded->len > inode->vfs_inode.i_size)
10995 		i_size_write(&inode->vfs_inode, start + encoded->len);
10996 
10997 	unlock_extent_cached(io_tree, start, end, &cached_state);
10998 
10999 	btrfs_delalloc_release_extents(inode, num_bytes);
11000 
11001 	if (btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid,
11002 					  ins.offset, pages, nr_pages, 0, NULL,
11003 					  false)) {
11004 		btrfs_writepage_endio_finish_ordered(inode, pages[0], start, end, 0);
11005 		ret = -EIO;
11006 		goto out_pages;
11007 	}
11008 	ret = orig_count;
11009 	goto out;
11010 
11011 out_free_reserved:
11012 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
11013 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
11014 out_delalloc_release:
11015 	btrfs_delalloc_release_extents(inode, num_bytes);
11016 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
11017 out_qgroup_free_data:
11018 	if (ret < 0)
11019 		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
11020 out_free_data_space:
11021 	/*
11022 	 * If btrfs_reserve_extent() succeeded, then we already decremented
11023 	 * bytes_may_use.
11024 	 */
11025 	if (!extent_reserved)
11026 		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
11027 out_unlock:
11028 	unlock_extent_cached(io_tree, start, end, &cached_state);
11029 out_pages:
11030 	for (i = 0; i < nr_pages; i++) {
11031 		if (pages[i])
11032 			__free_page(pages[i]);
11033 	}
11034 	kvfree(pages);
11035 out:
11036 	if (ret >= 0)
11037 		iocb->ki_pos += encoded->len;
11038 	return ret;
11039 }
11040 
11041 #ifdef CONFIG_SWAP
11042 /*
11043  * Add an entry indicating a block group or device which is pinned by a
11044  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
11045  * negative errno on failure.
11046  */
11047 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
11048 				  bool is_block_group)
11049 {
11050 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
11051 	struct btrfs_swapfile_pin *sp, *entry;
11052 	struct rb_node **p;
11053 	struct rb_node *parent = NULL;
11054 
11055 	sp = kmalloc(sizeof(*sp), GFP_NOFS);
11056 	if (!sp)
11057 		return -ENOMEM;
11058 	sp->ptr = ptr;
11059 	sp->inode = inode;
11060 	sp->is_block_group = is_block_group;
11061 	sp->bg_extent_count = 1;
11062 
11063 	spin_lock(&fs_info->swapfile_pins_lock);
11064 	p = &fs_info->swapfile_pins.rb_node;
11065 	while (*p) {
11066 		parent = *p;
11067 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
11068 		if (sp->ptr < entry->ptr ||
11069 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
11070 			p = &(*p)->rb_left;
11071 		} else if (sp->ptr > entry->ptr ||
11072 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
11073 			p = &(*p)->rb_right;
11074 		} else {
11075 			if (is_block_group)
11076 				entry->bg_extent_count++;
11077 			spin_unlock(&fs_info->swapfile_pins_lock);
11078 			kfree(sp);
11079 			return 1;
11080 		}
11081 	}
11082 	rb_link_node(&sp->node, parent, p);
11083 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
11084 	spin_unlock(&fs_info->swapfile_pins_lock);
11085 	return 0;
11086 }
11087 
11088 /* Free all of the entries pinned by this swapfile. */
11089 static void btrfs_free_swapfile_pins(struct inode *inode)
11090 {
11091 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
11092 	struct btrfs_swapfile_pin *sp;
11093 	struct rb_node *node, *next;
11094 
11095 	spin_lock(&fs_info->swapfile_pins_lock);
11096 	node = rb_first(&fs_info->swapfile_pins);
11097 	while (node) {
11098 		next = rb_next(node);
11099 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
11100 		if (sp->inode == inode) {
11101 			rb_erase(&sp->node, &fs_info->swapfile_pins);
11102 			if (sp->is_block_group) {
11103 				btrfs_dec_block_group_swap_extents(sp->ptr,
11104 							   sp->bg_extent_count);
11105 				btrfs_put_block_group(sp->ptr);
11106 			}
11107 			kfree(sp);
11108 		}
11109 		node = next;
11110 	}
11111 	spin_unlock(&fs_info->swapfile_pins_lock);
11112 }
11113 
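/*
 * Accumulated state while walking the swapfile's extents in
 * btrfs_swap_activate().
 */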
11114 struct btrfs_swap_info {
11115 	u64 start;
11116 	u64 block_start;
11117 	u64 block_len;
11118 	u64 lowest_ppage;
11119 	u64 highest_ppage;
11120 	unsigned long nr_pages;
11121 	int nr_extents;
11122 };
11123 
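/*
 * Hand one physically contiguous run of the swapfile to the swap code via
 * add_swap_extent(), clamped to the maximum size recorded in the swap
 * header. Page 0 holds the swap header, so it is never reported as the
 * lowest usable page.
 */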
11124 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
11125 				 struct btrfs_swap_info *bsi)
11126 {
11127 	unsigned long nr_pages;
11128 	unsigned long max_pages;
11129 	u64 first_ppage, first_ppage_reported, next_ppage;
11130 	int ret;
11131 
11132 	/*
11133 	 * Our swapfile may have had its size extended after the swap header was
11134 	 * written. In that case activating the swapfile should not go beyond
11135 	 * the max size set in the swap header.
11136 	 */
11137 	if (bsi->nr_pages >= sis->max)
11138 		return 0;
11139 
11140 	max_pages = sis->max - bsi->nr_pages;
11141 	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
11142 	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
11143 				PAGE_SIZE) >> PAGE_SHIFT;
11144 
11145 	if (first_ppage >= next_ppage)
11146 		return 0;
11147 	nr_pages = next_ppage - first_ppage;
11148 	nr_pages = min(nr_pages, max_pages);
11149 
11150 	first_ppage_reported = first_ppage;
11151 	if (bsi->start == 0)
11152 		first_ppage_reported++;
11153 	if (bsi->lowest_ppage > first_ppage_reported)
11154 		bsi->lowest_ppage = first_ppage_reported;
11155 	if (bsi->highest_ppage < (next_ppage - 1))
11156 		bsi->highest_ppage = next_ppage - 1;
11157 
11158 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
11159 	if (ret < 0)
11160 		return ret;
11161 	bsi->nr_extents += ret;
11162 	bsi->nr_pages += nr_pages;
11163 	return 0;
11164 }
11165 
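/* Undo btrfs_swap_activate(): drop the pins and the swapfile count. */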
11166 static void btrfs_swap_deactivate(struct file *file)
11167 {
11168 	struct inode *inode = file_inode(file);
11169 
11170 	btrfs_free_swapfile_pins(inode);
11171 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
11172 }
11173 
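/*
 * Activate a swapfile: check that it is fully allocated, NOCOW, and free
 * of holes, compression, and checksums; pin the block groups and devices
 * it lives on; and register its physical extents with the swap code.
 */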
11174 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
11175 			       sector_t *span)
11176 {
11177 	struct inode *inode = file_inode(file);
11178 	struct btrfs_root *root = BTRFS_I(inode)->root;
11179 	struct btrfs_fs_info *fs_info = root->fs_info;
11180 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
11181 	struct extent_state *cached_state = NULL;
11182 	struct extent_map *em = NULL;
11183 	struct btrfs_device *device = NULL;
11184 	struct btrfs_swap_info bsi = {
11185 		.lowest_ppage = (sector_t)-1ULL,
11186 	};
11187 	int ret = 0;
11188 	u64 isize;
11189 	u64 start;
11190 
11191 	/*
11192 	 * If the swap file was just created, make sure delalloc is done. If the
11193 	 * file changes again after this, the user is doing something stupid and
11194 	 * we don't really care.
11195 	 */
11196 	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
11197 	if (ret)
11198 		return ret;
11199 
11200 	/*
11201 	 * The inode is locked, so these flags won't change after we check them.
11202 	 */
11203 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
11204 		btrfs_warn(fs_info, "swapfile must not be compressed");
11205 		return -EINVAL;
11206 	}
11207 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
11208 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
11209 		return -EINVAL;
11210 	}
11211 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
11212 		btrfs_warn(fs_info, "swapfile must not be checksummed");
11213 		return -EINVAL;
11214 	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * If snapshot creation already started before we bumped nr_swapfiles
	 * from 0 to 1 and completes before the first write into the swap file
	 * after it is activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that the subvolume is marked for deletion but not yet
	 * removed. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
		"cannot activate swapfile because subvolume %llu is being deleted",
			root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}
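
		/*
		 * Editor's note (not in the original source): the ret dance
		 * above follows can_nocow_extent()'s convention: <0 is an
		 * error, >0 means the range can be written in place without
		 * COW, and 0 means a write would COW (e.g. the extent is
		 * shared with a snapshot or reflink), which disqualifies the
		 * file as a swapfile.
		 */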

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
			   bg->start,
			   atomic_read(&fs_info->scrubs_running) ?
				       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

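		/*
		 * Editor's note (not in the original source): physically
		 * contiguous runs are coalesced before being submitted. If
		 * the accumulated run covers [X, X + block_len) and this
		 * extent starts at physical X + block_len, the two merge into
		 * one bsi entry; any discontiguity flushes the accumulated
		 * entry via btrfs_add_swap_extent() first.
		 */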
		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);

	unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
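
/*
 * Editor's note (not in the original source): on success ->swap_activate is
 * expected to return the number of swap extents added, and *span is the
 * distance in pages between the lowest and highest physical pages used,
 * which the swap code reports when the swap area is enabled.
 */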
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
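
/*
 * Editor's note (not in the original source): a hypothetical caller sketch
 * for the helper above, as used when replacing extents in a file range:
 *
 *	// We dropped del_bytes of old extents and inserted add_bytes of new
 *	// ones covering the same range; publish the delta in one critical
 *	// section so a concurrent stat(2) never sees a half-updated count.
 *	btrfs_update_inode_bytes(inode, add_bytes, del_bytes);
 *
 * Taking inode->lock around both the subtraction and the addition is what
 * keeps readers from observing the intermediate value.
 */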

/**
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range; its value plus 1 should
 *           be sector size aligned.
 *
 * This should typically be used in cases where we have locked the inode's VFS
 * lock in exclusive mode, have also locked the inode's i_mmap_lock in
 * exclusive mode, have flushed all delalloc in the range, have waited for all
 * ordered extents in the range to complete and, finally, have locked the file
 * range in the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile        = btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release        = btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * Btrfs doesn't support the bmap operation because swapfiles use bmap to make
 * a mapping of extents in the file. They assume these extents won't change
 * over the life of the file and they use the bmap result to do IO directly to
 * the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't suitable for
 * IO and they also will change frequently as COW operations happen. So,
 * swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
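/*
 * Editor's note (not in the original source): the safe replacement for the
 * bmap-based path is the ->swap_activate/->swap_deactivate pair wired up
 * below, which pins the device and the affected block groups so the physical
 * mapping built at swapon time stays valid for the life of the swapfile.
 */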
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
#ifdef CONFIG_MIGRATION
	.migratepage	= btrfs_migratepage,
#endif
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr      = btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};