1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/file.h>
9 #include <linux/fs.h>
10 #include <linux/fsnotify.h>
11 #include <linux/pagemap.h>
12 #include <linux/highmem.h>
13 #include <linux/time.h>
14 #include <linux/string.h>
15 #include <linux/backing-dev.h>
16 #include <linux/mount.h>
17 #include <linux/namei.h>
18 #include <linux/writeback.h>
19 #include <linux/compat.h>
20 #include <linux/security.h>
21 #include <linux/xattr.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include <linux/btrfs.h>
27 #include <linux/uaccess.h>
28 #include <linux/iversion.h>
29 #include <linux/fileattr.h>
30 #include <linux/fsverity.h>
31 #include <linux/sched/xacct.h>
32 #include "ctree.h"
33 #include "disk-io.h"
34 #include "export.h"
35 #include "transaction.h"
36 #include "btrfs_inode.h"
37 #include "print-tree.h"
38 #include "volumes.h"
39 #include "locking.h"
40 #include "backref.h"
41 #include "rcu-string.h"
42 #include "send.h"
43 #include "dev-replace.h"
44 #include "props.h"
45 #include "sysfs.h"
46 #include "qgroup.h"
47 #include "tree-log.h"
48 #include "compression.h"
49 #include "space-info.h"
50 #include "delalloc-space.h"
51 #include "block-group.h"
52 #include "subpage.h"
53 
54 #ifdef CONFIG_64BIT
55 /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
56  * structures are incorrect, as the timespec structure from userspace
57  * is 4 bytes too small. We define these alternatives here to teach
58  * the kernel about the 32-bit struct packing.
59  */
60 struct btrfs_ioctl_timespec_32 {
61 	__u64 sec;
62 	__u32 nsec;
63 } __attribute__ ((__packed__));
64 
65 struct btrfs_ioctl_received_subvol_args_32 {
66 	char	uuid[BTRFS_UUID_SIZE];	/* in */
67 	__u64	stransid;		/* in */
68 	__u64	rtransid;		/* out */
69 	struct btrfs_ioctl_timespec_32 stime; /* in */
70 	struct btrfs_ioctl_timespec_32 rtime; /* out */
71 	__u64	flags;			/* in */
72 	__u64	reserved[16];		/* in */
73 } __attribute__ ((__packed__));
74 
75 #define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
76 				struct btrfs_ioctl_received_subvol_args_32)
77 #endif
78 
79 #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
80 struct btrfs_ioctl_send_args_32 {
81 	__s64 send_fd;			/* in */
82 	__u64 clone_sources_count;	/* in */
83 	compat_uptr_t clone_sources;	/* in */
84 	__u64 parent_root;		/* in */
85 	__u64 flags;			/* in */
86 	__u32 version;			/* in */
87 	__u8  reserved[28];		/* in */
88 } __attribute__ ((__packed__));
89 
90 #define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
91 			       struct btrfs_ioctl_send_args_32)
92 
93 struct btrfs_ioctl_encoded_io_args_32 {
94 	compat_uptr_t iov;
95 	compat_ulong_t iovcnt;
96 	__s64 offset;
97 	__u64 flags;
98 	__u64 len;
99 	__u64 unencoded_len;
100 	__u64 unencoded_offset;
101 	__u32 compression;
102 	__u32 encryption;
103 	__u8 reserved[64];
104 };
105 
106 #define BTRFS_IOC_ENCODED_READ_32 _IOR(BTRFS_IOCTL_MAGIC, 64, \
107 				       struct btrfs_ioctl_encoded_io_args_32)
108 #define BTRFS_IOC_ENCODED_WRITE_32 _IOW(BTRFS_IOCTL_MAGIC, 64, \
109 					struct btrfs_ioctl_encoded_io_args_32)
110 #endif
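
/*
 * Illustrative note (a sketch, not part of the original file): the packed
 * *_32 structures above mirror the memory layout produced by a 32-bit
 * userspace, so the request numbers computed from them (for example
 * BTRFS_IOC_SEND_32) equal the value a compat task issues for
 * BTRFS_IOC_SEND.  The main ioctl dispatcher can then handle the compat
 * case along these lines (translation details assumed):
 *
 *	case BTRFS_IOC_SEND_32:
 *		// copy in struct btrfs_ioctl_send_args_32, widen the
 *		// compat_uptr_t clone_sources pointer, then call the
 *		// common send implementation
 *		break;
 */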
111 
112 /* Mask out flags that are inappropriate for the given type of inode. */
113 static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
114 		unsigned int flags)
115 {
116 	if (S_ISDIR(inode->i_mode))
117 		return flags;
118 	else if (S_ISREG(inode->i_mode))
119 		return flags & ~FS_DIRSYNC_FL;
120 	else
121 		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
122 }
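
/*
 * Example (illustrative only): for a FIFO or device node everything except
 * NODUMP/NOATIME is masked away, so
 *
 *	btrfs_mask_fsflags_for_type(fifo_inode, FS_IMMUTABLE_FL | FS_NODUMP_FL)
 *
 * returns only FS_NODUMP_FL, while the same flags requested on a directory
 * are returned unchanged.
 */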
123 
124 /*
125  * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
126  * ioctl.
127  */
128 static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
129 {
130 	unsigned int iflags = 0;
131 	u32 flags = binode->flags;
132 	u32 ro_flags = binode->ro_flags;
133 
134 	if (flags & BTRFS_INODE_SYNC)
135 		iflags |= FS_SYNC_FL;
136 	if (flags & BTRFS_INODE_IMMUTABLE)
137 		iflags |= FS_IMMUTABLE_FL;
138 	if (flags & BTRFS_INODE_APPEND)
139 		iflags |= FS_APPEND_FL;
140 	if (flags & BTRFS_INODE_NODUMP)
141 		iflags |= FS_NODUMP_FL;
142 	if (flags & BTRFS_INODE_NOATIME)
143 		iflags |= FS_NOATIME_FL;
144 	if (flags & BTRFS_INODE_DIRSYNC)
145 		iflags |= FS_DIRSYNC_FL;
146 	if (flags & BTRFS_INODE_NODATACOW)
147 		iflags |= FS_NOCOW_FL;
148 	if (ro_flags & BTRFS_INODE_RO_VERITY)
149 		iflags |= FS_VERITY_FL;
150 
151 	if (flags & BTRFS_INODE_NOCOMPRESS)
152 		iflags |= FS_NOCOMP_FL;
153 	else if (flags & BTRFS_INODE_COMPRESS)
154 		iflags |= FS_COMPR_FL;
155 
156 	return iflags;
157 }
158 
159 /*
160  * Update inode->i_flags based on the btrfs internal flags.
161  */
162 void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
163 {
164 	struct btrfs_inode *binode = BTRFS_I(inode);
165 	unsigned int new_fl = 0;
166 
167 	if (binode->flags & BTRFS_INODE_SYNC)
168 		new_fl |= S_SYNC;
169 	if (binode->flags & BTRFS_INODE_IMMUTABLE)
170 		new_fl |= S_IMMUTABLE;
171 	if (binode->flags & BTRFS_INODE_APPEND)
172 		new_fl |= S_APPEND;
173 	if (binode->flags & BTRFS_INODE_NOATIME)
174 		new_fl |= S_NOATIME;
175 	if (binode->flags & BTRFS_INODE_DIRSYNC)
176 		new_fl |= S_DIRSYNC;
177 	if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
178 		new_fl |= S_VERITY;
179 
180 	set_mask_bits(&inode->i_flags,
181 		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
182 		      S_VERITY, new_fl);
183 }
184 
185 /*
186  * Check if @flags are a supported and valid set of FS_*_FL flags and that
187  * the old and new flags are not conflicting
188  */
189 static int check_fsflags(unsigned int old_flags, unsigned int flags)
190 {
191 	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
192 		      FS_NOATIME_FL | FS_NODUMP_FL | \
193 		      FS_SYNC_FL | FS_DIRSYNC_FL | \
194 		      FS_NOCOMP_FL | FS_COMPR_FL |
195 		      FS_NOCOW_FL))
196 		return -EOPNOTSUPP;
197 
198 	/* COMPR and NOCOMP cannot both be requested */
199 	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
200 		return -EINVAL;
201 
202 	if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
203 		return -EINVAL;
204 
205 	/* NOCOW and compression options are mutually exclusive */
206 	if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
207 		return -EINVAL;
208 	if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
209 		return -EINVAL;
210 
211 	return 0;
212 }
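
/*
 * Worked example (illustrative): with old_flags = FS_NOCOW_FL, requesting
 * flags = FS_NOCOW_FL | FS_COMPR_FL is rejected because COMPR and NOCOW
 * cannot be combined, and requesting flags = FS_COMPR_FL alone is rejected
 * as well because NOCOW was previously set; the caller has to clear NOCOW
 * in a separate call first.
 */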
213 
214 static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
215 				    unsigned int flags)
216 {
217 	if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
218 		return -EPERM;
219 
220 	return 0;
221 }
222 
223 /*
224  * Set flags/xflags from the internal inode flags. The remaining items of
225  * fsxattr are zeroed.
226  */
227 int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
228 {
229 	struct btrfs_inode *binode = BTRFS_I(d_inode(dentry));
230 
231 	fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode));
232 	return 0;
233 }
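
/*
 * Userspace sketch (assumptions: fd refers to an open btrfs file).
 * FS_IOC_GETFLAGS reaches this handler through the VFS fileattr interface:
 *
 *	int flags;
 *
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 && (flags & FS_NOCOW_FL))
 *		printf("NOCOW is set\n");
 */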
234 
235 int btrfs_fileattr_set(struct user_namespace *mnt_userns,
236 		       struct dentry *dentry, struct fileattr *fa)
237 {
238 	struct inode *inode = d_inode(dentry);
239 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
240 	struct btrfs_inode *binode = BTRFS_I(inode);
241 	struct btrfs_root *root = binode->root;
242 	struct btrfs_trans_handle *trans;
243 	unsigned int fsflags, old_fsflags;
244 	int ret;
245 	const char *comp = NULL;
246 	u32 binode_flags;
247 
248 	if (btrfs_root_readonly(root))
249 		return -EROFS;
250 
251 	if (fileattr_has_fsx(fa))
252 		return -EOPNOTSUPP;
253 
254 	fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
255 	old_fsflags = btrfs_inode_flags_to_fsflags(binode);
256 	ret = check_fsflags(old_fsflags, fsflags);
257 	if (ret)
258 		return ret;
259 
260 	ret = check_fsflags_compatible(fs_info, fsflags);
261 	if (ret)
262 		return ret;
263 
264 	binode_flags = binode->flags;
265 	if (fsflags & FS_SYNC_FL)
266 		binode_flags |= BTRFS_INODE_SYNC;
267 	else
268 		binode_flags &= ~BTRFS_INODE_SYNC;
269 	if (fsflags & FS_IMMUTABLE_FL)
270 		binode_flags |= BTRFS_INODE_IMMUTABLE;
271 	else
272 		binode_flags &= ~BTRFS_INODE_IMMUTABLE;
273 	if (fsflags & FS_APPEND_FL)
274 		binode_flags |= BTRFS_INODE_APPEND;
275 	else
276 		binode_flags &= ~BTRFS_INODE_APPEND;
277 	if (fsflags & FS_NODUMP_FL)
278 		binode_flags |= BTRFS_INODE_NODUMP;
279 	else
280 		binode_flags &= ~BTRFS_INODE_NODUMP;
281 	if (fsflags & FS_NOATIME_FL)
282 		binode_flags |= BTRFS_INODE_NOATIME;
283 	else
284 		binode_flags &= ~BTRFS_INODE_NOATIME;
285 
286 	/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
287 	if (!fa->flags_valid) {
288 		/* 1 item for the inode */
289 		trans = btrfs_start_transaction(root, 1);
290 		if (IS_ERR(trans))
291 			return PTR_ERR(trans);
292 		goto update_flags;
293 	}
294 
295 	if (fsflags & FS_DIRSYNC_FL)
296 		binode_flags |= BTRFS_INODE_DIRSYNC;
297 	else
298 		binode_flags &= ~BTRFS_INODE_DIRSYNC;
299 	if (fsflags & FS_NOCOW_FL) {
300 		if (S_ISREG(inode->i_mode)) {
301 			/*
302 			 * It's safe to turn csums off here since no extents exist
303 			 * yet. Otherwise we want the flag to reflect the real COW
304 			 * status of the file, so we do not set it.
305 			 */
306 			if (inode->i_size == 0)
307 				binode_flags |= BTRFS_INODE_NODATACOW |
308 						BTRFS_INODE_NODATASUM;
309 		} else {
310 			binode_flags |= BTRFS_INODE_NODATACOW;
311 		}
312 	} else {
313 		/*
314 		 * Revert under the same assumptions as above
315 		 */
316 		if (S_ISREG(inode->i_mode)) {
317 			if (inode->i_size == 0)
318 				binode_flags &= ~(BTRFS_INODE_NODATACOW |
319 						  BTRFS_INODE_NODATASUM);
320 		} else {
321 			binode_flags &= ~BTRFS_INODE_NODATACOW;
322 		}
323 	}
324 
325 	/*
326 	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
327 	 * flag may be changed automatically if the compression code won't make
328 	 * things smaller.
329 	 */
330 	if (fsflags & FS_NOCOMP_FL) {
331 		binode_flags &= ~BTRFS_INODE_COMPRESS;
332 		binode_flags |= BTRFS_INODE_NOCOMPRESS;
333 	} else if (fsflags & FS_COMPR_FL) {
334 
335 		if (IS_SWAPFILE(inode))
336 			return -ETXTBSY;
337 
338 		binode_flags |= BTRFS_INODE_COMPRESS;
339 		binode_flags &= ~BTRFS_INODE_NOCOMPRESS;
340 
341 		comp = btrfs_compress_type2str(fs_info->compress_type);
342 		if (!comp || comp[0] == 0)
343 			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
344 	} else {
345 		binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
346 	}
347 
348 	/*
349 	 * 1 for inode item
350 	 * 2 for properties
351 	 */
352 	trans = btrfs_start_transaction(root, 3);
353 	if (IS_ERR(trans))
354 		return PTR_ERR(trans);
355 
356 	if (comp) {
357 		ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
358 				     strlen(comp), 0);
359 		if (ret) {
360 			btrfs_abort_transaction(trans, ret);
361 			goto out_end_trans;
362 		}
363 	} else {
364 		ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
365 				     0, 0);
366 		if (ret && ret != -ENODATA) {
367 			btrfs_abort_transaction(trans, ret);
368 			goto out_end_trans;
369 		}
370 	}
371 
372 update_flags:
373 	binode->flags = binode_flags;
374 	btrfs_sync_inode_flags_to_i_flags(inode);
375 	inode_inc_iversion(inode);
376 	inode->i_ctime = current_time(inode);
377 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
378 
379  out_end_trans:
380 	btrfs_end_transaction(trans);
381 	return ret;
382 }
383 
384 /*
385  * Start exclusive operation @type, return true on success
386  */
387 bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
388 			enum btrfs_exclusive_operation type)
389 {
390 	bool ret = false;
391 
392 	spin_lock(&fs_info->super_lock);
393 	if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
394 		fs_info->exclusive_operation = type;
395 		ret = true;
396 	}
397 	spin_unlock(&fs_info->super_lock);
398 
399 	return ret;
400 }
401 
402 /*
403  * Conditionally allow entering the exclusive operation in case it's compatible
404  * with the running one.  This must be paired with btrfs_exclop_start_unlock and
405  * btrfs_exclop_finish.
406  *
407  * Compatibility:
408  * - the same type is already running
409  * - when trying to add a device and balance has been paused
410  * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
411  *   must check the condition first that would allow none -> @type
412  */
413 bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
414 				 enum btrfs_exclusive_operation type)
415 {
416 	spin_lock(&fs_info->super_lock);
417 	if (fs_info->exclusive_operation == type ||
418 	    (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
419 	     type == BTRFS_EXCLOP_DEV_ADD))
420 		return true;
421 
422 	spin_unlock(&fs_info->super_lock);
423 	return false;
424 }
425 
426 void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
427 {
428 	spin_unlock(&fs_info->super_lock);
429 }
430 
431 void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
432 {
433 	spin_lock(&fs_info->super_lock);
434 	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
435 	spin_unlock(&fs_info->super_lock);
436 	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
437 }
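
/*
 * Typical pairing (illustrative sketch): an ioctl needing exclusivity does
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return -EBUSY;	// another exclusive op is running
 *	// ... perform the exclusive work ...
 *	btrfs_exclop_finish(fs_info);
 *
 * while the conditional variant keeps super_lock held on success:
 *
 *	if (btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
 *		// re-check state, then drop the lock
 *		btrfs_exclop_start_unlock(fs_info);
 *	}
 */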
438 
439 void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
440 			  enum btrfs_exclusive_operation op)
441 {
442 	switch (op) {
443 	case BTRFS_EXCLOP_BALANCE_PAUSED:
444 		spin_lock(&fs_info->super_lock);
445 		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
446 		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD);
447 		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
448 		spin_unlock(&fs_info->super_lock);
449 		break;
450 	case BTRFS_EXCLOP_BALANCE:
451 		spin_lock(&fs_info->super_lock);
452 		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
453 		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
454 		spin_unlock(&fs_info->super_lock);
455 		break;
456 	default:
457 		btrfs_warn(fs_info,
458 			"invalid exclop balance operation %d requested", op);
459 	}
460 }
461 
462 static int btrfs_ioctl_getversion(struct inode *inode, int __user *arg)
463 {
464 	return put_user(inode->i_generation, arg);
465 }
466 
467 static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
468 					void __user *arg)
469 {
470 	struct btrfs_device *device;
471 	struct fstrim_range range;
472 	u64 minlen = ULLONG_MAX;
473 	u64 num_devices = 0;
474 	int ret;
475 
476 	if (!capable(CAP_SYS_ADMIN))
477 		return -EPERM;
478 
479 	/*
480 	 * btrfs_trim_block_group() depends on space cache, which is not
481 	 * available in zoned filesystem. So, disallow fitrim on a zoned
482 	 * filesystem for now.
483 	 */
484 	if (btrfs_is_zoned(fs_info))
485 		return -EOPNOTSUPP;
486 
487 	/*
488 	 * If the fs is mounted with nologreplay, which requires it to be
489 	 * mounted in RO mode as well, we cannot allow discard on free space
490 	 * inside block groups, because log trees refer to extents that are not
491 	 * pinned in a block group's free space cache (pinning the extents is
492 	 * precisely the first phase of replaying a log tree).
493 	 */
494 	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
495 		return -EROFS;
496 
497 	rcu_read_lock();
498 	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
499 				dev_list) {
500 		if (!device->bdev || !bdev_max_discard_sectors(device->bdev))
501 			continue;
502 		num_devices++;
503 		minlen = min_t(u64, bdev_discard_granularity(device->bdev),
504 				    minlen);
505 	}
506 	rcu_read_unlock();
507 
508 	if (!num_devices)
509 		return -EOPNOTSUPP;
510 	if (copy_from_user(&range, arg, sizeof(range)))
511 		return -EFAULT;
512 
513 	/*
514 	 * NOTE: Don't truncate the range using super->total_bytes.  Bytenr of
515 	 * block group is in the logical address space, which can be any
516 	 * sectorsize-aligned bytenr in the range [0, U64_MAX].
517 	 */
518 	if (range.len < fs_info->sb->s_blocksize)
519 		return -EINVAL;
520 
521 	range.minlen = max(range.minlen, minlen);
522 	ret = btrfs_trim_fs(fs_info, &range);
523 	if (ret < 0)
524 		return ret;
525 
526 	if (copy_to_user(arg, &range, sizeof(range)))
527 		return -EFAULT;
528 
529 	return 0;
530 }
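
/*
 * Userspace view (illustrative, assuming an fstrim-like tool): the FITRIM
 * ioctl handled above takes a struct fstrim_range and reports back how many
 * bytes were actually discarded:
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *
 *	if (ioctl(fd, FITRIM, &r) == 0)
 *		printf("trimmed %llu bytes\n", (unsigned long long)r.len);
 */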
531 
532 int __pure btrfs_is_empty_uuid(u8 *uuid)
533 {
534 	int i;
535 
536 	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
537 		if (uuid[i])
538 			return 0;
539 	}
540 	return 1;
541 }
542 
543 /*
544  * Calculate the number of transaction items to reserve for creating a subvolume
545  * or snapshot, not including the inode, directory entries, or parent directory.
546  */
547 static unsigned int create_subvol_num_items(struct btrfs_qgroup_inherit *inherit)
548 {
549 	/*
550 	 * 1 to add root block
551 	 * 1 to add root item
552 	 * 1 to add root ref
553 	 * 1 to add root backref
554 	 * 1 to add UUID item
555 	 * 1 to add qgroup info
556 	 * 1 to add qgroup limit
557 	 *
558 	 * Ideally the last two would only be accounted if qgroups are enabled,
559 	 * but that can change between now and the time we would insert them.
560 	 */
561 	unsigned int num_items = 7;
562 
563 	if (inherit) {
564 		/* 2 to add qgroup relations for each inherited qgroup */
565 		num_items += 2 * inherit->num_qgroups;
566 	}
567 	return num_items;
568 }
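
/*
 * Worked example (illustrative): creating a subvolume that inherits two
 * qgroups reserves 7 + 2 * 2 = 11 items from this helper; create_snapshot()
 * below then adds 3 more for the directory entries and the parent inode
 * update.
 */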
569 
570 static noinline int create_subvol(struct user_namespace *mnt_userns,
571 				  struct inode *dir, struct dentry *dentry,
572 				  struct btrfs_qgroup_inherit *inherit)
573 {
574 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
575 	struct btrfs_trans_handle *trans;
576 	struct btrfs_key key;
577 	struct btrfs_root_item *root_item;
578 	struct btrfs_inode_item *inode_item;
579 	struct extent_buffer *leaf;
580 	struct btrfs_root *root = BTRFS_I(dir)->root;
581 	struct btrfs_root *new_root;
582 	struct btrfs_block_rsv block_rsv;
583 	struct timespec64 cur_time = current_time(dir);
584 	struct btrfs_new_inode_args new_inode_args = {
585 		.dir = dir,
586 		.dentry = dentry,
587 		.subvol = true,
588 	};
589 	unsigned int trans_num_items;
590 	int ret;
591 	dev_t anon_dev;
592 	u64 objectid;
593 
594 	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
595 	if (!root_item)
596 		return -ENOMEM;
597 
598 	ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
599 	if (ret)
600 		goto out_root_item;
601 
602 	/*
603 	 * Don't create a subvolume whose level is not zero, or qgroups will be
604 	 * screwed up since they assume a subvolume qgroup's level to be 0.
605 	 */
606 	if (btrfs_qgroup_level(objectid)) {
607 		ret = -ENOSPC;
608 		goto out_root_item;
609 	}
610 
611 	ret = get_anon_bdev(&anon_dev);
612 	if (ret < 0)
613 		goto out_root_item;
614 
615 	new_inode_args.inode = btrfs_new_subvol_inode(mnt_userns, dir);
616 	if (!new_inode_args.inode) {
617 		ret = -ENOMEM;
618 		goto out_anon_dev;
619 	}
620 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
621 	if (ret)
622 		goto out_inode;
623 	trans_num_items += create_subvol_num_items(inherit);
624 
625 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
626 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
627 					       trans_num_items, false);
628 	if (ret)
629 		goto out_new_inode_args;
630 
631 	trans = btrfs_start_transaction(root, 0);
632 	if (IS_ERR(trans)) {
633 		ret = PTR_ERR(trans);
634 		btrfs_subvolume_release_metadata(root, &block_rsv);
635 		goto out_new_inode_args;
636 	}
637 	trans->block_rsv = &block_rsv;
638 	trans->bytes_reserved = block_rsv.size;
639 
640 	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
641 	if (ret)
642 		goto out;
643 
644 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
645 				      BTRFS_NESTING_NORMAL);
646 	if (IS_ERR(leaf)) {
647 		ret = PTR_ERR(leaf);
648 		goto out;
649 	}
650 
651 	btrfs_mark_buffer_dirty(leaf);
652 
653 	inode_item = &root_item->inode;
654 	btrfs_set_stack_inode_generation(inode_item, 1);
655 	btrfs_set_stack_inode_size(inode_item, 3);
656 	btrfs_set_stack_inode_nlink(inode_item, 1);
657 	btrfs_set_stack_inode_nbytes(inode_item,
658 				     fs_info->nodesize);
659 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
660 
661 	btrfs_set_root_flags(root_item, 0);
662 	btrfs_set_root_limit(root_item, 0);
663 	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);
664 
665 	btrfs_set_root_bytenr(root_item, leaf->start);
666 	btrfs_set_root_generation(root_item, trans->transid);
667 	btrfs_set_root_level(root_item, 0);
668 	btrfs_set_root_refs(root_item, 1);
669 	btrfs_set_root_used(root_item, leaf->len);
670 	btrfs_set_root_last_snapshot(root_item, 0);
671 
672 	btrfs_set_root_generation_v2(root_item,
673 			btrfs_root_generation(root_item));
674 	generate_random_guid(root_item->uuid);
675 	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
676 	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
677 	root_item->ctime = root_item->otime;
678 	btrfs_set_root_ctransid(root_item, trans->transid);
679 	btrfs_set_root_otransid(root_item, trans->transid);
680 
681 	btrfs_tree_unlock(leaf);
682 
683 	btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
684 
685 	key.objectid = objectid;
686 	key.offset = 0;
687 	key.type = BTRFS_ROOT_ITEM_KEY;
688 	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
689 				root_item);
690 	if (ret) {
691 		/*
692 		 * Since we don't abort the transaction in this case, free the
693 		 * tree block so that we don't leak space and leave the
694 		 * filesystem in an inconsistent state (an extent item in the
695 		 * extent tree with a backreference for a root that does not
696 		 * exist).
697 		 */
698 		btrfs_tree_lock(leaf);
699 		btrfs_clean_tree_block(leaf);
700 		btrfs_tree_unlock(leaf);
701 		btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
702 		free_extent_buffer(leaf);
703 		goto out;
704 	}
705 
706 	free_extent_buffer(leaf);
707 	leaf = NULL;
708 
709 	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
710 	if (IS_ERR(new_root)) {
711 		ret = PTR_ERR(new_root);
712 		btrfs_abort_transaction(trans, ret);
713 		goto out;
714 	}
715 	/* anon_dev is owned by new_root now. */
716 	anon_dev = 0;
717 	BTRFS_I(new_inode_args.inode)->root = new_root;
718 	/* ... and new_root is owned by new_inode_args.inode now. */
719 
720 	ret = btrfs_record_root_in_trans(trans, new_root);
721 	if (ret) {
722 		btrfs_abort_transaction(trans, ret);
723 		goto out;
724 	}
725 
726 	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
727 				  BTRFS_UUID_KEY_SUBVOL, objectid);
728 	if (ret) {
729 		btrfs_abort_transaction(trans, ret);
730 		goto out;
731 	}
732 
733 	ret = btrfs_create_new_inode(trans, &new_inode_args);
734 	if (ret) {
735 		btrfs_abort_transaction(trans, ret);
736 		goto out;
737 	}
738 
739 	d_instantiate_new(dentry, new_inode_args.inode);
740 	new_inode_args.inode = NULL;
741 
742 out:
743 	trans->block_rsv = NULL;
744 	trans->bytes_reserved = 0;
745 	btrfs_subvolume_release_metadata(root, &block_rsv);
746 
747 	if (ret)
748 		btrfs_end_transaction(trans);
749 	else
750 		ret = btrfs_commit_transaction(trans);
751 out_new_inode_args:
752 	btrfs_new_inode_args_destroy(&new_inode_args);
753 out_inode:
754 	iput(new_inode_args.inode);
755 out_anon_dev:
756 	if (anon_dev)
757 		free_anon_bdev(anon_dev);
758 out_root_item:
759 	kfree(root_item);
760 	return ret;
761 }
762 
763 static int create_snapshot(struct btrfs_root *root, struct inode *dir,
764 			   struct dentry *dentry, bool readonly,
765 			   struct btrfs_qgroup_inherit *inherit)
766 {
767 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
768 	struct inode *inode;
769 	struct btrfs_pending_snapshot *pending_snapshot;
770 	unsigned int trans_num_items;
771 	struct btrfs_trans_handle *trans;
772 	int ret;
773 
774 	/* We do not support snapshotting right now. */
775 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
776 		btrfs_warn(fs_info,
777 			   "extent tree v2 doesn't support snapshotting yet");
778 		return -EOPNOTSUPP;
779 	}
780 
781 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
782 		return -EINVAL;
783 
784 	if (atomic_read(&root->nr_swapfiles)) {
785 		btrfs_warn(fs_info,
786 			   "cannot snapshot subvolume with active swapfile");
787 		return -ETXTBSY;
788 	}
789 
790 	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
791 	if (!pending_snapshot)
792 		return -ENOMEM;
793 
794 	ret = get_anon_bdev(&pending_snapshot->anon_dev);
795 	if (ret < 0)
796 		goto free_pending;
797 	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
798 			GFP_KERNEL);
799 	pending_snapshot->path = btrfs_alloc_path();
800 	if (!pending_snapshot->root_item || !pending_snapshot->path) {
801 		ret = -ENOMEM;
802 		goto free_pending;
803 	}
804 
805 	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
806 			     BTRFS_BLOCK_RSV_TEMP);
807 	/*
808 	 * 1 to add dir item
809 	 * 1 to add dir index
810 	 * 1 to update parent inode item
811 	 */
812 	trans_num_items = create_subvol_num_items(inherit) + 3;
813 	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
814 					       &pending_snapshot->block_rsv,
815 					       trans_num_items, false);
816 	if (ret)
817 		goto free_pending;
818 
819 	pending_snapshot->dentry = dentry;
820 	pending_snapshot->root = root;
821 	pending_snapshot->readonly = readonly;
822 	pending_snapshot->dir = dir;
823 	pending_snapshot->inherit = inherit;
824 
825 	trans = btrfs_start_transaction(root, 0);
826 	if (IS_ERR(trans)) {
827 		ret = PTR_ERR(trans);
828 		goto fail;
829 	}
830 
831 	trans->pending_snapshot = pending_snapshot;
832 
833 	ret = btrfs_commit_transaction(trans);
834 	if (ret)
835 		goto fail;
836 
837 	ret = pending_snapshot->error;
838 	if (ret)
839 		goto fail;
840 
841 	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
842 	if (ret)
843 		goto fail;
844 
845 	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
846 	if (IS_ERR(inode)) {
847 		ret = PTR_ERR(inode);
848 		goto fail;
849 	}
850 
851 	d_instantiate(dentry, inode);
852 	ret = 0;
853 	pending_snapshot->anon_dev = 0;
854 fail:
855 	/* Prevent double freeing of anon_dev */
856 	if (ret && pending_snapshot->snap)
857 		pending_snapshot->snap->anon_dev = 0;
858 	btrfs_put_root(pending_snapshot->snap);
859 	btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
860 free_pending:
861 	if (pending_snapshot->anon_dev)
862 		free_anon_bdev(pending_snapshot->anon_dev);
863 	kfree(pending_snapshot->root_item);
864 	btrfs_free_path(pending_snapshot->path);
865 	kfree(pending_snapshot);
866 
867 	return ret;
868 }
869 
870 /* Copy of may_delete() in fs/namei.c:
871  *	Check whether we can remove a link victim from directory dir, check
872  *  whether the type of victim is right.
873  *  1. We can't do it if dir is read-only (done in permission())
874  *  2. We should have write and exec permissions on dir
875  *  3. We can't remove anything from append-only dir
876  *  4. We can't do anything with immutable dir (done in permission())
877  *  5. If the sticky bit on dir is set we should either
878  *	a. be owner of dir, or
879  *	b. be owner of victim, or
880  *	c. have CAP_FOWNER capability
881  *  6. If the victim is append-only or immutable we can't do anything with
882  *     links pointing to it.
883  *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
884  *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
885  *  9. We can't remove a root or mountpoint.
886  * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
887  *     nfs_async_unlink().
888  */
889 
890 static int btrfs_may_delete(struct user_namespace *mnt_userns,
891 			    struct inode *dir, struct dentry *victim, int isdir)
892 {
893 	int error;
894 
895 	if (d_really_is_negative(victim))
896 		return -ENOENT;
897 
898 	BUG_ON(d_inode(victim->d_parent) != dir);
899 	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
900 
901 	error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
902 	if (error)
903 		return error;
904 	if (IS_APPEND(dir))
905 		return -EPERM;
906 	if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
907 	    IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
908 	    IS_SWAPFILE(d_inode(victim)))
909 		return -EPERM;
910 	if (isdir) {
911 		if (!d_is_dir(victim))
912 			return -ENOTDIR;
913 		if (IS_ROOT(victim))
914 			return -EBUSY;
915 	} else if (d_is_dir(victim))
916 		return -EISDIR;
917 	if (IS_DEADDIR(dir))
918 		return -ENOENT;
919 	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
920 		return -EBUSY;
921 	return 0;
922 }
923 
924 /* Copy of may_create() in fs/namei.c */
925 static inline int btrfs_may_create(struct user_namespace *mnt_userns,
926 				   struct inode *dir, struct dentry *child)
927 {
928 	if (d_really_is_positive(child))
929 		return -EEXIST;
930 	if (IS_DEADDIR(dir))
931 		return -ENOENT;
932 	if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
933 		return -EOVERFLOW;
934 	return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
935 }
936 
937 /*
938  * Create a new subvolume below @parent.  This is largely modeled after
939  * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
940  * inside this filesystem so it's quite a bit simpler.
941  */
942 static noinline int btrfs_mksubvol(const struct path *parent,
943 				   struct user_namespace *mnt_userns,
944 				   const char *name, int namelen,
945 				   struct btrfs_root *snap_src,
946 				   bool readonly,
947 				   struct btrfs_qgroup_inherit *inherit)
948 {
949 	struct inode *dir = d_inode(parent->dentry);
950 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
951 	struct dentry *dentry;
952 	int error;
953 
954 	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
955 	if (error == -EINTR)
956 		return error;
957 
958 	dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
959 	error = PTR_ERR(dentry);
960 	if (IS_ERR(dentry))
961 		goto out_unlock;
962 
963 	error = btrfs_may_create(mnt_userns, dir, dentry);
964 	if (error)
965 		goto out_dput;
966 
967 	/*
968 	 * even if this name doesn't exist, we may get hash collisions.
969 	 * check for them now when we can safely fail
970 	 */
971 	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
972 					       dir->i_ino, name,
973 					       namelen);
974 	if (error)
975 		goto out_dput;
976 
977 	down_read(&fs_info->subvol_sem);
978 
979 	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
980 		goto out_up_read;
981 
982 	if (snap_src)
983 		error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
984 	else
985 		error = create_subvol(mnt_userns, dir, dentry, inherit);
986 
987 	if (!error)
988 		fsnotify_mkdir(dir, dentry);
989 out_up_read:
990 	up_read(&fs_info->subvol_sem);
991 out_dput:
992 	dput(dentry);
993 out_unlock:
994 	btrfs_inode_unlock(dir, 0);
995 	return error;
996 }
997 
998 static noinline int btrfs_mksnapshot(const struct path *parent,
999 				   struct user_namespace *mnt_userns,
1000 				   const char *name, int namelen,
1001 				   struct btrfs_root *root,
1002 				   bool readonly,
1003 				   struct btrfs_qgroup_inherit *inherit)
1004 {
1005 	int ret;
1006 	bool snapshot_force_cow = false;
1007 
1008 	/*
1009 	 * Force new buffered writes to reserve space even when NOCOW is
1010 	 * possible. This is to avoid later writeback (running delalloc) from
1011 	 * falling back to COW mode and unexpectedly failing with ENOSPC.
1012 	 */
1013 	btrfs_drew_read_lock(&root->snapshot_lock);
1014 
1015 	ret = btrfs_start_delalloc_snapshot(root, false);
1016 	if (ret)
1017 		goto out;
1018 
1019 	/*
1020 	 * All previous writes have started writeback in NOCOW mode, so now
1021 	 * we force future writes to fallback to COW mode during snapshot
1022 	 * creation.
1023 	 */
1024 	atomic_inc(&root->snapshot_force_cow);
1025 	snapshot_force_cow = true;
1026 
1027 	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
1028 
1029 	ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
1030 			     root, readonly, inherit);
1031 out:
1032 	if (snapshot_force_cow)
1033 		atomic_dec(&root->snapshot_force_cow);
1034 	btrfs_drew_read_unlock(&root->snapshot_lock);
1035 	return ret;
1036 }
1037 
1038 /*
1039  * Defrag specific helper to get an extent map.
1040  *
1041  * Differences between this and btrfs_get_extent() are:
1042  *
1043  * - No extent_map will be added to inode->extent_tree
1044  *   To reduce memory usage in the long run.
1045  *
1046  * - Extra optimization to skip file extents older than @newer_than
1047  *   By using btrfs_search_forward() we can skip entire file ranges that
1048  *   have extents created in past transactions, because btrfs_search_forward()
1049  *   will not visit leaves and nodes with a generation smaller than given
1050  *   minimal generation threshold (@newer_than).
1051  *
1052  * Return valid em if we find a file extent matching the requirement.
1053  * Return NULL if we cannot find a file extent matching the requirement.
1054  *
1055  * Return ERR_PTR() for error.
1056  */
1057 static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
1058 					    u64 start, u64 newer_than)
1059 {
1060 	struct btrfs_root *root = inode->root;
1061 	struct btrfs_file_extent_item *fi;
1062 	struct btrfs_path path = { 0 };
1063 	struct extent_map *em;
1064 	struct btrfs_key key;
1065 	u64 ino = btrfs_ino(inode);
1066 	int ret;
1067 
1068 	em = alloc_extent_map();
1069 	if (!em) {
1070 		ret = -ENOMEM;
1071 		goto err;
1072 	}
1073 
1074 	key.objectid = ino;
1075 	key.type = BTRFS_EXTENT_DATA_KEY;
1076 	key.offset = start;
1077 
1078 	if (newer_than) {
1079 		ret = btrfs_search_forward(root, &key, &path, newer_than);
1080 		if (ret < 0)
1081 			goto err;
1082 		/* Can't find anything newer */
1083 		if (ret > 0)
1084 			goto not_found;
1085 	} else {
1086 		ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
1087 		if (ret < 0)
1088 			goto err;
1089 	}
1090 	if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
1091 		/*
1092 		 * If btrfs_search_slot() makes the path point beyond nritems,
1093 		 * we should not have an empty leaf, as this inode must at
1094 		 * least have its INODE_ITEM.
1095 		 */
1096 		ASSERT(btrfs_header_nritems(path.nodes[0]));
1097 		path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
1098 	}
1099 	btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
1100 	/* Perfect match, no need to go one slot back */
1101 	if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
1102 	    key.offset == start)
1103 		goto iterate;
1104 
1105 	/* We didn't find a perfect match, need to go one slot back */
1106 	if (path.slots[0] > 0) {
1107 		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
1108 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
1109 			path.slots[0]--;
1110 	}
1111 
1112 iterate:
1113 	/* Iterate through the path to find a file extent covering @start */
1114 	while (true) {
1115 		u64 extent_end;
1116 
1117 		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
1118 			goto next;
1119 
1120 		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
1121 
1122 		/*
1123 		 * We may go one slot back to INODE_REF/XATTR item, then
1124 		 * need to go forward until we reach an EXTENT_DATA.
1125 		 * But we should still have the correct ino as key.objectid.
1126 		 */
1127 		if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
1128 			goto next;
1129 
1130 		/* It's beyond our target range, definitely no extent found */
1131 		if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
1132 			goto not_found;
1133 
1134 		/*
1135 		 *	|	|<- File extent ->|
1136 		 *	\- start
1137 		 *
1138 		 * This means there is a hole between start and key.offset.
1139 		 */
1140 		if (key.offset > start) {
1141 			em->start = start;
1142 			em->orig_start = start;
1143 			em->block_start = EXTENT_MAP_HOLE;
1144 			em->len = key.offset - start;
1145 			break;
1146 		}
1147 
1148 		fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
1149 				    struct btrfs_file_extent_item);
1150 		extent_end = btrfs_file_extent_end(&path);
1151 
1152 		/*
1153 		 *	|<- file extent ->|	|
1154 		 *				\- start
1155 		 *
1156 		 * We haven't reached start, search next slot.
1157 		 */
1158 		if (extent_end <= start)
1159 			goto next;
1160 
1161 		/* Now this extent covers @start, convert it to em */
1162 		btrfs_extent_item_to_extent_map(inode, &path, fi, false, em);
1163 		break;
1164 next:
1165 		ret = btrfs_next_item(root, &path);
1166 		if (ret < 0)
1167 			goto err;
1168 		if (ret > 0)
1169 			goto not_found;
1170 	}
1171 	btrfs_release_path(&path);
1172 	return em;
1173 
1174 not_found:
1175 	btrfs_release_path(&path);
1176 	free_extent_map(em);
1177 	return NULL;
1178 
1179 err:
1180 	btrfs_release_path(&path);
1181 	free_extent_map(em);
1182 	return ERR_PTR(ret);
1183 }
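
/*
 * Note (illustrative): when the first file extent found starts after @start,
 * the returned em describes the hole in between. For example, with a file
 * extent at [64K, 128K) and start == 0, the caller gets em->start == 0,
 * em->len == 64K and em->block_start == EXTENT_MAP_HOLE.
 */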
1184 
1185 static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
1186 					       u64 newer_than, bool locked)
1187 {
1188 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1189 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1190 	struct extent_map *em;
1191 	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;
1192 
1193 	/*
1194 	 * hopefully we have this extent in the tree already, try without
1195 	 * the full extent lock
1196 	 */
1197 	read_lock(&em_tree->lock);
1198 	em = lookup_extent_mapping(em_tree, start, sectorsize);
1199 	read_unlock(&em_tree->lock);
1200 
1201 	/*
1202 	 * We can get a merged extent, in that case, we need to re-search
1203 	 * tree to get the original em for defrag.
1204 	 *
1205 	 * If @newer_than is 0 or em::generation < newer_than, we can trust
1206 	 * this em, as either we don't care about the generation, or the
1207 	 * merged extent map will be rejected anyway.
1208 	 */
1209 	if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) &&
1210 	    newer_than && em->generation >= newer_than) {
1211 		free_extent_map(em);
1212 		em = NULL;
1213 	}
1214 
1215 	if (!em) {
1216 		struct extent_state *cached = NULL;
1217 		u64 end = start + sectorsize - 1;
1218 
1219 		/* get the big lock and read metadata off disk */
1220 		if (!locked)
1221 			lock_extent(io_tree, start, end, &cached);
1222 		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
1223 		if (!locked)
1224 			unlock_extent(io_tree, start, end, &cached);
1225 
1226 		if (IS_ERR(em))
1227 			return NULL;
1228 	}
1229 
1230 	return em;
1231 }
1232 
1233 static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
1234 				   const struct extent_map *em)
1235 {
1236 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1237 		return BTRFS_MAX_COMPRESSED;
1238 	return fs_info->max_extent_size;
1239 }
1240 
1241 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
1242 				     u32 extent_thresh, u64 newer_than, bool locked)
1243 {
1244 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1245 	struct extent_map *next;
1246 	bool ret = false;
1247 
1248 	/* this is the last extent */
1249 	if (em->start + em->len >= i_size_read(inode))
1250 		return false;
1251 
1252 	/*
1253 	 * Here we need to pass @newer_than when checking the next extent, or
1254 	 * we will hit a case where we mark the current extent for defrag, but
1255 	 * the next one will not be a target.
1256 	 * This will just cause extra IO without really reducing the fragments.
1257 	 */
1258 	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
1259 	/* No more em or hole */
1260 	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
1261 		goto out;
1262 	if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
1263 		goto out;
1264 	/*
1265 	 * If the next extent is at its max capacity, defragging current extent
1266 	 * makes no sense, as the total number of extents won't change.
1267 	 */
1268 	if (next->len >= get_extent_max_capacity(fs_info, em))
1269 		goto out;
1270 	/* Skip older extent */
1271 	if (next->generation < newer_than)
1272 		goto out;
1273 	/* Also check extent size */
1274 	if (next->len >= extent_thresh)
1275 		goto out;
1276 
1277 	ret = true;
1278 out:
1279 	free_extent_map(next);
1280 	return ret;
1281 }
1282 
1283 /*
1284  * Prepare one page to be defragged.
1285  *
1286  * This will ensure:
1287  *
1288  * - Returned page is locked and has been set up properly.
1289  * - No ordered extent exists in the page.
1290  * - The page is uptodate.
1291  *
1292  * NOTE: Caller should also wait for page writeback after the cluster is
1293  * prepared, here we don't do writeback wait for each page.
1294  */
1295 static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
1296 					    pgoff_t index)
1297 {
1298 	struct address_space *mapping = inode->vfs_inode.i_mapping;
1299 	gfp_t mask = btrfs_alloc_write_mask(mapping);
1300 	u64 page_start = (u64)index << PAGE_SHIFT;
1301 	u64 page_end = page_start + PAGE_SIZE - 1;
1302 	struct extent_state *cached_state = NULL;
1303 	struct page *page;
1304 	int ret;
1305 
1306 again:
1307 	page = find_or_create_page(mapping, index, mask);
1308 	if (!page)
1309 		return ERR_PTR(-ENOMEM);
1310 
1311 	/*
1312 	 * Since we can defragment files opened read-only, we can encounter
1313 	 * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
1314 	 * can't do I/O using huge pages yet, so return an error for now.
1315 	 * Filesystem transparent huge pages are typically only used for
1316 	 * executables that explicitly enable them, so this isn't very
1317 	 * restrictive.
1318 	 */
1319 	if (PageCompound(page)) {
1320 		unlock_page(page);
1321 		put_page(page);
1322 		return ERR_PTR(-ETXTBSY);
1323 	}
1324 
1325 	ret = set_page_extent_mapped(page);
1326 	if (ret < 0) {
1327 		unlock_page(page);
1328 		put_page(page);
1329 		return ERR_PTR(ret);
1330 	}
1331 
1332 	/* Wait for any existing ordered extent in the range */
1333 	while (1) {
1334 		struct btrfs_ordered_extent *ordered;
1335 
1336 		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
1337 		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
1338 		unlock_extent(&inode->io_tree, page_start, page_end,
1339 			      &cached_state);
1340 		if (!ordered)
1341 			break;
1342 
1343 		unlock_page(page);
1344 		btrfs_start_ordered_extent(ordered, 1);
1345 		btrfs_put_ordered_extent(ordered);
1346 		lock_page(page);
1347 		/*
1348 		 * We unlocked the page above, so we need to check if it was
1349 		 * released or not.
1350 		 */
1351 		if (page->mapping != mapping || !PagePrivate(page)) {
1352 			unlock_page(page);
1353 			put_page(page);
1354 			goto again;
1355 		}
1356 	}
1357 
1358 	/*
1359 	 * Now the page range has no ordered extent any more.  Read the page to
1360 	 * make it uptodate.
1361 	 */
1362 	if (!PageUptodate(page)) {
1363 		btrfs_read_folio(NULL, page_folio(page));
1364 		lock_page(page);
1365 		if (page->mapping != mapping || !PagePrivate(page)) {
1366 			unlock_page(page);
1367 			put_page(page);
1368 			goto again;
1369 		}
1370 		if (!PageUptodate(page)) {
1371 			unlock_page(page);
1372 			put_page(page);
1373 			return ERR_PTR(-EIO);
1374 		}
1375 	}
1376 	return page;
1377 }
1378 
1379 struct defrag_target_range {
1380 	struct list_head list;
1381 	u64 start;
1382 	u64 len;
1383 };
1384 
1385 /*
1386  * Collect all valid target extents.
1387  *
1388  * @start:	   file offset to lookup
1389  * @len:	   length to lookup
1390  * @extent_thresh: file extent size threshold, any extent size >= this value
1391  *		   will be ignored
1392  * @newer_than:    only defrag extents newer than this value
1393  * @do_compress:   whether the defrag is doing compression
1394  *		   if true, @extent_thresh will be ignored and all regular
1395  *		   file extents meeting @newer_than will be targets.
1396  * @locked:	   if the range has already held extent lock
1397  * @target_list:   list of targets file extents
1398  */
1399 static int defrag_collect_targets(struct btrfs_inode *inode,
1400 				  u64 start, u64 len, u32 extent_thresh,
1401 				  u64 newer_than, bool do_compress,
1402 				  bool locked, struct list_head *target_list,
1403 				  u64 *last_scanned_ret)
1404 {
1405 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1406 	bool last_is_target = false;
1407 	u64 cur = start;
1408 	int ret = 0;
1409 
1410 	while (cur < start + len) {
1411 		struct extent_map *em;
1412 		struct defrag_target_range *new;
1413 		bool next_mergeable = true;
1414 		u64 range_len;
1415 
1416 		last_is_target = false;
1417 		em = defrag_lookup_extent(&inode->vfs_inode, cur,
1418 					  newer_than, locked);
1419 		if (!em)
1420 			break;
1421 
1422 		/*
1423 		 * If the file extent is an inlined one, we may still want to
1424 		 * defrag it (fallthrough) if it will cause a regular extent.
1425 		 * This is for users who want to convert inline extents to
1426 		 * regular ones through max_inline= mount option.
1427 		 */
1428 		if (em->block_start == EXTENT_MAP_INLINE &&
1429 		    em->len <= inode->root->fs_info->max_inline)
1430 			goto next;
1431 
1432 		/* Skip hole/delalloc/preallocated extents */
1433 		if (em->block_start == EXTENT_MAP_HOLE ||
1434 		    em->block_start == EXTENT_MAP_DELALLOC ||
1435 		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
1436 			goto next;
1437 
1438 		/* Skip older extent */
1439 		if (em->generation < newer_than)
1440 			goto next;
1441 
1442 		/* This em is under writeback, no need to defrag */
1443 		if (em->generation == (u64)-1)
1444 			goto next;
1445 
1446 		/*
1447 		 * Our start offset might be in the middle of an existing extent
1448 		 * map, so take that into account.
1449 		 */
1450 		range_len = em->len - (cur - em->start);
1451 		/*
1452 		 * If this range of the extent map is already flagged for delalloc,
1453 		 * skip it, because:
1454 		 *
1455 		 * 1) We could deadlock later, when trying to reserve space for
1456 		 *    delalloc, because in case we can't immediately reserve space
1457 		 *    the flusher can start delalloc and wait for the respective
1458 		 *    ordered extents to complete. The deadlock would happen
1459 		 *    because we do the space reservation while holding the range
1460 		 *    locked, and starting writeback, or finishing an ordered
1461 		 *    extent, requires locking the range;
1462 		 *
1463 		 * 2) If there's delalloc there, it means there's dirty pages for
1464 		 *    which writeback has not started yet (we clean the delalloc
1465 		 *    flag when starting writeback and after creating an ordered
1466 		 *    extent). If we mark pages in an adjacent range for defrag,
1467 		 *    then we will have a larger contiguous range for delalloc,
1468 		 *    very likely resulting in a larger extent after writeback is
1469 		 *    triggered (except in a case of free space fragmentation).
1470 		 */
1471 		if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
1472 				   EXTENT_DELALLOC, 0, NULL))
1473 			goto next;
1474 
1475 		/*
1476 		 * For do_compress case, we want to compress all valid file
1477 		 * extents, thus no @extent_thresh or mergeable check.
1478 		 */
1479 		if (do_compress)
1480 			goto add;
1481 
1482 		/* Skip too large extent */
1483 		if (range_len >= extent_thresh)
1484 			goto next;
1485 
1486 		/*
1487 		 * Skip extents already at their max capacity; this is mostly for
1488 		 * compressed extents, whose max capacity is only 128K.
1489 		 */
1490 		if (em->len >= get_extent_max_capacity(fs_info, em))
1491 			goto next;
1492 
1493 		/*
1494 		 * Normally there are no more extents after an inline one, thus
1495 		 * @next_mergeable will normally be false and the extent would not
1496 		 * be defragged. So if an inline extent passes all the above checks,
1497 		 * just add it for defrag, so it can be converted to a regular extent.
1498 		 */
1499 		if (em->block_start == EXTENT_MAP_INLINE)
1500 			goto add;
1501 
1502 		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
1503 						extent_thresh, newer_than, locked);
1504 		if (!next_mergeable) {
1505 			struct defrag_target_range *last;
1506 
1507 			/* Empty target list, no way to merge with last entry */
1508 			if (list_empty(target_list))
1509 				goto next;
1510 			last = list_entry(target_list->prev,
1511 					  struct defrag_target_range, list);
1512 			/* Not mergeable with last entry */
1513 			if (last->start + last->len != cur)
1514 				goto next;
1515 
1516 			/* Mergeable, fall through to add it to @target_list. */
1517 		}
1518 
1519 add:
1520 		last_is_target = true;
1521 		range_len = min(extent_map_end(em), start + len) - cur;
1522 		/*
1523 		 * This one is a good target, check if it can be merged into
1524 		 * the last range of the target list.
1525 		 */
1526 		if (!list_empty(target_list)) {
1527 			struct defrag_target_range *last;
1528 
1529 			last = list_entry(target_list->prev,
1530 					  struct defrag_target_range, list);
1531 			ASSERT(last->start + last->len <= cur);
1532 			if (last->start + last->len == cur) {
1533 				/* Mergeable, enlarge the last entry */
1534 				last->len += range_len;
1535 				goto next;
1536 			}
1537 			/* Fall through to allocate a new entry */
1538 		}
1539 
1540 		/* Allocate new defrag_target_range */
1541 		new = kmalloc(sizeof(*new), GFP_NOFS);
1542 		if (!new) {
1543 			free_extent_map(em);
1544 			ret = -ENOMEM;
1545 			break;
1546 		}
1547 		new->start = cur;
1548 		new->len = range_len;
1549 		list_add_tail(&new->list, target_list);
1550 
1551 next:
1552 		cur = extent_map_end(em);
1553 		free_extent_map(em);
1554 	}
1555 	if (ret < 0) {
1556 		struct defrag_target_range *entry;
1557 		struct defrag_target_range *tmp;
1558 
1559 		list_for_each_entry_safe(entry, tmp, target_list, list) {
1560 			list_del_init(&entry->list);
1561 			kfree(entry);
1562 		}
1563 	}
1564 	if (!ret && last_scanned_ret) {
1565 		/*
1566 		 * If the last extent is not a target, the caller can skip to
1567 		 * the end of that extent.
1568 		 * Otherwise, we can only go to the end of the specified range.
1569 		 */
1570 		if (!last_is_target)
1571 			*last_scanned_ret = max(cur, *last_scanned_ret);
1572 		else
1573 			*last_scanned_ret = max(start + len, *last_scanned_ret);
1574 	}
1575 	return ret;
1576 }
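
/*
 * Call sketch (illustrative): defrag_one_range() below collects targets with
 * the extent range already locked, roughly
 *
 *	LIST_HEAD(target_list);
 *
 *	ret = defrag_collect_targets(inode, start, len, extent_thresh,
 *				     newer_than, do_compress, true,
 *				     &target_list, last_scanned_ret);
 *
 * and then feeds every entry to defrag_one_locked_target().
 */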
1577 
1578 #define CLUSTER_SIZE	(SZ_256K)
1579 static_assert(IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
1580 
1581 /*
1582  * Defrag one contiguous target range.
1583  *
1584  * @inode:	target inode
1585  * @target:	target range to defrag
1586  * @pages:	locked pages covering the defrag range
1587  * @nr_pages:	number of locked pages
1588  *
1589  * Caller should ensure:
1590  *
1591  * - Pages are prepared
1592  *   Pages should be locked, no ordered extent in the pages range,
1593  *   no writeback.
1594  *
1595  * - Extent bits are locked
1596  */
1597 static int defrag_one_locked_target(struct btrfs_inode *inode,
1598 				    struct defrag_target_range *target,
1599 				    struct page **pages, int nr_pages,
1600 				    struct extent_state **cached_state)
1601 {
1602 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1603 	struct extent_changeset *data_reserved = NULL;
1604 	const u64 start = target->start;
1605 	const u64 len = target->len;
1606 	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
1607 	unsigned long start_index = start >> PAGE_SHIFT;
1608 	unsigned long first_index = page_index(pages[0]);
1609 	int ret = 0;
1610 	int i;
1611 
1612 	ASSERT(last_index - first_index + 1 <= nr_pages);
1613 
1614 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
1615 	if (ret < 0)
1616 		return ret;
1617 	clear_extent_bit(&inode->io_tree, start, start + len - 1,
1618 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
1619 			 EXTENT_DEFRAG, cached_state);
1620 	set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);
1621 
1622 	/* Update the page status */
1623 	for (i = start_index - first_index; i <= last_index - first_index; i++) {
1624 		ClearPageChecked(pages[i]);
1625 		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
1626 	}
1627 	btrfs_delalloc_release_extents(inode, len);
1628 	extent_changeset_free(data_reserved);
1629 
1630 	return ret;
1631 }
1632 
1633 static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
1634 			    u32 extent_thresh, u64 newer_than, bool do_compress,
1635 			    u64 *last_scanned_ret)
1636 {
1637 	struct extent_state *cached_state = NULL;
1638 	struct defrag_target_range *entry;
1639 	struct defrag_target_range *tmp;
1640 	LIST_HEAD(target_list);
1641 	struct page **pages;
1642 	const u32 sectorsize = inode->root->fs_info->sectorsize;
1643 	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
1644 	u64 start_index = start >> PAGE_SHIFT;
1645 	unsigned int nr_pages = last_index - start_index + 1;
1646 	int ret = 0;
1647 	int i;
1648 
1649 	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
1650 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));
1651 
1652 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
1653 	if (!pages)
1654 		return -ENOMEM;
1655 
1656 	/* Prepare all pages */
1657 	for (i = 0; i < nr_pages; i++) {
1658 		pages[i] = defrag_prepare_one_page(inode, start_index + i);
1659 		if (IS_ERR(pages[i])) {
1660 			ret = PTR_ERR(pages[i]);
1661 			pages[i] = NULL;
1662 			goto free_pages;
1663 		}
1664 	}
1665 	for (i = 0; i < nr_pages; i++)
1666 		wait_on_page_writeback(pages[i]);
1667 
1668 	/* Lock the pages range */
1669 	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
1670 		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
1671 		    &cached_state);
1672 	/*
1673 	 * Now we have a consistent view of the extent map, re-check
1674 	 * which range really needs to be defragged.
1675 	 *
1676 	 * And this time we have extent locked already, pass @locked = true
1677 	 * so that we won't relock the extent range and cause deadlock.
1678 	 */
1679 	ret = defrag_collect_targets(inode, start, len, extent_thresh,
1680 				     newer_than, do_compress, true,
1681 				     &target_list, last_scanned_ret);
1682 	if (ret < 0)
1683 		goto unlock_extent;
1684 
1685 	list_for_each_entry(entry, &target_list, list) {
1686 		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
1687 					       &cached_state);
1688 		if (ret < 0)
1689 			break;
1690 	}
1691 
1692 	list_for_each_entry_safe(entry, tmp, &target_list, list) {
1693 		list_del_init(&entry->list);
1694 		kfree(entry);
1695 	}
1696 unlock_extent:
1697 	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
1698 		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
1699 		      &cached_state);
1700 free_pages:
1701 	for (i = 0; i < nr_pages; i++) {
1702 		if (pages[i]) {
1703 			unlock_page(pages[i]);
1704 			put_page(pages[i]);
1705 		}
1706 	}
1707 	kfree(pages);
1708 	return ret;
1709 }
1710 
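/*
 * Defrag one cluster (up to SZ_256K):
 *
 * Collect candidate ranges without holding the extent lock, kick off
 * readahead for each one, then defrag them through defrag_one_range(),
 * stopping with ret == 1 once @max_sectors has been reached.
 */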
1711 static int defrag_one_cluster(struct btrfs_inode *inode,
1712 			      struct file_ra_state *ra,
1713 			      u64 start, u32 len, u32 extent_thresh,
1714 			      u64 newer_than, bool do_compress,
1715 			      unsigned long *sectors_defragged,
1716 			      unsigned long max_sectors,
1717 			      u64 *last_scanned_ret)
1718 {
1719 	const u32 sectorsize = inode->root->fs_info->sectorsize;
1720 	struct defrag_target_range *entry;
1721 	struct defrag_target_range *tmp;
1722 	LIST_HEAD(target_list);
1723 	int ret;
1724 
1725 	ret = defrag_collect_targets(inode, start, len, extent_thresh,
1726 				     newer_than, do_compress, false,
1727 				     &target_list, NULL);
1728 	if (ret < 0)
1729 		goto out;
1730 
1731 	list_for_each_entry(entry, &target_list, list) {
1732 		u32 range_len = entry->len;
1733 
1734 		/* Reached or beyond the limit */
1735 		if (max_sectors && *sectors_defragged >= max_sectors) {
1736 			ret = 1;
1737 			break;
1738 		}
1739 
1740 		if (max_sectors)
1741 			range_len = min_t(u32, range_len,
1742 				(max_sectors - *sectors_defragged) * sectorsize);
1743 
1744 		/*
1745 		 * If defrag_one_range() has updated last_scanned_ret,
1746 		 * our range may already be invalid (e.g. hole punched).
1747 		 * Skip if our range is before last_scanned_ret, as there is
1748 		 * no need to defrag the range anymore.
1749 		 */
1750 		if (entry->start + range_len <= *last_scanned_ret)
1751 			continue;
1752 
1753 		if (ra)
1754 			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
1755 				ra, NULL, entry->start >> PAGE_SHIFT,
1756 				((entry->start + range_len - 1) >> PAGE_SHIFT) -
1757 				(entry->start >> PAGE_SHIFT) + 1);
1758 		/*
1759 		 * Here we may not defrag any range if holes are punched before
1760 		 * we locked the pages.
1761 		 * But that's fine, it only affects the @sectors_defragged
1762 		 * accounting.
1763 		 */
1764 		ret = defrag_one_range(inode, entry->start, range_len,
1765 				       extent_thresh, newer_than, do_compress,
1766 				       last_scanned_ret);
1767 		if (ret < 0)
1768 			break;
1769 		*sectors_defragged += range_len >>
1770 				      inode->root->fs_info->sectorsize_bits;
1771 	}
1772 out:
1773 	list_for_each_entry_safe(entry, tmp, &target_list, list) {
1774 		list_del_init(&entry->list);
1775 		kfree(entry);
1776 	}
1777 	if (ret >= 0)
1778 		*last_scanned_ret = max(*last_scanned_ret, start + len);
1779 	return ret;
1780 }
1781 
1782 /*
1783  * Entry point to file defragmentation.
1784  *
1785  * @inode:	   inode to be defragged
1786  * @ra:		   readahead state (can be NULL)
1787  * @range:	   defrag options including range and flags
1788  * @newer_than:	   minimum transid to defrag
1789  * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
1790  *		   will be defragged.
1791  *
1792  * Return <0 for error.
1793  * Return >=0 for the number of sectors defragged, and range->start will be updated
1794  * to indicate the file offset where the next defrag should start.
1795  * (Mostly for autodefrag, which sets @max_to_defrag, thus we may exit early without
1796  *  defragging the whole range.)
1797  */
1798 int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
1799 		      struct btrfs_ioctl_defrag_range_args *range,
1800 		      u64 newer_than, unsigned long max_to_defrag)
1801 {
1802 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1803 	unsigned long sectors_defragged = 0;
1804 	u64 isize = i_size_read(inode);
1805 	u64 cur;
1806 	u64 last_byte;
1807 	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
1808 	bool ra_allocated = false;
1809 	int compress_type = BTRFS_COMPRESS_ZLIB;
1810 	int ret = 0;
1811 	u32 extent_thresh = range->extent_thresh;
1812 	pgoff_t start_index;
1813 
1814 	if (isize == 0)
1815 		return 0;
1816 
1817 	if (range->start >= isize)
1818 		return -EINVAL;
1819 
1820 	if (do_compress) {
1821 		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
1822 			return -EINVAL;
1823 		if (range->compress_type)
1824 			compress_type = range->compress_type;
1825 	}
1826 
1827 	if (extent_thresh == 0)
1828 		extent_thresh = SZ_256K;
1829 
1830 	if (range->start + range->len > range->start) {
1831 		/* Got a specific range */
1832 		last_byte = min(isize, range->start + range->len);
1833 	} else {
1834 		/* Defrag until file end */
1835 		last_byte = isize;
1836 	}
1837 
1838 	/* Align the range */
1839 	cur = round_down(range->start, fs_info->sectorsize);
1840 	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
1841 
1842 	/*
1843 	 * If we were not given a ra, allocate a readahead context. As
1844 	 * readahead is just an optimization, defrag will work without it so
1845 	 * we don't error out.
1846 	 */
1847 	if (!ra) {
1848 		ra_allocated = true;
1849 		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1850 		if (ra)
1851 			file_ra_state_init(ra, inode->i_mapping);
1852 	}
1853 
1854 	/*
1855 	 * Make writeback start from the beginning of the range, so that the
1856 	 * defrag range can be written sequentially.
1857 	 */
1858 	start_index = cur >> PAGE_SHIFT;
1859 	if (start_index < inode->i_mapping->writeback_index)
1860 		inode->i_mapping->writeback_index = start_index;
1861 
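	/*
	 * Walk the target range one 256K cluster at a time, taking the inode
	 * lock for each cluster and bailing out if the inode became a
	 * swapfile or the filesystem is being unmounted.
	 */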
1862 	while (cur < last_byte) {
1863 		const unsigned long prev_sectors_defragged = sectors_defragged;
1864 		u64 last_scanned = cur;
1865 		u64 cluster_end;
1866 
1867 		if (btrfs_defrag_cancelled(fs_info)) {
1868 			ret = -EAGAIN;
1869 			break;
1870 		}
1871 
1872 		/* We want the cluster end at page boundary when possible */
1873 		cluster_end = (((cur >> PAGE_SHIFT) +
1874 			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
1875 		cluster_end = min(cluster_end, last_byte);
1876 
1877 		btrfs_inode_lock(inode, 0);
1878 		if (IS_SWAPFILE(inode)) {
1879 			ret = -ETXTBSY;
1880 			btrfs_inode_unlock(inode, 0);
1881 			break;
1882 		}
1883 		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
1884 			btrfs_inode_unlock(inode, 0);
1885 			break;
1886 		}
1887 		if (do_compress)
1888 			BTRFS_I(inode)->defrag_compress = compress_type;
1889 		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
1890 				cluster_end + 1 - cur, extent_thresh,
1891 				newer_than, do_compress, &sectors_defragged,
1892 				max_to_defrag, &last_scanned);
1893 
1894 		if (sectors_defragged > prev_sectors_defragged)
1895 			balance_dirty_pages_ratelimited(inode->i_mapping);
1896 
1897 		btrfs_inode_unlock(inode, 0);
1898 		if (ret < 0)
1899 			break;
1900 		cur = max(cluster_end + 1, last_scanned);
1901 		if (ret > 0) {
1902 			ret = 0;
1903 			break;
1904 		}
1905 		cond_resched();
1906 	}
1907 
1908 	if (ra_allocated)
1909 		kfree(ra);
1910 	/*
1911 	 * Update range->start for autodefrag; this indicates where to start
1912 	 * in the next run.
1913 	 */
1914 	range->start = cur;
1915 	if (sectors_defragged) {
1916 		/*
1917 		 * We have defragged some sectors; for the compression case they
1918 		 * need to be written back immediately.
1919 		 */
1920 		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
1921 			filemap_flush(inode->i_mapping);
1922 			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1923 				     &BTRFS_I(inode)->runtime_flags))
1924 				filemap_flush(inode->i_mapping);
1925 		}
1926 		if (range->compress_type == BTRFS_COMPRESS_LZO)
1927 			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
1928 		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
1929 			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
1930 		ret = sectors_defragged;
1931 	}
1932 	if (do_compress) {
1933 		btrfs_inode_lock(inode, 0);
1934 		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
1935 		btrfs_inode_unlock(inode, 0);
1936 	}
1937 	return ret;
1938 }
1939 
1940 /*
1941  * Try to start exclusive operation @type or cancel it if it's running.
1942  *
1943  * Return:
1944  *   0        - normal mode, newly claimed op started
1945  *  >0        - normal mode, something else is running,
1946  *              return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space
1947  * ECANCELED  - cancel mode, successful cancel
1948  * ENOTCONN   - cancel mode, operation not running anymore
1949  */
1950 static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info,
1951 			enum btrfs_exclusive_operation type, bool cancel)
1952 {
1953 	if (!cancel) {
1954 		/* Start normal op */
1955 		if (!btrfs_exclop_start(fs_info, type))
1956 			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
1957 		/* Exclusive operation is now claimed */
1958 		return 0;
1959 	}
1960 
1961 	/* Cancel running op */
1962 	if (btrfs_exclop_start_try_lock(fs_info, type)) {
1963 		/*
1964 		 * This blocks any exclop finish from setting it to NONE, so we
1965 		 * request cancellation. Either it runs and we will wait for it,
1966 		 * or it has finished and no waiting will happen.
1967 		 */
1968 		atomic_inc(&fs_info->reloc_cancel_req);
1969 		btrfs_exclop_start_unlock(fs_info);
1970 
1971 		if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
1972 			wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING,
1973 				    TASK_INTERRUPTIBLE);
1974 
1975 		return -ECANCELED;
1976 	}
1977 
1978 	/* Something else is running or none */
1979 	return -ENOTCONN;
1980 }
1981 
1982 static noinline int btrfs_ioctl_resize(struct file *file,
1983 					void __user *arg)
1984 {
1985 	BTRFS_DEV_LOOKUP_ARGS(args);
1986 	struct inode *inode = file_inode(file);
1987 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1988 	u64 new_size;
1989 	u64 old_size;
1990 	u64 devid = 1;
1991 	struct btrfs_root *root = BTRFS_I(inode)->root;
1992 	struct btrfs_ioctl_vol_args *vol_args;
1993 	struct btrfs_trans_handle *trans;
1994 	struct btrfs_device *device = NULL;
1995 	char *sizestr;
1996 	char *retptr;
1997 	char *devstr = NULL;
1998 	int ret = 0;
1999 	int mod = 0;
2000 	bool cancel;
2001 
2002 	if (!capable(CAP_SYS_ADMIN))
2003 		return -EPERM;
2004 
2005 	ret = mnt_want_write_file(file);
2006 	if (ret)
2007 		return ret;
2008 
2009 	/*
2010 	 * Read the arguments before checking exclusivity to be able to
2011 	 * distinguish a regular resize from a cancel.
2012 	 */
2013 	vol_args = memdup_user(arg, sizeof(*vol_args));
2014 	if (IS_ERR(vol_args)) {
2015 		ret = PTR_ERR(vol_args);
2016 		goto out_drop;
2017 	}
2018 	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2019 	sizestr = vol_args->name;
2020 	cancel = (strcmp("cancel", sizestr) == 0);
2021 	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
2022 	if (ret)
2023 		goto out_free;
2024 	/* Exclusive operation is now claimed */
2025 
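	/*
	 * vol_args->name encodes the request as "[devid:]size", where size is
	 * "max", an absolute value, or a relative "+"/"-" delta parsed by
	 * memparse(); "cancel" was already handled above.
	 */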
2026 	devstr = strchr(sizestr, ':');
2027 	if (devstr) {
2028 		sizestr = devstr + 1;
2029 		*devstr = '\0';
2030 		devstr = vol_args->name;
2031 		ret = kstrtoull(devstr, 10, &devid);
2032 		if (ret)
2033 			goto out_finish;
2034 		if (!devid) {
2035 			ret = -EINVAL;
2036 			goto out_finish;
2037 		}
2038 		btrfs_info(fs_info, "resizing devid %llu", devid);
2039 	}
2040 
2041 	args.devid = devid;
2042 	device = btrfs_find_device(fs_info->fs_devices, &args);
2043 	if (!device) {
2044 		btrfs_info(fs_info, "resizer unable to find device %llu",
2045 			   devid);
2046 		ret = -ENODEV;
2047 		goto out_finish;
2048 	}
2049 
2050 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2051 		btrfs_info(fs_info,
2052 			   "resizer unable to apply on readonly device %llu",
2053 		       devid);
2054 		ret = -EPERM;
2055 		goto out_finish;
2056 	}
2057 
2058 	if (!strcmp(sizestr, "max"))
2059 		new_size = bdev_nr_bytes(device->bdev);
2060 	else {
2061 		if (sizestr[0] == '-') {
2062 			mod = -1;
2063 			sizestr++;
2064 		} else if (sizestr[0] == '+') {
2065 			mod = 1;
2066 			sizestr++;
2067 		}
2068 		new_size = memparse(sizestr, &retptr);
2069 		if (*retptr != '\0' || new_size == 0) {
2070 			ret = -EINVAL;
2071 			goto out_finish;
2072 		}
2073 	}
2074 
2075 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2076 		ret = -EPERM;
2077 		goto out_finish;
2078 	}
2079 
2080 	old_size = btrfs_device_get_total_bytes(device);
2081 
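	/* Apply a relative change: '-' shrinks by @new_size, '+' grows by it. */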
2082 	if (mod < 0) {
2083 		if (new_size > old_size) {
2084 			ret = -EINVAL;
2085 			goto out_finish;
2086 		}
2087 		new_size = old_size - new_size;
2088 	} else if (mod > 0) {
2089 		if (new_size > ULLONG_MAX - old_size) {
2090 			ret = -ERANGE;
2091 			goto out_finish;
2092 		}
2093 		new_size = old_size + new_size;
2094 	}
2095 
2096 	if (new_size < SZ_256M) {
2097 		ret = -EINVAL;
2098 		goto out_finish;
2099 	}
2100 	if (new_size > bdev_nr_bytes(device->bdev)) {
2101 		ret = -EFBIG;
2102 		goto out_finish;
2103 	}
2104 
2105 	new_size = round_down(new_size, fs_info->sectorsize);
2106 
2107 	if (new_size > old_size) {
2108 		trans = btrfs_start_transaction(root, 0);
2109 		if (IS_ERR(trans)) {
2110 			ret = PTR_ERR(trans);
2111 			goto out_finish;
2112 		}
2113 		ret = btrfs_grow_device(trans, device, new_size);
2114 		btrfs_commit_transaction(trans);
2115 	} else if (new_size < old_size) {
2116 		ret = btrfs_shrink_device(device, new_size);
2117 	} /* equal, nothing needs to be done */
2118 
2119 	if (ret == 0 && new_size != old_size)
2120 		btrfs_info_in_rcu(fs_info,
2121 			"resize device %s (devid %llu) from %llu to %llu",
2122 			rcu_str_deref(device->name), device->devid,
2123 			old_size, new_size);
2124 out_finish:
2125 	btrfs_exclop_finish(fs_info);
2126 out_free:
2127 	kfree(vol_args);
2128 out_drop:
2129 	mnt_drop_write_file(file);
2130 	return ret;
2131 }
2132 
2133 static noinline int __btrfs_ioctl_snap_create(struct file *file,
2134 				struct user_namespace *mnt_userns,
2135 				const char *name, unsigned long fd, int subvol,
2136 				bool readonly,
2137 				struct btrfs_qgroup_inherit *inherit)
2138 {
2139 	int namelen;
2140 	int ret = 0;
2141 
2142 	if (!S_ISDIR(file_inode(file)->i_mode))
2143 		return -ENOTDIR;
2144 
2145 	ret = mnt_want_write_file(file);
2146 	if (ret)
2147 		goto out;
2148 
2149 	namelen = strlen(name);
2150 	if (strchr(name, '/')) {
2151 		ret = -EINVAL;
2152 		goto out_drop_write;
2153 	}
2154 
2155 	if (name[0] == '.' &&
2156 	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
2157 		ret = -EEXIST;
2158 		goto out_drop_write;
2159 	}
2160 
2161 	if (subvol) {
2162 		ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
2163 				     namelen, NULL, readonly, inherit);
2164 	} else {
2165 		struct fd src = fdget(fd);
2166 		struct inode *src_inode;
2167 		if (!src.file) {
2168 			ret = -EINVAL;
2169 			goto out_drop_write;
2170 		}
2171 
2172 		src_inode = file_inode(src.file);
2173 		if (src_inode->i_sb != file_inode(file)->i_sb) {
2174 			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
2175 				   "Snapshot src from another FS");
2176 			ret = -EXDEV;
2177 		} else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
2178 			/*
2179 			 * Subvolume creation is not restricted, but snapshots
2180 			 * are limited to own subvolumes only
2181 			 */
2182 			ret = -EPERM;
2183 		} else {
2184 			ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
2185 					       name, namelen,
2186 					       BTRFS_I(src_inode)->root,
2187 					       readonly, inherit);
2188 		}
2189 		fdput(src);
2190 	}
2191 out_drop_write:
2192 	mnt_drop_write_file(file);
2193 out:
2194 	return ret;
2195 }
2196 
2197 static noinline int btrfs_ioctl_snap_create(struct file *file,
2198 					    void __user *arg, int subvol)
2199 {
2200 	struct btrfs_ioctl_vol_args *vol_args;
2201 	int ret;
2202 
2203 	if (!S_ISDIR(file_inode(file)->i_mode))
2204 		return -ENOTDIR;
2205 
2206 	vol_args = memdup_user(arg, sizeof(*vol_args));
2207 	if (IS_ERR(vol_args))
2208 		return PTR_ERR(vol_args);
2209 	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2210 
2211 	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
2212 					vol_args->name, vol_args->fd, subvol,
2213 					false, NULL);
2214 
2215 	kfree(vol_args);
2216 	return ret;
2217 }
2218 
2219 static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
2220 					       void __user *arg, int subvol)
2221 {
2222 	struct btrfs_ioctl_vol_args_v2 *vol_args;
2223 	int ret;
2224 	bool readonly = false;
2225 	struct btrfs_qgroup_inherit *inherit = NULL;
2226 
2227 	if (!S_ISDIR(file_inode(file)->i_mode))
2228 		return -ENOTDIR;
2229 
2230 	vol_args = memdup_user(arg, sizeof(*vol_args));
2231 	if (IS_ERR(vol_args))
2232 		return PTR_ERR(vol_args);
2233 	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
2234 
2235 	if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) {
2236 		ret = -EOPNOTSUPP;
2237 		goto free_args;
2238 	}
2239 
2240 	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
2241 		readonly = true;
2242 	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
2243 		u64 nums;
2244 
2245 		if (vol_args->size < sizeof(*inherit) ||
2246 		    vol_args->size > PAGE_SIZE) {
2247 			ret = -EINVAL;
2248 			goto free_args;
2249 		}
2250 		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
2251 		if (IS_ERR(inherit)) {
2252 			ret = PTR_ERR(inherit);
2253 			goto free_args;
2254 		}
2255 
2256 		if (inherit->num_qgroups > PAGE_SIZE ||
2257 		    inherit->num_ref_copies > PAGE_SIZE ||
2258 		    inherit->num_excl_copies > PAGE_SIZE) {
2259 			ret = -EINVAL;
2260 			goto free_inherit;
2261 		}
2262 
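		/*
		 * The flexible qgroups[] array holds @num_qgroups ids plus a
		 * pair of ids per ref/excl copy, which is what the size check
		 * below verifies.
		 */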
2263 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2264 		       2 * inherit->num_excl_copies;
2265 		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
2266 			ret = -EINVAL;
2267 			goto free_inherit;
2268 		}
2269 	}
2270 
2271 	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
2272 					vol_args->name, vol_args->fd, subvol,
2273 					readonly, inherit);
2274 	if (ret)
2275 		goto free_inherit;
2276 free_inherit:
2277 	kfree(inherit);
2278 free_args:
2279 	kfree(vol_args);
2280 	return ret;
2281 }
2282 
2283 static noinline int btrfs_ioctl_subvol_getflags(struct inode *inode,
2284 						void __user *arg)
2285 {
2286 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2287 	struct btrfs_root *root = BTRFS_I(inode)->root;
2288 	int ret = 0;
2289 	u64 flags = 0;
2290 
2291 	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
2292 		return -EINVAL;
2293 
2294 	down_read(&fs_info->subvol_sem);
2295 	if (btrfs_root_readonly(root))
2296 		flags |= BTRFS_SUBVOL_RDONLY;
2297 	up_read(&fs_info->subvol_sem);
2298 
2299 	if (copy_to_user(arg, &flags, sizeof(flags)))
2300 		ret = -EFAULT;
2301 
2302 	return ret;
2303 }
2304 
2305 static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
2306 					      void __user *arg)
2307 {
2308 	struct inode *inode = file_inode(file);
2309 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2310 	struct btrfs_root *root = BTRFS_I(inode)->root;
2311 	struct btrfs_trans_handle *trans;
2312 	u64 root_flags;
2313 	u64 flags;
2314 	int ret = 0;
2315 
2316 	if (!inode_owner_or_capable(file_mnt_user_ns(file), inode))
2317 		return -EPERM;
2318 
2319 	ret = mnt_want_write_file(file);
2320 	if (ret)
2321 		goto out;
2322 
2323 	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
2324 		ret = -EINVAL;
2325 		goto out_drop_write;
2326 	}
2327 
2328 	if (copy_from_user(&flags, arg, sizeof(flags))) {
2329 		ret = -EFAULT;
2330 		goto out_drop_write;
2331 	}
2332 
2333 	if (flags & ~BTRFS_SUBVOL_RDONLY) {
2334 		ret = -EOPNOTSUPP;
2335 		goto out_drop_write;
2336 	}
2337 
2338 	down_write(&fs_info->subvol_sem);
2339 
2340 	/* nothing to do */
2341 	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
2342 		goto out_drop_sem;
2343 
2344 	root_flags = btrfs_root_flags(&root->root_item);
2345 	if (flags & BTRFS_SUBVOL_RDONLY) {
2346 		btrfs_set_root_flags(&root->root_item,
2347 				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
2348 	} else {
2349 		/*
2350 		 * Block RO -> RW transition if this subvolume is involved in
2351 		 * send
2352 		 */
2353 		spin_lock(&root->root_item_lock);
2354 		if (root->send_in_progress == 0) {
2355 			btrfs_set_root_flags(&root->root_item,
2356 				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
2357 			spin_unlock(&root->root_item_lock);
2358 		} else {
2359 			spin_unlock(&root->root_item_lock);
2360 			btrfs_warn(fs_info,
2361 				   "Attempt to set subvolume %llu read-write during send",
2362 				   root->root_key.objectid);
2363 			ret = -EPERM;
2364 			goto out_drop_sem;
2365 		}
2366 	}
2367 
2368 	trans = btrfs_start_transaction(root, 1);
2369 	if (IS_ERR(trans)) {
2370 		ret = PTR_ERR(trans);
2371 		goto out_reset;
2372 	}
2373 
2374 	ret = btrfs_update_root(trans, fs_info->tree_root,
2375 				&root->root_key, &root->root_item);
2376 	if (ret < 0) {
2377 		btrfs_end_transaction(trans);
2378 		goto out_reset;
2379 	}
2380 
2381 	ret = btrfs_commit_transaction(trans);
2382 
2383 out_reset:
2384 	if (ret)
2385 		btrfs_set_root_flags(&root->root_item, root_flags);
2386 out_drop_sem:
2387 	up_write(&fs_info->subvol_sem);
2388 out_drop_write:
2389 	mnt_drop_write_file(file);
2390 out:
2391 	return ret;
2392 }
2393 
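/*
 * Return 1 if @key lies within the [min, max] range described by the search
 * key @sk, 0 otherwise.
 */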
2394 static noinline int key_in_sk(struct btrfs_key *key,
2395 			      struct btrfs_ioctl_search_key *sk)
2396 {
2397 	struct btrfs_key test;
2398 	int ret;
2399 
2400 	test.objectid = sk->min_objectid;
2401 	test.type = sk->min_type;
2402 	test.offset = sk->min_offset;
2403 
2404 	ret = btrfs_comp_cpu_keys(key, &test);
2405 	if (ret < 0)
2406 		return 0;
2407 
2408 	test.objectid = sk->max_objectid;
2409 	test.type = sk->max_type;
2410 	test.offset = sk->max_offset;
2411 
2412 	ret = btrfs_comp_cpu_keys(key, &test);
2413 	if (ret > 0)
2414 		return 0;
2415 	return 1;
2416 }
2417 
2418 static noinline int copy_to_sk(struct btrfs_path *path,
2419 			       struct btrfs_key *key,
2420 			       struct btrfs_ioctl_search_key *sk,
2421 			       size_t *buf_size,
2422 			       char __user *ubuf,
2423 			       unsigned long *sk_offset,
2424 			       int *num_found)
2425 {
2426 	u64 found_transid;
2427 	struct extent_buffer *leaf;
2428 	struct btrfs_ioctl_search_header sh;
2429 	struct btrfs_key test;
2430 	unsigned long item_off;
2431 	unsigned long item_len;
2432 	int nritems;
2433 	int i;
2434 	int slot;
2435 	int ret = 0;
2436 
2437 	leaf = path->nodes[0];
2438 	slot = path->slots[0];
2439 	nritems = btrfs_header_nritems(leaf);
2440 
2441 	if (btrfs_header_generation(leaf) > sk->max_transid) {
2442 		i = nritems;
2443 		goto advance_key;
2444 	}
2445 	found_transid = btrfs_header_generation(leaf);
2446 
2447 	for (i = slot; i < nritems; i++) {
2448 		item_off = btrfs_item_ptr_offset(leaf, i);
2449 		item_len = btrfs_item_size(leaf, i);
2450 
2451 		btrfs_item_key_to_cpu(leaf, key, i);
2452 		if (!key_in_sk(key, sk))
2453 			continue;
2454 
2455 		if (sizeof(sh) + item_len > *buf_size) {
2456 			if (*num_found) {
2457 				ret = 1;
2458 				goto out;
2459 			}
2460 
2461 			/*
2462 			 * return one empty item back for v1, which does not
2463 			 * handle -EOVERFLOW
2464 			 */
2465 
2466 			*buf_size = sizeof(sh) + item_len;
2467 			item_len = 0;
2468 			ret = -EOVERFLOW;
2469 		}
2470 
2471 		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
2472 			ret = 1;
2473 			goto out;
2474 		}
2475 
2476 		sh.objectid = key->objectid;
2477 		sh.offset = key->offset;
2478 		sh.type = key->type;
2479 		sh.len = item_len;
2480 		sh.transid = found_transid;
2481 
2482 		/*
2483 		 * Copy the search result header. If we fault, return 0 so the
2484 		 * caller loops again, faults the pages in, and returns -EFAULT
2485 		 * there if there's still a problem. Otherwise the buffer is
2486 		 * copied properly on the next pass through.
2487 		 */
2488 		if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
2489 			ret = 0;
2490 			goto out;
2491 		}
2492 
2493 		*sk_offset += sizeof(sh);
2494 
2495 		if (item_len) {
2496 			char __user *up = ubuf + *sk_offset;
2497 			/*
2498 			 * Copy the item, same behavior as above, but reset the
2499 			 * * sk_offset so we copy the full thing again.
2500 			 */
2501 			if (read_extent_buffer_to_user_nofault(leaf, up,
2502 						item_off, item_len)) {
2503 				ret = 0;
2504 				*sk_offset -= sizeof(sh);
2505 				goto out;
2506 			}
2507 
2508 			*sk_offset += item_len;
2509 		}
2510 		(*num_found)++;
2511 
2512 		if (ret) /* -EOVERFLOW from above */
2513 			goto out;
2514 
2515 		if (*num_found >= sk->nr_items) {
2516 			ret = 1;
2517 			goto out;
2518 		}
2519 	}
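	/*
	 * Advance @key to the smallest key strictly greater than the current
	 * one so the caller can resume the tree search from there, or return
	 * 1 if the search range has been exhausted.
	 */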
2520 advance_key:
2521 	ret = 0;
2522 	test.objectid = sk->max_objectid;
2523 	test.type = sk->max_type;
2524 	test.offset = sk->max_offset;
2525 	if (btrfs_comp_cpu_keys(key, &test) >= 0)
2526 		ret = 1;
2527 	else if (key->offset < (u64)-1)
2528 		key->offset++;
2529 	else if (key->type < (u8)-1) {
2530 		key->offset = 0;
2531 		key->type++;
2532 	} else if (key->objectid < (u64)-1) {
2533 		key->offset = 0;
2534 		key->type = 0;
2535 		key->objectid++;
2536 	} else
2537 		ret = 1;
2538 out:
2539 	/*
2540 	 *  0: all items from this leaf copied, continue with next
2541 	 *  1: * more items can be copied, but unused buffer is too small
2542 	 *     * all items were found
2543 	 *     Either way, it will stop the loop which iterates to the next
2544 	 *     leaf
2545 	 *  -EOVERFLOW: item was too large for buffer
2546 	 *  -EFAULT: could not copy extent buffer back to userspace
2547 	 */
2548 	return ret;
2549 }
2550 
2551 static noinline int search_ioctl(struct inode *inode,
2552 				 struct btrfs_ioctl_search_key *sk,
2553 				 size_t *buf_size,
2554 				 char __user *ubuf)
2555 {
2556 	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
2557 	struct btrfs_root *root;
2558 	struct btrfs_key key;
2559 	struct btrfs_path *path;
2560 	int ret;
2561 	int num_found = 0;
2562 	unsigned long sk_offset = 0;
2563 
2564 	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
2565 		*buf_size = sizeof(struct btrfs_ioctl_search_header);
2566 		return -EOVERFLOW;
2567 	}
2568 
2569 	path = btrfs_alloc_path();
2570 	if (!path)
2571 		return -ENOMEM;
2572 
2573 	if (sk->tree_id == 0) {
2574 		/* search the root of the inode that was passed */
2575 		root = btrfs_grab_root(BTRFS_I(inode)->root);
2576 	} else {
2577 		root = btrfs_get_fs_root(info, sk->tree_id, true);
2578 		if (IS_ERR(root)) {
2579 			btrfs_free_path(path);
2580 			return PTR_ERR(root);
2581 		}
2582 	}
2583 
2584 	key.objectid = sk->min_objectid;
2585 	key.type = sk->min_type;
2586 	key.offset = sk->min_offset;
2587 
2588 	while (1) {
2589 		ret = -EFAULT;
2590 		/*
2591 		 * Ensure that the whole user buffer is faulted in at sub-page
2592 		 * granularity, otherwise the loop may live-lock.
2593 		 */
2594 		if (fault_in_subpage_writeable(ubuf + sk_offset,
2595 					       *buf_size - sk_offset))
2596 			break;
2597 
2598 		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
2599 		if (ret != 0) {
2600 			if (ret > 0)
2601 				ret = 0;
2602 			goto err;
2603 		}
2604 		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
2605 				 &sk_offset, &num_found);
2606 		btrfs_release_path(path);
2607 		if (ret)
2608 			break;
2609 
2610 	}
2611 	if (ret > 0)
2612 		ret = 0;
2613 err:
2614 	sk->nr_items = num_found;
2615 	btrfs_put_root(root);
2616 	btrfs_free_path(path);
2617 	return ret;
2618 }
2619 
2620 static noinline int btrfs_ioctl_tree_search(struct inode *inode,
2621 					    void __user *argp)
2622 {
2623 	struct btrfs_ioctl_search_args __user *uargs = argp;
2624 	struct btrfs_ioctl_search_key sk;
2625 	int ret;
2626 	size_t buf_size;
2627 
2628 	if (!capable(CAP_SYS_ADMIN))
2629 		return -EPERM;
2630 
2631 	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
2632 		return -EFAULT;
2633 
2634 	buf_size = sizeof(uargs->buf);
2635 
2636 	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
2637 
2638 	/*
2639 	 * In the original implementation an overflow is handled by returning a
2640 	 * search header with a len of zero, so reset ret.
2641 	 */
2642 	if (ret == -EOVERFLOW)
2643 		ret = 0;
2644 
2645 	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
2646 		ret = -EFAULT;
2647 	return ret;
2648 }
2649 
2650 static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
2651 					       void __user *argp)
2652 {
2653 	struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
2654 	struct btrfs_ioctl_search_args_v2 args;
2655 	int ret;
2656 	size_t buf_size;
2657 	const size_t buf_limit = SZ_16M;
2658 
2659 	if (!capable(CAP_SYS_ADMIN))
2660 		return -EPERM;
2661 
2662 	/* copy search header and buffer size */
2663 	if (copy_from_user(&args, uarg, sizeof(args)))
2664 		return -EFAULT;
2665 
2666 	buf_size = args.buf_size;
2667 
2668 	/* limit result size to 16MB */
2669 	if (buf_size > buf_limit)
2670 		buf_size = buf_limit;
2671 
2672 	ret = search_ioctl(inode, &args.key, &buf_size,
2673 			   (char __user *)(&uarg->buf[0]));
2674 	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
2675 		ret = -EFAULT;
2676 	else if (ret == -EOVERFLOW &&
2677 		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
2678 		ret = -EFAULT;
2679 
2680 	return ret;
2681 }
2682 
2683 /*
2684  * Search INODE_REFs to identify the path name of the 'dirid' directory
2685  * in a 'tree_id' tree, and set the path name in 'name'.
2686  */
2687 static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2688 				u64 tree_id, u64 dirid, char *name)
2689 {
2690 	struct btrfs_root *root;
2691 	struct btrfs_key key;
2692 	char *ptr;
2693 	int ret = -1;
2694 	int slot;
2695 	int len;
2696 	int total_len = 0;
2697 	struct btrfs_inode_ref *iref;
2698 	struct extent_buffer *l;
2699 	struct btrfs_path *path;
2700 
2701 	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
2702 		name[0]='\0';
2703 		return 0;
2704 	}
2705 
2706 	path = btrfs_alloc_path();
2707 	if (!path)
2708 		return -ENOMEM;
2709 
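	/*
	 * Build the path backwards: start at the end of the buffer and walk
	 * the INODE_REF items from @dirid up to the subvolume root, prepending
	 * each name component.
	 */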
2710 	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2711 
2712 	root = btrfs_get_fs_root(info, tree_id, true);
2713 	if (IS_ERR(root)) {
2714 		ret = PTR_ERR(root);
2715 		root = NULL;
2716 		goto out;
2717 	}
2718 
2719 	key.objectid = dirid;
2720 	key.type = BTRFS_INODE_REF_KEY;
2721 	key.offset = (u64)-1;
2722 
2723 	while (1) {
2724 		ret = btrfs_search_backwards(root, &key, path);
2725 		if (ret < 0)
2726 			goto out;
2727 		else if (ret > 0) {
2728 			ret = -ENOENT;
2729 			goto out;
2730 		}
2731 
2732 		l = path->nodes[0];
2733 		slot = path->slots[0];
2734 
2735 		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
2736 		len = btrfs_inode_ref_name_len(l, iref);
2737 		ptr -= len + 1;
2738 		total_len += len + 1;
2739 		if (ptr < name) {
2740 			ret = -ENAMETOOLONG;
2741 			goto out;
2742 		}
2743 
2744 		*(ptr + len) = '/';
2745 		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
2746 
2747 		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
2748 			break;
2749 
2750 		btrfs_release_path(path);
2751 		key.objectid = key.offset;
2752 		key.offset = (u64)-1;
2753 		dirid = key.objectid;
2754 	}
2755 	memmove(name, ptr, total_len);
2756 	name[total_len] = '\0';
2757 	ret = 0;
2758 out:
2759 	btrfs_put_root(root);
2760 	btrfs_free_path(path);
2761 	return ret;
2762 }
2763 
2764 static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
2765 				struct inode *inode,
2766 				struct btrfs_ioctl_ino_lookup_user_args *args)
2767 {
2768 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2769 	struct super_block *sb = inode->i_sb;
2770 	struct btrfs_key upper_limit = BTRFS_I(inode)->location;
2771 	u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
2772 	u64 dirid = args->dirid;
2773 	unsigned long item_off;
2774 	unsigned long item_len;
2775 	struct btrfs_inode_ref *iref;
2776 	struct btrfs_root_ref *rref;
2777 	struct btrfs_root *root = NULL;
2778 	struct btrfs_path *path;
2779 	struct btrfs_key key, key2;
2780 	struct extent_buffer *leaf;
2781 	struct inode *temp_inode;
2782 	char *ptr;
2783 	int slot;
2784 	int len;
2785 	int total_len = 0;
2786 	int ret;
2787 
2788 	path = btrfs_alloc_path();
2789 	if (!path)
2790 		return -ENOMEM;
2791 
2792 	/*
2793 	 * If the bottom subvolume does not exist directly under upper_limit,
2794 	 * construct the path from the bottom up.
2795 	 */
2796 	if (dirid != upper_limit.objectid) {
2797 		ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];
2798 
2799 		root = btrfs_get_fs_root(fs_info, treeid, true);
2800 		if (IS_ERR(root)) {
2801 			ret = PTR_ERR(root);
2802 			goto out;
2803 		}
2804 
2805 		key.objectid = dirid;
2806 		key.type = BTRFS_INODE_REF_KEY;
2807 		key.offset = (u64)-1;
2808 		while (1) {
2809 			ret = btrfs_search_backwards(root, &key, path);
2810 			if (ret < 0)
2811 				goto out_put;
2812 			else if (ret > 0) {
2813 				ret = -ENOENT;
2814 				goto out_put;
2815 			}
2816 
2817 			leaf = path->nodes[0];
2818 			slot = path->slots[0];
2819 
2820 			iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
2821 			len = btrfs_inode_ref_name_len(leaf, iref);
2822 			ptr -= len + 1;
2823 			total_len += len + 1;
2824 			if (ptr < args->path) {
2825 				ret = -ENAMETOOLONG;
2826 				goto out_put;
2827 			}
2828 
2829 			*(ptr + len) = '/';
2830 			read_extent_buffer(leaf, ptr,
2831 					(unsigned long)(iref + 1), len);
2832 
2833 			/* Check the read+exec permission of this directory */
2834 			ret = btrfs_previous_item(root, path, dirid,
2835 						  BTRFS_INODE_ITEM_KEY);
2836 			if (ret < 0) {
2837 				goto out_put;
2838 			} else if (ret > 0) {
2839 				ret = -ENOENT;
2840 				goto out_put;
2841 			}
2842 
2843 			leaf = path->nodes[0];
2844 			slot = path->slots[0];
2845 			btrfs_item_key_to_cpu(leaf, &key2, slot);
2846 			if (key2.objectid != dirid) {
2847 				ret = -ENOENT;
2848 				goto out_put;
2849 			}
2850 
2851 			temp_inode = btrfs_iget(sb, key2.objectid, root);
2852 			if (IS_ERR(temp_inode)) {
2853 				ret = PTR_ERR(temp_inode);
2854 				goto out_put;
2855 			}
2856 			ret = inode_permission(mnt_userns, temp_inode,
2857 					       MAY_READ | MAY_EXEC);
2858 			iput(temp_inode);
2859 			if (ret) {
2860 				ret = -EACCES;
2861 				goto out_put;
2862 			}
2863 
2864 			if (key.offset == upper_limit.objectid)
2865 				break;
2866 			if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
2867 				ret = -EACCES;
2868 				goto out_put;
2869 			}
2870 
2871 			btrfs_release_path(path);
2872 			key.objectid = key.offset;
2873 			key.offset = (u64)-1;
2874 			dirid = key.objectid;
2875 		}
2876 
2877 		memmove(args->path, ptr, total_len);
2878 		args->path[total_len] = '\0';
2879 		btrfs_put_root(root);
2880 		root = NULL;
2881 		btrfs_release_path(path);
2882 	}
2883 
2884 	/* Get the bottom subvolume's name from ROOT_REF */
2885 	key.objectid = treeid;
2886 	key.type = BTRFS_ROOT_REF_KEY;
2887 	key.offset = args->treeid;
2888 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2889 	if (ret < 0) {
2890 		goto out;
2891 	} else if (ret > 0) {
2892 		ret = -ENOENT;
2893 		goto out;
2894 	}
2895 
2896 	leaf = path->nodes[0];
2897 	slot = path->slots[0];
2898 	btrfs_item_key_to_cpu(leaf, &key, slot);
2899 
2900 	item_off = btrfs_item_ptr_offset(leaf, slot);
2901 	item_len = btrfs_item_size(leaf, slot);
2902 	/* Check if dirid in ROOT_REF corresponds to passed dirid */
2903 	rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2904 	if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
2905 		ret = -EINVAL;
2906 		goto out;
2907 	}
2908 
2909 	/* Copy subvolume's name */
2910 	item_off += sizeof(struct btrfs_root_ref);
2911 	item_len -= sizeof(struct btrfs_root_ref);
2912 	read_extent_buffer(leaf, args->name, item_off, item_len);
2913 	args->name[item_len] = 0;
2914 
2915 out_put:
2916 	btrfs_put_root(root);
2917 out:
2918 	btrfs_free_path(path);
2919 	return ret;
2920 }
2921 
2922 static noinline int btrfs_ioctl_ino_lookup(struct btrfs_root *root,
2923 					   void __user *argp)
2924 {
2925 	struct btrfs_ioctl_ino_lookup_args *args;
2926 	int ret = 0;
2927 
2928 	args = memdup_user(argp, sizeof(*args));
2929 	if (IS_ERR(args))
2930 		return PTR_ERR(args);
2931 
2932 	/*
2933 	 * Unprivileged query to obtain the containing subvolume root id. The
2934 	 * path is reset so it's consistent with btrfs_search_path_in_tree.
2935 	 */
2936 	if (args->treeid == 0)
2937 		args->treeid = root->root_key.objectid;
2938 
2939 	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
2940 		args->name[0] = 0;
2941 		goto out;
2942 	}
2943 
2944 	if (!capable(CAP_SYS_ADMIN)) {
2945 		ret = -EPERM;
2946 		goto out;
2947 	}
2948 
2949 	ret = btrfs_search_path_in_tree(root->fs_info,
2950 					args->treeid, args->objectid,
2951 					args->name);
2952 
2953 out:
2954 	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2955 		ret = -EFAULT;
2956 
2957 	kfree(args);
2958 	return ret;
2959 }
2960 
2961 /*
2962  * Version of ino_lookup ioctl (unprivileged)
2963  *
2964  * The main differences from ino_lookup ioctl are:
2965  *
2966  *   1. Read + Exec permission will be checked using inode_permission() during
2967  *      path construction. -EACCES will be returned in case of failure.
2968  *   2. Path construction will be stopped at the inode number which corresponds
2969  *      to the fd with which this ioctl is called. If constructed path does not
2970  *      exist under fd's inode, -EACCES will be returned.
2971  *   3. The name of bottom subvolume is also searched and filled.
2972  */
2973 static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
2974 {
2975 	struct btrfs_ioctl_ino_lookup_user_args *args;
2976 	struct inode *inode;
2977 	int ret;
2978 
2979 	args = memdup_user(argp, sizeof(*args));
2980 	if (IS_ERR(args))
2981 		return PTR_ERR(args);
2982 
2983 	inode = file_inode(file);
2984 
2985 	if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
2986 	    BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
2987 		/*
2988 		 * The subvolume does not exist under the fd with which this
2989 		 * ioctl was called
2990 		 */
2991 		kfree(args);
2992 		return -EACCES;
2993 	}
2994 
2995 	ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);
2996 
2997 	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2998 		ret = -EFAULT;
2999 
3000 	kfree(args);
3001 	return ret;
3002 }
3003 
3004 /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
3005 static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp)
3006 {
3007 	struct btrfs_ioctl_get_subvol_info_args *subvol_info;
3008 	struct btrfs_fs_info *fs_info;
3009 	struct btrfs_root *root;
3010 	struct btrfs_path *path;
3011 	struct btrfs_key key;
3012 	struct btrfs_root_item *root_item;
3013 	struct btrfs_root_ref *rref;
3014 	struct extent_buffer *leaf;
3015 	unsigned long item_off;
3016 	unsigned long item_len;
3017 	int slot;
3018 	int ret = 0;
3019 
3020 	path = btrfs_alloc_path();
3021 	if (!path)
3022 		return -ENOMEM;
3023 
3024 	subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
3025 	if (!subvol_info) {
3026 		btrfs_free_path(path);
3027 		return -ENOMEM;
3028 	}
3029 
3030 	fs_info = BTRFS_I(inode)->root->fs_info;
3031 
3032 	/* Get root_item of inode's subvolume */
3033 	key.objectid = BTRFS_I(inode)->root->root_key.objectid;
3034 	root = btrfs_get_fs_root(fs_info, key.objectid, true);
3035 	if (IS_ERR(root)) {
3036 		ret = PTR_ERR(root);
3037 		goto out_free;
3038 	}
3039 	root_item = &root->root_item;
3040 
3041 	subvol_info->treeid = key.objectid;
3042 
3043 	subvol_info->generation = btrfs_root_generation(root_item);
3044 	subvol_info->flags = btrfs_root_flags(root_item);
3045 
3046 	memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
3047 	memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
3048 						    BTRFS_UUID_SIZE);
3049 	memcpy(subvol_info->received_uuid, root_item->received_uuid,
3050 						    BTRFS_UUID_SIZE);
3051 
3052 	subvol_info->ctransid = btrfs_root_ctransid(root_item);
3053 	subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
3054 	subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);
3055 
3056 	subvol_info->otransid = btrfs_root_otransid(root_item);
3057 	subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
3058 	subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);
3059 
3060 	subvol_info->stransid = btrfs_root_stransid(root_item);
3061 	subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
3062 	subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);
3063 
3064 	subvol_info->rtransid = btrfs_root_rtransid(root_item);
3065 	subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
3066 	subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);
3067 
3068 	if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
3069 		/* Search root tree for ROOT_BACKREF of this subvolume */
3070 		key.type = BTRFS_ROOT_BACKREF_KEY;
3071 		key.offset = 0;
3072 		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3073 		if (ret < 0) {
3074 			goto out;
3075 		} else if (path->slots[0] >=
3076 			   btrfs_header_nritems(path->nodes[0])) {
3077 			ret = btrfs_next_leaf(fs_info->tree_root, path);
3078 			if (ret < 0) {
3079 				goto out;
3080 			} else if (ret > 0) {
3081 				ret = -EUCLEAN;
3082 				goto out;
3083 			}
3084 		}
3085 
3086 		leaf = path->nodes[0];
3087 		slot = path->slots[0];
3088 		btrfs_item_key_to_cpu(leaf, &key, slot);
3089 		if (key.objectid == subvol_info->treeid &&
3090 		    key.type == BTRFS_ROOT_BACKREF_KEY) {
3091 			subvol_info->parent_id = key.offset;
3092 
3093 			rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
3094 			subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);
3095 
3096 			item_off = btrfs_item_ptr_offset(leaf, slot)
3097 					+ sizeof(struct btrfs_root_ref);
3098 			item_len = btrfs_item_size(leaf, slot)
3099 					- sizeof(struct btrfs_root_ref);
3100 			read_extent_buffer(leaf, subvol_info->name,
3101 					   item_off, item_len);
3102 		} else {
3103 			ret = -ENOENT;
3104 			goto out;
3105 		}
3106 	}
3107 
3108 	btrfs_free_path(path);
3109 	path = NULL;
3110 	if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
3111 		ret = -EFAULT;
3112 
3113 out:
3114 	btrfs_put_root(root);
3115 out_free:
3116 	btrfs_free_path(path);
3117 	kfree(subvol_info);
3118 	return ret;
3119 }
3120 
3121 /*
3122  * Return ROOT_REF information of the subvolume containing this inode
3123  * except the subvolume name.
3124  */
3125 static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root,
3126 					  void __user *argp)
3127 {
3128 	struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
3129 	struct btrfs_root_ref *rref;
3130 	struct btrfs_path *path;
3131 	struct btrfs_key key;
3132 	struct extent_buffer *leaf;
3133 	u64 objectid;
3134 	int slot;
3135 	int ret;
3136 	u8 found;
3137 
3138 	path = btrfs_alloc_path();
3139 	if (!path)
3140 		return -ENOMEM;
3141 
3142 	rootrefs = memdup_user(argp, sizeof(*rootrefs));
3143 	if (IS_ERR(rootrefs)) {
3144 		btrfs_free_path(path);
3145 		return PTR_ERR(rootrefs);
3146 	}
3147 
3148 	objectid = root->root_key.objectid;
3149 	key.objectid = objectid;
3150 	key.type = BTRFS_ROOT_REF_KEY;
3151 	key.offset = rootrefs->min_treeid;
3152 	found = 0;
3153 
3154 	root = root->fs_info->tree_root;
3155 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3156 	if (ret < 0) {
3157 		goto out;
3158 	} else if (path->slots[0] >=
3159 		   btrfs_header_nritems(path->nodes[0])) {
3160 		ret = btrfs_next_leaf(root, path);
3161 		if (ret < 0) {
3162 			goto out;
3163 		} else if (ret > 0) {
3164 			ret = -EUCLEAN;
3165 			goto out;
3166 		}
3167 	}
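	/*
	 * Copy ROOT_REF items starting at @min_treeid, up to
	 * BTRFS_MAX_ROOTREF_BUFFER_NUM entries; -EOVERFLOW tells the caller to
	 * repeat the call with an updated min_treeid to fetch the rest.
	 */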
3168 	while (1) {
3169 		leaf = path->nodes[0];
3170 		slot = path->slots[0];
3171 
3172 		btrfs_item_key_to_cpu(leaf, &key, slot);
3173 		if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
3174 			ret = 0;
3175 			goto out;
3176 		}
3177 
3178 		if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
3179 			ret = -EOVERFLOW;
3180 			goto out;
3181 		}
3182 
3183 		rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
3184 		rootrefs->rootref[found].treeid = key.offset;
3185 		rootrefs->rootref[found].dirid =
3186 				  btrfs_root_ref_dirid(leaf, rref);
3187 		found++;
3188 
3189 		ret = btrfs_next_item(root, path);
3190 		if (ret < 0) {
3191 			goto out;
3192 		} else if (ret > 0) {
3193 			ret = -EUCLEAN;
3194 			goto out;
3195 		}
3196 	}
3197 
3198 out:
3199 	btrfs_free_path(path);
3200 
3201 	if (!ret || ret == -EOVERFLOW) {
3202 		rootrefs->num_items = found;
3203 		/* update min_treeid for next search */
3204 		if (found)
3205 			rootrefs->min_treeid =
3206 				rootrefs->rootref[found - 1].treeid + 1;
3207 		if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
3208 			ret = -EFAULT;
3209 	}
3210 
3211 	kfree(rootrefs);
3212 
3213 	return ret;
3214 }
3215 
3216 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
3217 					     void __user *arg,
3218 					     bool destroy_v2)
3219 {
3220 	struct dentry *parent = file->f_path.dentry;
3221 	struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
3222 	struct dentry *dentry;
3223 	struct inode *dir = d_inode(parent);
3224 	struct inode *inode;
3225 	struct btrfs_root *root = BTRFS_I(dir)->root;
3226 	struct btrfs_root *dest = NULL;
3227 	struct btrfs_ioctl_vol_args *vol_args = NULL;
3228 	struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
3229 	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
3230 	char *subvol_name, *subvol_name_ptr = NULL;
3231 	int subvol_namelen;
3232 	int err = 0;
3233 	bool destroy_parent = false;
3234 
3235 	/* We don't support snapshots with extent tree v2 yet. */
3236 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3237 		btrfs_err(fs_info,
3238 			  "extent tree v2 doesn't support snapshot deletion yet");
3239 		return -EOPNOTSUPP;
3240 	}
3241 
3242 	if (destroy_v2) {
3243 		vol_args2 = memdup_user(arg, sizeof(*vol_args2));
3244 		if (IS_ERR(vol_args2))
3245 			return PTR_ERR(vol_args2);
3246 
3247 		if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) {
3248 			err = -EOPNOTSUPP;
3249 			goto out;
3250 		}
3251 
3252 		/*
3253 		 * If SPEC_BY_ID is not set, we are looking for the subvolume by
3254 		 * name, same as v1 currently does.
3255 		 */
3256 		if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) {
3257 			vol_args2->name[BTRFS_SUBVOL_NAME_MAX] = 0;
3258 			subvol_name = vol_args2->name;
3259 
3260 			err = mnt_want_write_file(file);
3261 			if (err)
3262 				goto out;
3263 		} else {
3264 			struct inode *old_dir;
3265 
3266 			if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
3267 				err = -EINVAL;
3268 				goto out;
3269 			}
3270 
3271 			err = mnt_want_write_file(file);
3272 			if (err)
3273 				goto out;
3274 
3275 			dentry = btrfs_get_dentry(fs_info->sb,
3276 					BTRFS_FIRST_FREE_OBJECTID,
3277 					vol_args2->subvolid, 0, 0);
3278 			if (IS_ERR(dentry)) {
3279 				err = PTR_ERR(dentry);
3280 				goto out_drop_write;
3281 			}
3282 
3283 			/*
3284 			 * Change the default parent since the subvolume being
3285 			 * deleted can be outside of the current mount point.
3286 			 */
3287 			parent = btrfs_get_parent(dentry);
3288 
3289 			/*
3290 			 * At this point dentry->d_name can point to '/' if the
3291 			 * subvolume we want to destroy is outside of the
3292 			 * current mount point, so we need to release the
3293 			 * current dentry and execute the lookup to return a new
3294 			 * one with ->d_name pointing to the
3295 			 * <mount point>/subvol_name.
3296 			 */
3297 			dput(dentry);
3298 			if (IS_ERR(parent)) {
3299 				err = PTR_ERR(parent);
3300 				goto out_drop_write;
3301 			}
3302 			old_dir = dir;
3303 			dir = d_inode(parent);
3304 
3305 			/*
3306 			 * If v2 was used with SPEC_BY_ID, a new parent was
3307 			 * allocated since the subvolume can be outside of the
3308 			 * current mount point. Later on we need to release this
3309 			 * new parent dentry.
3310 			 */
3311 			destroy_parent = true;
3312 
3313 			/*
3314 			 * On idmapped mounts, deletion via subvolid is
3315 			 * restricted to subvolumes that are immediate
3316 			 * ancestors of the inode referenced by the file
3317 			 * descriptor in the ioctl. Otherwise the idmapping
3318 			 * could potentially be abused to delete subvolumes
3319 			 * anywhere in the filesystem the user wouldn't be able
3320 			 * to delete without an idmapped mount.
3321 			 */
3322 			if (old_dir != dir && mnt_userns != &init_user_ns) {
3323 				err = -EOPNOTSUPP;
3324 				goto free_parent;
3325 			}
3326 
3327 			subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
3328 						fs_info, vol_args2->subvolid);
3329 			if (IS_ERR(subvol_name_ptr)) {
3330 				err = PTR_ERR(subvol_name_ptr);
3331 				goto free_parent;
3332 			}
3333 			/* subvol_name_ptr is already nul terminated */
3334 			subvol_name = (char *)kbasename(subvol_name_ptr);
3335 		}
3336 	} else {
3337 		vol_args = memdup_user(arg, sizeof(*vol_args));
3338 		if (IS_ERR(vol_args))
3339 			return PTR_ERR(vol_args);
3340 
3341 		vol_args->name[BTRFS_PATH_NAME_MAX] = 0;
3342 		subvol_name = vol_args->name;
3343 
3344 		err = mnt_want_write_file(file);
3345 		if (err)
3346 			goto out;
3347 	}
3348 
3349 	subvol_namelen = strlen(subvol_name);
3350 
3351 	if (strchr(subvol_name, '/') ||
3352 	    strncmp(subvol_name, "..", subvol_namelen) == 0) {
3353 		err = -EINVAL;
3354 		goto free_subvol_name;
3355 	}
3356 
3357 	if (!S_ISDIR(dir->i_mode)) {
3358 		err = -ENOTDIR;
3359 		goto free_subvol_name;
3360 	}
3361 
3362 	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
3363 	if (err == -EINTR)
3364 		goto free_subvol_name;
3365 	dentry = lookup_one(mnt_userns, subvol_name, parent, subvol_namelen);
3366 	if (IS_ERR(dentry)) {
3367 		err = PTR_ERR(dentry);
3368 		goto out_unlock_dir;
3369 	}
3370 
3371 	if (d_really_is_negative(dentry)) {
3372 		err = -ENOENT;
3373 		goto out_dput;
3374 	}
3375 
3376 	inode = d_inode(dentry);
3377 	dest = BTRFS_I(inode)->root;
3378 	if (!capable(CAP_SYS_ADMIN)) {
3379 		/*
3380 		 * Regular user.  Only allow this with a special mount
3381 		 * option, when the user has write+exec access to the
3382 		 * subvol root, and when rmdir(2) would have been
3383 		 * allowed.
3384 		 *
3385 		 * Note that this is _not_ a check that the subvol is
3386 		 * empty or doesn't contain data that we wouldn't
3387 		 * otherwise be able to delete.
3388 		 *
3389 		 * Users who want to delete empty subvols should try
3390 		 * rmdir(2).
3391 		 */
3392 		err = -EPERM;
3393 		if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
3394 			goto out_dput;
3395 
3396 		/*
3397 		 * Do not allow deletion if the parent dir is the same
3398 		 * as the dir to be deleted.  That means the ioctl
3399 		 * must be called on the dentry referencing the root
3400 		 * of the subvol, not a random directory contained
3401 		 * within it.
3402 		 */
3403 		err = -EINVAL;
3404 		if (root == dest)
3405 			goto out_dput;
3406 
3407 		err = inode_permission(mnt_userns, inode, MAY_WRITE | MAY_EXEC);
3408 		if (err)
3409 			goto out_dput;
3410 	}
3411 
3412 	/* check if subvolume may be deleted by a user */
3413 	err = btrfs_may_delete(mnt_userns, dir, dentry, 1);
3414 	if (err)
3415 		goto out_dput;
3416 
3417 	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
3418 		err = -EINVAL;
3419 		goto out_dput;
3420 	}
3421 
3422 	btrfs_inode_lock(inode, 0);
3423 	err = btrfs_delete_subvolume(dir, dentry);
3424 	btrfs_inode_unlock(inode, 0);
3425 	if (!err)
3426 		d_delete_notify(dir, dentry);
3427 
3428 out_dput:
3429 	dput(dentry);
3430 out_unlock_dir:
3431 	btrfs_inode_unlock(dir, 0);
3432 free_subvol_name:
3433 	kfree(subvol_name_ptr);
3434 free_parent:
3435 	if (destroy_parent)
3436 		dput(parent);
3437 out_drop_write:
3438 	mnt_drop_write_file(file);
3439 out:
3440 	kfree(vol_args2);
3441 	kfree(vol_args);
3442 	return err;
3443 }
3444 
3445 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
3446 {
3447 	struct inode *inode = file_inode(file);
3448 	struct btrfs_root *root = BTRFS_I(inode)->root;
3449 	struct btrfs_ioctl_defrag_range_args range = {0};
3450 	int ret;
3451 
3452 	ret = mnt_want_write_file(file);
3453 	if (ret)
3454 		return ret;
3455 
3456 	if (btrfs_root_readonly(root)) {
3457 		ret = -EROFS;
3458 		goto out;
3459 	}
3460 
3461 	switch (inode->i_mode & S_IFMT) {
3462 	case S_IFDIR:
3463 		if (!capable(CAP_SYS_ADMIN)) {
3464 			ret = -EPERM;
3465 			goto out;
3466 		}
3467 		ret = btrfs_defrag_root(root);
3468 		break;
3469 	case S_IFREG:
3470 		/*
3471 		 * Note that this does not check the file descriptor for write
3472 		 * access. This prevents defragmenting executables that are
3473 		 * running and allows defrag on files open in read-only mode.
3474 		 */
3475 		if (!capable(CAP_SYS_ADMIN) &&
3476 		    inode_permission(&init_user_ns, inode, MAY_WRITE)) {
3477 			ret = -EPERM;
3478 			goto out;
3479 		}
3480 
3481 		if (argp) {
3482 			if (copy_from_user(&range, argp, sizeof(range))) {
3483 				ret = -EFAULT;
3484 				goto out;
3485 			}
3486 			/* compression requires us to start the IO */
3487 			if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
3488 				range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
3489 				range.extent_thresh = (u32)-1;
3490 			}
3491 		} else {
3492 			/* the rest are all set to zero by kzalloc */
3493 			range.len = (u64)-1;
3494 		}
3495 		ret = btrfs_defrag_file(file_inode(file), &file->f_ra,
3496 					&range, BTRFS_OLDEST_GENERATION, 0);
3497 		if (ret > 0)
3498 			ret = 0;
3499 		break;
3500 	default:
3501 		ret = -EINVAL;
3502 	}
3503 out:
3504 	mnt_drop_write_file(file);
3505 	return ret;
3506 }
3507 
3508 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
3509 {
3510 	struct btrfs_ioctl_vol_args *vol_args;
3511 	bool restore_op = false;
3512 	int ret;
3513 
3514 	if (!capable(CAP_SYS_ADMIN))
3515 		return -EPERM;
3516 
3517 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3518 		btrfs_err(fs_info, "device add not supported on extent tree v2 yet");
3519 		return -EINVAL;
3520 	}
3521 
3522 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
3523 		if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
3524 			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3525 
3526 		/*
3527 		 * We can do the device add because we have a paused balance;
3528 		 * change the exclusive op type and remember we should bring
3529 		 * back the paused balance
3530 		 */
3531 		fs_info->exclusive_operation = BTRFS_EXCLOP_DEV_ADD;
3532 		btrfs_exclop_start_unlock(fs_info);
3533 		restore_op = true;
3534 	}
3535 
3536 	vol_args = memdup_user(arg, sizeof(*vol_args));
3537 	if (IS_ERR(vol_args)) {
3538 		ret = PTR_ERR(vol_args);
3539 		goto out;
3540 	}
3541 
3542 	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3543 	ret = btrfs_init_new_device(fs_info, vol_args->name);
3544 
3545 	if (!ret)
3546 		btrfs_info(fs_info, "disk added %s", vol_args->name);
3547 
3548 	kfree(vol_args);
3549 out:
3550 	if (restore_op)
3551 		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
3552 	else
3553 		btrfs_exclop_finish(fs_info);
3554 	return ret;
3555 }
3556 
3557 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
3558 {
3559 	BTRFS_DEV_LOOKUP_ARGS(args);
3560 	struct inode *inode = file_inode(file);
3561 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3562 	struct btrfs_ioctl_vol_args_v2 *vol_args;
3563 	struct block_device *bdev = NULL;
3564 	fmode_t mode;
3565 	int ret;
3566 	bool cancel = false;
3567 
3568 	if (!capable(CAP_SYS_ADMIN))
3569 		return -EPERM;
3570 
3571 	vol_args = memdup_user(arg, sizeof(*vol_args));
3572 	if (IS_ERR(vol_args))
3573 		return PTR_ERR(vol_args);
3574 
3575 	if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
3576 		ret = -EOPNOTSUPP;
3577 		goto out;
3578 	}
3579 
3580 	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
3581 	if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
3582 		args.devid = vol_args->devid;
3583 	} else if (!strcmp("cancel", vol_args->name)) {
3584 		cancel = true;
3585 	} else {
3586 		ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name);
3587 		if (ret)
3588 			goto out;
3589 	}
3590 
3591 	ret = mnt_want_write_file(file);
3592 	if (ret)
3593 		goto out;
3594 
3595 	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
3596 					   cancel);
3597 	if (ret)
3598 		goto err_drop;
3599 
3600 	/* Exclusive operation is now claimed */
3601 	ret = btrfs_rm_device(fs_info, &args, &bdev, &mode);
3602 
3603 	btrfs_exclop_finish(fs_info);
3604 
3605 	if (!ret) {
3606 		if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3607 			btrfs_info(fs_info, "device deleted: id %llu",
3608 					vol_args->devid);
3609 		else
3610 			btrfs_info(fs_info, "device deleted: %s",
3611 					vol_args->name);
3612 	}
3613 err_drop:
3614 	mnt_drop_write_file(file);
3615 	if (bdev)
3616 		blkdev_put(bdev, mode);
3617 out:
3618 	btrfs_put_dev_args_from_path(&args);
3619 	kfree(vol_args);
3620 	return ret;
3621 }
3622 
3623 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
3624 {
3625 	BTRFS_DEV_LOOKUP_ARGS(args);
3626 	struct inode *inode = file_inode(file);
3627 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3628 	struct btrfs_ioctl_vol_args *vol_args;
3629 	struct block_device *bdev = NULL;
3630 	fmode_t mode;
3631 	int ret;
3632 	bool cancel = false;
3633 
3634 	if (!capable(CAP_SYS_ADMIN))
3635 		return -EPERM;
3636 
3637 	vol_args = memdup_user(arg, sizeof(*vol_args));
3638 	if (IS_ERR(vol_args))
3639 		return PTR_ERR(vol_args);
3640 
3641 	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3642 	if (!strcmp("cancel", vol_args->name)) {
3643 		cancel = true;
3644 	} else {
3645 		ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name);
3646 		if (ret)
3647 			goto out;
3648 	}
3649 
3650 	ret = mnt_want_write_file(file);
3651 	if (ret)
3652 		goto out;
3653 
3654 	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
3655 					   cancel);
3656 	if (ret == 0) {
3657 		ret = btrfs_rm_device(fs_info, &args, &bdev, &mode);
3658 		if (!ret)
3659 			btrfs_info(fs_info, "disk deleted %s", vol_args->name);
3660 		btrfs_exclop_finish(fs_info);
3661 	}
3662 
3663 	mnt_drop_write_file(file);
3664 	if (bdev)
3665 		blkdev_put(bdev, mode);
3666 out:
3667 	btrfs_put_dev_args_from_path(&args);
3668 	kfree(vol_args);
3669 	return ret;
3670 }
3671 
3672 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
3673 				void __user *arg)
3674 {
3675 	struct btrfs_ioctl_fs_info_args *fi_args;
3676 	struct btrfs_device *device;
3677 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3678 	u64 flags_in;
3679 	int ret = 0;
3680 
3681 	fi_args = memdup_user(arg, sizeof(*fi_args));
3682 	if (IS_ERR(fi_args))
3683 		return PTR_ERR(fi_args);
3684 
3685 	flags_in = fi_args->flags;
3686 	memset(fi_args, 0, sizeof(*fi_args));
3687 
3688 	rcu_read_lock();
3689 	fi_args->num_devices = fs_devices->num_devices;
3690 
3691 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
3692 		if (device->devid > fi_args->max_id)
3693 			fi_args->max_id = device->devid;
3694 	}
3695 	rcu_read_unlock();
3696 
3697 	memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid));
3698 	fi_args->nodesize = fs_info->nodesize;
3699 	fi_args->sectorsize = fs_info->sectorsize;
3700 	fi_args->clone_alignment = fs_info->sectorsize;
3701 
3702 	if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) {
3703 		fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy);
3704 		fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy);
3705 		fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO;
3706 	}
3707 
3708 	if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
3709 		fi_args->generation = fs_info->generation;
3710 		fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
3711 	}
3712 
3713 	if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) {
3714 		memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid,
3715 		       sizeof(fi_args->metadata_uuid));
3716 		fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID;
3717 	}
3718 
3719 	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
3720 		ret = -EFAULT;
3721 
3722 	kfree(fi_args);
3723 	return ret;
3724 }
3725 
3726 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
3727 				 void __user *arg)
3728 {
3729 	BTRFS_DEV_LOOKUP_ARGS(args);
3730 	struct btrfs_ioctl_dev_info_args *di_args;
3731 	struct btrfs_device *dev;
3732 	int ret = 0;
3733 
3734 	di_args = memdup_user(arg, sizeof(*di_args));
3735 	if (IS_ERR(di_args))
3736 		return PTR_ERR(di_args);
3737 
3738 	args.devid = di_args->devid;
3739 	if (!btrfs_is_empty_uuid(di_args->uuid))
3740 		args.uuid = di_args->uuid;
3741 
3742 	rcu_read_lock();
3743 	dev = btrfs_find_device(fs_info->fs_devices, &args);
3744 	if (!dev) {
3745 		ret = -ENODEV;
3746 		goto out;
3747 	}
3748 
3749 	di_args->devid = dev->devid;
3750 	di_args->bytes_used = btrfs_device_get_bytes_used(dev);
3751 	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
3752 	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
3753 	if (dev->name)
3754 		strscpy(di_args->path, rcu_str_deref(dev->name), sizeof(di_args->path));
3755 	else
3756 		di_args->path[0] = '\0';
3757 
3758 out:
3759 	rcu_read_unlock();
3760 	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
3761 		ret = -EFAULT;
3762 
3763 	kfree(di_args);
3764 	return ret;
3765 }
3766 
3767 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
3768 {
3769 	struct inode *inode = file_inode(file);
3770 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3771 	struct btrfs_root *root = BTRFS_I(inode)->root;
3772 	struct btrfs_root *new_root;
3773 	struct btrfs_dir_item *di;
3774 	struct btrfs_trans_handle *trans;
3775 	struct btrfs_path *path = NULL;
3776 	struct btrfs_disk_key disk_key;
3777 	u64 objectid = 0;
3778 	u64 dir_id;
3779 	int ret;
3780 
3781 	if (!capable(CAP_SYS_ADMIN))
3782 		return -EPERM;
3783 
3784 	ret = mnt_want_write_file(file);
3785 	if (ret)
3786 		return ret;
3787 
3788 	if (copy_from_user(&objectid, argp, sizeof(objectid))) {
3789 		ret = -EFAULT;
3790 		goto out;
3791 	}
3792 
3793 	if (!objectid)
3794 		objectid = BTRFS_FS_TREE_OBJECTID;
3795 
3796 	new_root = btrfs_get_fs_root(fs_info, objectid, true);
3797 	if (IS_ERR(new_root)) {
3798 		ret = PTR_ERR(new_root);
3799 		goto out;
3800 	}
3801 	if (!is_fstree(new_root->root_key.objectid)) {
3802 		ret = -ENOENT;
3803 		goto out_free;
3804 	}
3805 
3806 	path = btrfs_alloc_path();
3807 	if (!path) {
3808 		ret = -ENOMEM;
3809 		goto out_free;
3810 	}
3811 
3812 	trans = btrfs_start_transaction(root, 1);
3813 	if (IS_ERR(trans)) {
3814 		ret = PTR_ERR(trans);
3815 		goto out_free;
3816 	}
3817 
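	/*
	 * The "default" dir item in the tree root records which subvolume gets
	 * mounted by default; repoint it at the requested root below.
	 */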
3818 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
3819 	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
3820 				   dir_id, "default", 7, 1);
3821 	if (IS_ERR_OR_NULL(di)) {
3822 		btrfs_release_path(path);
3823 		btrfs_end_transaction(trans);
3824 		btrfs_err(fs_info,
3825 			  "Umm, you don't have the default diritem, this isn't going to work");
3826 		ret = -ENOENT;
3827 		goto out_free;
3828 	}
3829 
3830 	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
3831 	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
3832 	btrfs_mark_buffer_dirty(path->nodes[0]);
3833 	btrfs_release_path(path);
3834 
3835 	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
3836 	btrfs_end_transaction(trans);
3837 out_free:
3838 	btrfs_put_root(new_root);
3839 	btrfs_free_path(path);
3840 out:
3841 	mnt_drop_write_file(file);
3842 	return ret;
3843 }
3844 
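/*
 * Sum the total and used bytes of every block group on the given per-profile
 * list and report the common block group flags.
 */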
3845 static void get_block_group_info(struct list_head *groups_list,
3846 				 struct btrfs_ioctl_space_info *space)
3847 {
3848 	struct btrfs_block_group *block_group;
3849 
3850 	space->total_bytes = 0;
3851 	space->used_bytes = 0;
3852 	space->flags = 0;
3853 	list_for_each_entry(block_group, groups_list, list) {
3854 		space->flags = block_group->flags;
3855 		space->total_bytes += block_group->length;
3856 		space->used_bytes += block_group->used;
3857 	}
3858 }
3859 
3860 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
3861 				   void __user *arg)
3862 {
3863 	struct btrfs_ioctl_space_args space_args;
3864 	struct btrfs_ioctl_space_info space;
3865 	struct btrfs_ioctl_space_info *dest;
3866 	struct btrfs_ioctl_space_info *dest_orig;
3867 	struct btrfs_ioctl_space_info __user *user_dest;
3868 	struct btrfs_space_info *info;
3869 	static const u64 types[] = {
3870 		BTRFS_BLOCK_GROUP_DATA,
3871 		BTRFS_BLOCK_GROUP_SYSTEM,
3872 		BTRFS_BLOCK_GROUP_METADATA,
3873 		BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
3874 	};
3875 	int num_types = 4;
3876 	int alloc_size;
3877 	int ret = 0;
3878 	u64 slot_count = 0;
3879 	int i, c;
3880 
3881 	if (copy_from_user(&space_args,
3882 			   (struct btrfs_ioctl_space_args __user *)arg,
3883 			   sizeof(space_args)))
3884 		return -EFAULT;
3885 
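	/* First pass: count how many space info slots we could report. */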
3886 	for (i = 0; i < num_types; i++) {
3887 		struct btrfs_space_info *tmp;
3888 
3889 		info = NULL;
3890 		list_for_each_entry(tmp, &fs_info->space_info, list) {
3891 			if (tmp->flags == types[i]) {
3892 				info = tmp;
3893 				break;
3894 			}
3895 		}
3896 
3897 		if (!info)
3898 			continue;
3899 
3900 		down_read(&info->groups_sem);
3901 		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3902 			if (!list_empty(&info->block_groups[c]))
3903 				slot_count++;
3904 		}
3905 		up_read(&info->groups_sem);
3906 	}
3907 
3908 	/*
3909 	 * Global block reserve, exported as a space_info
3910 	 */
3911 	slot_count++;
3912 
3913 	/* space_slots == 0 means they are asking for a count */
3914 	if (space_args.space_slots == 0) {
3915 		space_args.total_spaces = slot_count;
3916 		goto out;
3917 	}
3918 
3919 	slot_count = min_t(u64, space_args.space_slots, slot_count);
3920 
3921 	alloc_size = sizeof(*dest) * slot_count;
3922 
3923 	/* we generally have at most 6 or so space infos, one for each raid
3924 	 * level.  So, a whole page should be more than enough for everyone
3925 	 */
3926 	if (alloc_size > PAGE_SIZE)
3927 		return -ENOMEM;
3928 
3929 	space_args.total_spaces = 0;
3930 	dest = kmalloc(alloc_size, GFP_KERNEL);
3931 	if (!dest)
3932 		return -ENOMEM;
3933 	dest_orig = dest;
3934 
3935 	/* now we have a buffer to copy into */
3936 	for (i = 0; i < num_types; i++) {
3937 		struct btrfs_space_info *tmp;
3938 
3939 		if (!slot_count)
3940 			break;
3941 
3942 		info = NULL;
3943 		list_for_each_entry(tmp, &fs_info->space_info, list) {
3944 			if (tmp->flags == types[i]) {
3945 				info = tmp;
3946 				break;
3947 			}
3948 		}
3949 
3950 		if (!info)
3951 			continue;
3952 		down_read(&info->groups_sem);
3953 		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3954 			if (!list_empty(&info->block_groups[c])) {
3955 				get_block_group_info(&info->block_groups[c],
3956 						     &space);
3957 				memcpy(dest, &space, sizeof(space));
3958 				dest++;
3959 				space_args.total_spaces++;
3960 				slot_count--;
3961 			}
3962 			if (!slot_count)
3963 				break;
3964 		}
3965 		up_read(&info->groups_sem);
3966 	}
3967 
3968 	/*
3969 	 * Add global block reserve
3970 	 */
3971 	if (slot_count) {
3972 		struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3973 
3974 		spin_lock(&block_rsv->lock);
3975 		space.total_bytes = block_rsv->size;
3976 		space.used_bytes = block_rsv->size - block_rsv->reserved;
3977 		spin_unlock(&block_rsv->lock);
3978 		space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
3979 		memcpy(dest, &space, sizeof(space));
3980 		space_args.total_spaces++;
3981 	}
3982 
3983 	user_dest = (struct btrfs_ioctl_space_info __user *)
3984 		(arg + sizeof(struct btrfs_ioctl_space_args));
3985 
3986 	if (copy_to_user(user_dest, dest_orig, alloc_size))
3987 		ret = -EFAULT;
3988 
3989 	kfree(dest_orig);
3990 out:
3991 	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
3992 		ret = -EFAULT;
3993 
3994 	return ret;
3995 }
3996 
3997 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
3998 					    void __user *argp)
3999 {
4000 	struct btrfs_trans_handle *trans;
4001 	u64 transid;
4002 
4003 	trans = btrfs_attach_transaction_barrier(root);
4004 	if (IS_ERR(trans)) {
4005 		if (PTR_ERR(trans) != -ENOENT)
4006 			return PTR_ERR(trans);
4007 
4008 		/* No running transaction, don't bother */
4009 		transid = root->fs_info->last_trans_committed;
4010 		goto out;
4011 	}
4012 	transid = trans->transid;
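	/*
	 * Kick off the commit without waiting for it; the transid is handed
	 * back to user space so it can wait for completion later via
	 * btrfs_ioctl_wait_sync().
	 */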
4013 	btrfs_commit_transaction_async(trans);
4014 out:
4015 	if (argp)
4016 		if (copy_to_user(argp, &transid, sizeof(transid)))
4017 			return -EFAULT;
4018 	return 0;
4019 }
4020 
4021 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4022 					   void __user *argp)
4023 {
4024 	u64 transid;
4025 
4026 	if (argp) {
4027 		if (copy_from_user(&transid, argp, sizeof(transid)))
4028 			return -EFAULT;
4029 	} else {
4030 		transid = 0;  /* current trans */
4031 	}
4032 	return btrfs_wait_for_commit(fs_info, transid);
4033 }
4034 
4035 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4036 {
4037 	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
4038 	struct btrfs_ioctl_scrub_args *sa;
4039 	int ret;
4040 
4041 	if (!capable(CAP_SYS_ADMIN))
4042 		return -EPERM;
4043 
4044 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
4045 		btrfs_err(fs_info, "scrub is not supported on extent tree v2 yet");
4046 		return -EINVAL;
4047 	}
4048 
4049 	sa = memdup_user(arg, sizeof(*sa));
4050 	if (IS_ERR(sa))
4051 		return PTR_ERR(sa);
4052 
4053 	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4054 		ret = mnt_want_write_file(file);
4055 		if (ret)
4056 			goto out;
4057 	}
4058 
4059 	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4060 			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4061 			      0);
4062 
4063 	/*
4064 	 * Copy scrub args to user space even if btrfs_scrub_dev() returned an
4065 	 * error. This is important as it allows user space to know how much
4066 	 * progress scrub has done. For example, if scrub is canceled we get
4067 	 * -ECANCELED from btrfs_scrub_dev() and return that error back to user
4068 	 * space. Later user space can inspect the progress from the structure
4069 	 * btrfs_ioctl_scrub_args and resume scrub from where it left off
4070 	 * previously (btrfs-progs does this).
4071 	 * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
4072 	 * then return -EFAULT to signal the structure was not copied or it may
4073 	 * be corrupt and unreliable due to a partial copy.
4074 	 */
4075 	if (copy_to_user(arg, sa, sizeof(*sa)))
4076 		ret = -EFAULT;
4077 
4078 	if (!(sa->flags & BTRFS_SCRUB_READONLY))
4079 		mnt_drop_write_file(file);
4080 out:
4081 	kfree(sa);
4082 	return ret;
4083 }
4084 
4085 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
4086 {
4087 	if (!capable(CAP_SYS_ADMIN))
4088 		return -EPERM;
4089 
4090 	return btrfs_scrub_cancel(fs_info);
4091 }
4092 
4093 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
4094 				       void __user *arg)
4095 {
4096 	struct btrfs_ioctl_scrub_args *sa;
4097 	int ret;
4098 
4099 	if (!capable(CAP_SYS_ADMIN))
4100 		return -EPERM;
4101 
4102 	sa = memdup_user(arg, sizeof(*sa));
4103 	if (IS_ERR(sa))
4104 		return PTR_ERR(sa);
4105 
4106 	ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
4107 
4108 	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
4109 		ret = -EFAULT;
4110 
4111 	kfree(sa);
4112 	return ret;
4113 }
4114 
4115 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4116 				      void __user *arg)
4117 {
4118 	struct btrfs_ioctl_get_dev_stats *sa;
4119 	int ret;
4120 
4121 	sa = memdup_user(arg, sizeof(*sa));
4122 	if (IS_ERR(sa))
4123 		return PTR_ERR(sa);
4124 
4125 	if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4126 		kfree(sa);
4127 		return -EPERM;
4128 	}
4129 
4130 	ret = btrfs_get_dev_stats(fs_info, sa);
4131 
4132 	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
4133 		ret = -EFAULT;
4134 
4135 	kfree(sa);
4136 	return ret;
4137 }
4138 
4139 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
4140 				    void __user *arg)
4141 {
4142 	struct btrfs_ioctl_dev_replace_args *p;
4143 	int ret;
4144 
4145 	if (!capable(CAP_SYS_ADMIN))
4146 		return -EPERM;
4147 
4148 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
4149 		btrfs_err(fs_info, "device replace not supported on extent tree v2 yet");
4150 		return -EINVAL;
4151 	}
4152 
4153 	p = memdup_user(arg, sizeof(*p));
4154 	if (IS_ERR(p))
4155 		return PTR_ERR(p);
4156 
4157 	switch (p->cmd) {
4158 	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4159 		if (sb_rdonly(fs_info->sb)) {
4160 			ret = -EROFS;
4161 			goto out;
4162 		}
4163 		if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
4164 			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4165 		} else {
4166 			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4167 			btrfs_exclop_finish(fs_info);
4168 		}
4169 		break;
4170 	case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4171 		btrfs_dev_replace_status(fs_info, p);
4172 		ret = 0;
4173 		break;
4174 	case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4175 		p->result = btrfs_dev_replace_cancel(fs_info);
4176 		ret = 0;
4177 		break;
4178 	default:
4179 		ret = -EINVAL;
4180 		break;
4181 	}
4182 
4183 	if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
4184 		ret = -EFAULT;
4185 out:
4186 	kfree(p);
4187 	return ret;
4188 }
4189 
4190 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4191 {
4192 	int ret = 0;
4193 	int i;
4194 	u64 rel_ptr;
4195 	int size;
4196 	struct btrfs_ioctl_ino_path_args *ipa = NULL;
4197 	struct inode_fs_paths *ipath = NULL;
4198 	struct btrfs_path *path;
4199 
4200 	if (!capable(CAP_DAC_READ_SEARCH))
4201 		return -EPERM;
4202 
4203 	path = btrfs_alloc_path();
4204 	if (!path) {
4205 		ret = -ENOMEM;
4206 		goto out;
4207 	}
4208 
4209 	ipa = memdup_user(arg, sizeof(*ipa));
4210 	if (IS_ERR(ipa)) {
4211 		ret = PTR_ERR(ipa);
4212 		ipa = NULL;
4213 		goto out;
4214 	}
4215 
4216 	size = min_t(u32, ipa->size, 4096);
4217 	ipath = init_ipath(size, root, path);
4218 	if (IS_ERR(ipath)) {
4219 		ret = PTR_ERR(ipath);
4220 		ipath = NULL;
4221 		goto out;
4222 	}
4223 
4224 	ret = paths_from_inode(ipa->inum, ipath);
4225 	if (ret < 0)
4226 		goto out;
4227 
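	/*
	 * The path strings are stored as kernel pointers into the fspath
	 * buffer; convert them to offsets relative to the buffer start so that
	 * user space can find them after the copy below.
	 */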
4228 	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4229 		rel_ptr = ipath->fspath->val[i] -
4230 			  (u64)(unsigned long)ipath->fspath->val;
4231 		ipath->fspath->val[i] = rel_ptr;
4232 	}
4233 
4234 	btrfs_free_path(path);
4235 	path = NULL;
4236 	ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
4237 			   ipath->fspath, size);
4238 	if (ret) {
4239 		ret = -EFAULT;
4240 		goto out;
4241 	}
4242 
4243 out:
4244 	btrfs_free_path(path);
4245 	free_ipath(ipath);
4246 	kfree(ipa);
4247 
4248 	return ret;
4249 }
4250 
4251 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4252 					void __user *arg, int version)
4253 {
4254 	int ret = 0;
4255 	int size;
4256 	struct btrfs_ioctl_logical_ino_args *loi;
4257 	struct btrfs_data_container *inodes = NULL;
4258 	struct btrfs_path *path = NULL;
4259 	bool ignore_offset;
4260 
4261 	if (!capable(CAP_SYS_ADMIN))
4262 		return -EPERM;
4263 
4264 	loi = memdup_user(arg, sizeof(*loi));
4265 	if (IS_ERR(loi))
4266 		return PTR_ERR(loi);
4267 
4268 	if (version == 1) {
4269 		ignore_offset = false;
4270 		size = min_t(u32, loi->size, SZ_64K);
4271 	} else {
4272 		/* All reserved bits must be 0 for now */
4273 		if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
4274 			ret = -EINVAL;
4275 			goto out_loi;
4276 		}
4277 		/* Only accept flags we have defined so far */
4278 		if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
4279 			ret = -EINVAL;
4280 			goto out_loi;
4281 		}
4282 		ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
4283 		size = min_t(u32, loi->size, SZ_16M);
4284 	}
4285 
4286 	inodes = init_data_container(size);
4287 	if (IS_ERR(inodes)) {
4288 		ret = PTR_ERR(inodes);
4289 		goto out_loi;
4290 	}
4291 
4292 	path = btrfs_alloc_path();
4293 	if (!path) {
4294 		ret = -ENOMEM;
4295 		goto out;
4296 	}
4297 	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4298 					  inodes, ignore_offset);
4299 	btrfs_free_path(path);
4300 	if (ret == -EINVAL)
4301 		ret = -ENOENT;
4302 	if (ret < 0)
4303 		goto out;
4304 
4305 	ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
4306 			   size);
4307 	if (ret)
4308 		ret = -EFAULT;
4309 
4310 out:
4311 	kvfree(inodes);
4312 out_loi:
4313 	kfree(loi);
4314 
4315 	return ret;
4316 }
4317 
4318 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
4319 			       struct btrfs_ioctl_balance_args *bargs)
4320 {
4321 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4322 
4323 	bargs->flags = bctl->flags;
4324 
4325 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
4326 		bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4327 	if (atomic_read(&fs_info->balance_pause_req))
4328 		bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4329 	if (atomic_read(&fs_info->balance_cancel_req))
4330 		bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4331 
4332 	memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4333 	memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4334 	memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4335 
4336 	spin_lock(&fs_info->balance_lock);
4337 	memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4338 	spin_unlock(&fs_info->balance_lock);
4339 }
4340 
4341 /**
4342  * Try to acquire fs_info::balance_mutex as well as set BTRFS_EXCLOP_BALANCE as
4343  * required.
4344  *
4345  * @fs_info:       the filesystem
4346  * @excl_acquired: ptr to boolean value which is set to false in case balance
4347  *                 is being resumed
4348  *
4349  * Return 0 on success, in which case fs_info::balance_mutex is held and
4350  * exclusive ops are blocked. In case of failure return an error code.
4351  */
4352 static int btrfs_try_lock_balance(struct btrfs_fs_info *fs_info, bool *excl_acquired)
4353 {
4354 	int ret;
4355 
4356 	/*
4357 	 * Exclusive operation is locked. Three possibilities:
4358 	 *   (1) some other op is running
4359 	 *   (2) balance is running
4360 	 *   (3) balance is paused -- special case (think resume)
4361 	 */
4362 	while (1) {
4363 		if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
4364 			*excl_acquired = true;
4365 			mutex_lock(&fs_info->balance_mutex);
4366 			return 0;
4367 		}
4368 
4369 		mutex_lock(&fs_info->balance_mutex);
4370 		if (fs_info->balance_ctl) {
4371 			/* This is either (2) or (3) */
4372 			if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4373 				/* This is (2) */
4374 				ret = -EINPROGRESS;
4375 				goto out_failure;
4376 
4377 			} else {
4378 				mutex_unlock(&fs_info->balance_mutex);
4379 				/*
4380 				 * Lock released to allow other waiters to
4381 				 * continue, we'll reexamine the status again.
4382 				 */
4383 				mutex_lock(&fs_info->balance_mutex);
4384 
4385 				if (fs_info->balance_ctl &&
4386 				    !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4387 					/* This is (3) */
4388 					*excl_acquired = false;
4389 					return 0;
4390 				}
4391 			}
4392 		} else {
4393 			/* This is (1) */
4394 			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4395 			goto out_failure;
4396 		}
4397 
4398 		mutex_unlock(&fs_info->balance_mutex);
4399 	}
4400 
4401 out_failure:
4402 	mutex_unlock(&fs_info->balance_mutex);
4403 	*excl_acquired = false;
4404 	return ret;
4405 }
4406 
4407 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4408 {
4409 	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4410 	struct btrfs_fs_info *fs_info = root->fs_info;
4411 	struct btrfs_ioctl_balance_args *bargs;
4412 	struct btrfs_balance_control *bctl;
4413 	bool need_unlock = true;
4414 	int ret;
4415 
4416 	if (!capable(CAP_SYS_ADMIN))
4417 		return -EPERM;
4418 
4419 	ret = mnt_want_write_file(file);
4420 	if (ret)
4421 		return ret;
4422 
4423 	bargs = memdup_user(arg, sizeof(*bargs));
4424 	if (IS_ERR(bargs)) {
4425 		ret = PTR_ERR(bargs);
4426 		bargs = NULL;
4427 		goto out;
4428 	}
4429 
4430 	ret = btrfs_try_lock_balance(fs_info, &need_unlock);
4431 	if (ret)
4432 		goto out;
4433 
4434 	lockdep_assert_held(&fs_info->balance_mutex);
4435 
4436 	if (bargs->flags & BTRFS_BALANCE_RESUME) {
4437 		if (!fs_info->balance_ctl) {
4438 			ret = -ENOTCONN;
4439 			goto out_unlock;
4440 		}
4441 
4442 		bctl = fs_info->balance_ctl;
4443 		spin_lock(&fs_info->balance_lock);
4444 		bctl->flags |= BTRFS_BALANCE_RESUME;
4445 		spin_unlock(&fs_info->balance_lock);
4446 		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE);
4447 
4448 		goto do_balance;
4449 	}
4450 
4451 	if (bargs->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4452 		ret = -EINVAL;
4453 		goto out_unlock;
4454 	}
4455 
4456 	if (fs_info->balance_ctl) {
4457 		ret = -EINPROGRESS;
4458 		goto out_unlock;
4459 	}
4460 
4461 	bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4462 	if (!bctl) {
4463 		ret = -ENOMEM;
4464 		goto out_unlock;
4465 	}
4466 
4467 	memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4468 	memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4469 	memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4470 
4471 	bctl->flags = bargs->flags;
4472 do_balance:
4473 	/*
4474 	 * Ownership of bctl and exclusive operation goes to btrfs_balance.
4475 	 * bctl is freed in reset_balance_state, or, if restriper was paused
4476 	 * all the way until unmount, in free_fs_info.  The flag should be
4477 	 * cleared after reset_balance_state.
4478 	 */
4479 	need_unlock = false;
4480 
4481 	ret = btrfs_balance(fs_info, bctl, bargs);
4482 	bctl = NULL;
4483 
4484 	if (ret == 0 || ret == -ECANCELED) {
4485 		if (copy_to_user(arg, bargs, sizeof(*bargs)))
4486 			ret = -EFAULT;
4487 	}
4488 
4489 	kfree(bctl);
4490 out_unlock:
4491 	mutex_unlock(&fs_info->balance_mutex);
4492 	if (need_unlock)
4493 		btrfs_exclop_finish(fs_info);
4494 out:
4495 	mnt_drop_write_file(file);
4496 	kfree(bargs);
4497 	return ret;
4498 }
4499 
4500 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4501 {
4502 	if (!capable(CAP_SYS_ADMIN))
4503 		return -EPERM;
4504 
4505 	switch (cmd) {
4506 	case BTRFS_BALANCE_CTL_PAUSE:
4507 		return btrfs_pause_balance(fs_info);
4508 	case BTRFS_BALANCE_CTL_CANCEL:
4509 		return btrfs_cancel_balance(fs_info);
4510 	}
4511 
4512 	return -EINVAL;
4513 }
4514 
4515 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4516 					 void __user *arg)
4517 {
4518 	struct btrfs_ioctl_balance_args *bargs;
4519 	int ret = 0;
4520 
4521 	if (!capable(CAP_SYS_ADMIN))
4522 		return -EPERM;
4523 
4524 	mutex_lock(&fs_info->balance_mutex);
4525 	if (!fs_info->balance_ctl) {
4526 		ret = -ENOTCONN;
4527 		goto out;
4528 	}
4529 
4530 	bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4531 	if (!bargs) {
4532 		ret = -ENOMEM;
4533 		goto out;
4534 	}
4535 
4536 	btrfs_update_ioctl_balance_args(fs_info, bargs);
4537 
4538 	if (copy_to_user(arg, bargs, sizeof(*bargs)))
4539 		ret = -EFAULT;
4540 
4541 	kfree(bargs);
4542 out:
4543 	mutex_unlock(&fs_info->balance_mutex);
4544 	return ret;
4545 }
4546 
4547 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
4548 {
4549 	struct inode *inode = file_inode(file);
4550 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4551 	struct btrfs_ioctl_quota_ctl_args *sa;
4552 	int ret;
4553 
4554 	if (!capable(CAP_SYS_ADMIN))
4555 		return -EPERM;
4556 
4557 	ret = mnt_want_write_file(file);
4558 	if (ret)
4559 		return ret;
4560 
4561 	sa = memdup_user(arg, sizeof(*sa));
4562 	if (IS_ERR(sa)) {
4563 		ret = PTR_ERR(sa);
4564 		goto drop_write;
4565 	}
4566 
4567 	down_write(&fs_info->subvol_sem);
4568 
4569 	switch (sa->cmd) {
4570 	case BTRFS_QUOTA_CTL_ENABLE:
4571 		ret = btrfs_quota_enable(fs_info);
4572 		break;
4573 	case BTRFS_QUOTA_CTL_DISABLE:
4574 		ret = btrfs_quota_disable(fs_info);
4575 		break;
4576 	default:
4577 		ret = -EINVAL;
4578 		break;
4579 	}
4580 
4581 	kfree(sa);
4582 	up_write(&fs_info->subvol_sem);
4583 drop_write:
4584 	mnt_drop_write_file(file);
4585 	return ret;
4586 }
4587 
4588 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
4589 {
4590 	struct inode *inode = file_inode(file);
4591 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4592 	struct btrfs_root *root = BTRFS_I(inode)->root;
4593 	struct btrfs_ioctl_qgroup_assign_args *sa;
4594 	struct btrfs_trans_handle *trans;
4595 	int ret;
4596 	int err;
4597 
4598 	if (!capable(CAP_SYS_ADMIN))
4599 		return -EPERM;
4600 
4601 	ret = mnt_want_write_file(file);
4602 	if (ret)
4603 		return ret;
4604 
4605 	sa = memdup_user(arg, sizeof(*sa));
4606 	if (IS_ERR(sa)) {
4607 		ret = PTR_ERR(sa);
4608 		goto drop_write;
4609 	}
4610 
4611 	trans = btrfs_join_transaction(root);
4612 	if (IS_ERR(trans)) {
4613 		ret = PTR_ERR(trans);
4614 		goto out;
4615 	}
4616 
4617 	if (sa->assign) {
4618 		ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
4619 	} else {
4620 		ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
4621 	}
4622 
4623 	/* update qgroup status and info */
4624 	err = btrfs_run_qgroups(trans);
4625 	if (err < 0)
4626 		btrfs_handle_fs_error(fs_info, err,
4627 				      "failed to update qgroup status and info");
4628 	err = btrfs_end_transaction(trans);
4629 	if (err && !ret)
4630 		ret = err;
4631 
4632 out:
4633 	kfree(sa);
4634 drop_write:
4635 	mnt_drop_write_file(file);
4636 	return ret;
4637 }
4638 
4639 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
4640 {
4641 	struct inode *inode = file_inode(file);
4642 	struct btrfs_root *root = BTRFS_I(inode)->root;
4643 	struct btrfs_ioctl_qgroup_create_args *sa;
4644 	struct btrfs_trans_handle *trans;
4645 	int ret;
4646 	int err;
4647 
4648 	if (!capable(CAP_SYS_ADMIN))
4649 		return -EPERM;
4650 
4651 	ret = mnt_want_write_file(file);
4652 	if (ret)
4653 		return ret;
4654 
4655 	sa = memdup_user(arg, sizeof(*sa));
4656 	if (IS_ERR(sa)) {
4657 		ret = PTR_ERR(sa);
4658 		goto drop_write;
4659 	}
4660 
4661 	if (!sa->qgroupid) {
4662 		ret = -EINVAL;
4663 		goto out;
4664 	}
4665 
4666 	trans = btrfs_join_transaction(root);
4667 	if (IS_ERR(trans)) {
4668 		ret = PTR_ERR(trans);
4669 		goto out;
4670 	}
4671 
4672 	if (sa->create) {
4673 		ret = btrfs_create_qgroup(trans, sa->qgroupid);
4674 	} else {
4675 		ret = btrfs_remove_qgroup(trans, sa->qgroupid);
4676 	}
4677 
4678 	err = btrfs_end_transaction(trans);
4679 	if (err && !ret)
4680 		ret = err;
4681 
4682 out:
4683 	kfree(sa);
4684 drop_write:
4685 	mnt_drop_write_file(file);
4686 	return ret;
4687 }
4688 
4689 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
4690 {
4691 	struct inode *inode = file_inode(file);
4692 	struct btrfs_root *root = BTRFS_I(inode)->root;
4693 	struct btrfs_ioctl_qgroup_limit_args *sa;
4694 	struct btrfs_trans_handle *trans;
4695 	int ret;
4696 	int err;
4697 	u64 qgroupid;
4698 
4699 	if (!capable(CAP_SYS_ADMIN))
4700 		return -EPERM;
4701 
4702 	ret = mnt_want_write_file(file);
4703 	if (ret)
4704 		return ret;
4705 
4706 	sa = memdup_user(arg, sizeof(*sa));
4707 	if (IS_ERR(sa)) {
4708 		ret = PTR_ERR(sa);
4709 		goto drop_write;
4710 	}
4711 
4712 	trans = btrfs_join_transaction(root);
4713 	if (IS_ERR(trans)) {
4714 		ret = PTR_ERR(trans);
4715 		goto out;
4716 	}
4717 
4718 	qgroupid = sa->qgroupid;
4719 	if (!qgroupid) {
4720 		/* take the current subvol as qgroup */
4721 		qgroupid = root->root_key.objectid;
4722 	}
4723 
4724 	ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
4725 
4726 	err = btrfs_end_transaction(trans);
4727 	if (err && !ret)
4728 		ret = err;
4729 
4730 out:
4731 	kfree(sa);
4732 drop_write:
4733 	mnt_drop_write_file(file);
4734 	return ret;
4735 }
4736 
4737 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
4738 {
4739 	struct inode *inode = file_inode(file);
4740 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4741 	struct btrfs_ioctl_quota_rescan_args *qsa;
4742 	int ret;
4743 
4744 	if (!capable(CAP_SYS_ADMIN))
4745 		return -EPERM;
4746 
4747 	ret = mnt_want_write_file(file);
4748 	if (ret)
4749 		return ret;
4750 
4751 	qsa = memdup_user(arg, sizeof(*qsa));
4752 	if (IS_ERR(qsa)) {
4753 		ret = PTR_ERR(qsa);
4754 		goto drop_write;
4755 	}
4756 
4757 	if (qsa->flags) {
4758 		ret = -EINVAL;
4759 		goto out;
4760 	}
4761 
4762 	ret = btrfs_qgroup_rescan(fs_info);
4763 
4764 out:
4765 	kfree(qsa);
4766 drop_write:
4767 	mnt_drop_write_file(file);
4768 	return ret;
4769 }
4770 
4771 static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
4772 						void __user *arg)
4773 {
4774 	struct btrfs_ioctl_quota_rescan_args qsa = {0};
4775 
4776 	if (!capable(CAP_SYS_ADMIN))
4777 		return -EPERM;
4778 
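	/* Report whether a rescan is in progress and, if so, how far it got. */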
4779 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4780 		qsa.flags = 1;
4781 		qsa.progress = fs_info->qgroup_rescan_progress.objectid;
4782 	}
4783 
4784 	if (copy_to_user(arg, &qsa, sizeof(qsa)))
4785 		return -EFAULT;
4786 
4787 	return 0;
4788 }
4789 
4790 static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
4791 						void __user *arg)
4792 {
4793 	if (!capable(CAP_SYS_ADMIN))
4794 		return -EPERM;
4795 
4796 	return btrfs_qgroup_wait_for_completion(fs_info, true);
4797 }
4798 
4799 static long _btrfs_ioctl_set_received_subvol(struct file *file,
4800 					    struct user_namespace *mnt_userns,
4801 					    struct btrfs_ioctl_received_subvol_args *sa)
4802 {
4803 	struct inode *inode = file_inode(file);
4804 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4805 	struct btrfs_root *root = BTRFS_I(inode)->root;
4806 	struct btrfs_root_item *root_item = &root->root_item;
4807 	struct btrfs_trans_handle *trans;
4808 	struct timespec64 ct = current_time(inode);
4809 	int ret = 0;
4810 	int received_uuid_changed;
4811 
4812 	if (!inode_owner_or_capable(mnt_userns, inode))
4813 		return -EPERM;
4814 
4815 	ret = mnt_want_write_file(file);
4816 	if (ret < 0)
4817 		return ret;
4818 
4819 	down_write(&fs_info->subvol_sem);
4820 
4821 	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
4822 		ret = -EINVAL;
4823 		goto out;
4824 	}
4825 
4826 	if (btrfs_root_readonly(root)) {
4827 		ret = -EROFS;
4828 		goto out;
4829 	}
4830 
4831 	/*
4832 	 * 1 - root item
4833 	 * 2 - uuid items (received uuid + subvol uuid)
4834 	 */
4835 	trans = btrfs_start_transaction(root, 3);
4836 	if (IS_ERR(trans)) {
4837 		ret = PTR_ERR(trans);
4838 		trans = NULL;
4839 		goto out;
4840 	}
4841 
4842 	sa->rtransid = trans->transid;
4843 	sa->rtime.sec = ct.tv_sec;
4844 	sa->rtime.nsec = ct.tv_nsec;
4845 
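
	/*
	 * If the received UUID changes, drop the stale UUID tree entry before
	 * the new one is recorded further below.
	 */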
4846 	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
4847 				       BTRFS_UUID_SIZE);
4848 	if (received_uuid_changed &&
4849 	    !btrfs_is_empty_uuid(root_item->received_uuid)) {
4850 		ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
4851 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4852 					  root->root_key.objectid);
4853 		if (ret && ret != -ENOENT) {
4854 			btrfs_abort_transaction(trans, ret);
4855 			btrfs_end_transaction(trans);
4856 			goto out;
4857 		}
4858 	}
4859 	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
4860 	btrfs_set_root_stransid(root_item, sa->stransid);
4861 	btrfs_set_root_rtransid(root_item, sa->rtransid);
4862 	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
4863 	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
4864 	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
4865 	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
4866 
4867 	ret = btrfs_update_root(trans, fs_info->tree_root,
4868 				&root->root_key, &root->root_item);
4869 	if (ret < 0) {
4870 		btrfs_end_transaction(trans);
4871 		goto out;
4872 	}
4873 	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
4874 		ret = btrfs_uuid_tree_add(trans, sa->uuid,
4875 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4876 					  root->root_key.objectid);
4877 		if (ret < 0 && ret != -EEXIST) {
4878 			btrfs_abort_transaction(trans, ret);
4879 			btrfs_end_transaction(trans);
4880 			goto out;
4881 		}
4882 	}
4883 	ret = btrfs_commit_transaction(trans);
4884 out:
4885 	up_write(&fs_info->subvol_sem);
4886 	mnt_drop_write_file(file);
4887 	return ret;
4888 }
4889 
4890 #ifdef CONFIG_64BIT
4891 static long btrfs_ioctl_set_received_subvol_32(struct file *file,
4892 						void __user *arg)
4893 {
4894 	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
4895 	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
4896 	int ret = 0;
4897 
4898 	args32 = memdup_user(arg, sizeof(*args32));
4899 	if (IS_ERR(args32))
4900 		return PTR_ERR(args32);
4901 
4902 	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
4903 	if (!args64) {
4904 		ret = -ENOMEM;
4905 		goto out;
4906 	}
4907 
4908 	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
4909 	args64->stransid = args32->stransid;
4910 	args64->rtransid = args32->rtransid;
4911 	args64->stime.sec = args32->stime.sec;
4912 	args64->stime.nsec = args32->stime.nsec;
4913 	args64->rtime.sec = args32->rtime.sec;
4914 	args64->rtime.nsec = args32->rtime.nsec;
4915 	args64->flags = args32->flags;
4916 
4917 	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), args64);
4918 	if (ret)
4919 		goto out;
4920 
4921 	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
4922 	args32->stransid = args64->stransid;
4923 	args32->rtransid = args64->rtransid;
4924 	args32->stime.sec = args64->stime.sec;
4925 	args32->stime.nsec = args64->stime.nsec;
4926 	args32->rtime.sec = args64->rtime.sec;
4927 	args32->rtime.nsec = args64->rtime.nsec;
4928 	args32->flags = args64->flags;
4929 
4930 	ret = copy_to_user(arg, args32, sizeof(*args32));
4931 	if (ret)
4932 		ret = -EFAULT;
4933 
4934 out:
4935 	kfree(args32);
4936 	kfree(args64);
4937 	return ret;
4938 }
4939 #endif
4940 
4941 static long btrfs_ioctl_set_received_subvol(struct file *file,
4942 					    void __user *arg)
4943 {
4944 	struct btrfs_ioctl_received_subvol_args *sa = NULL;
4945 	int ret = 0;
4946 
4947 	sa = memdup_user(arg, sizeof(*sa));
4948 	if (IS_ERR(sa))
4949 		return PTR_ERR(sa);
4950 
4951 	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), sa);
4952 
4953 	if (ret)
4954 		goto out;
4955 
4956 	ret = copy_to_user(arg, sa, sizeof(*sa));
4957 	if (ret)
4958 		ret = -EFAULT;
4959 
4960 out:
4961 	kfree(sa);
4962 	return ret;
4963 }
4964 
4965 static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
4966 					void __user *arg)
4967 {
4968 	size_t len;
4969 	int ret;
4970 	char label[BTRFS_LABEL_SIZE];
4971 
4972 	spin_lock(&fs_info->super_lock);
4973 	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
4974 	spin_unlock(&fs_info->super_lock);
4975 
4976 	len = strnlen(label, BTRFS_LABEL_SIZE);
4977 
4978 	if (len == BTRFS_LABEL_SIZE) {
4979 		btrfs_warn(fs_info,
4980 			   "label is too long, return the first %zu bytes",
4981 			   --len);
4982 	}
4983 
4984 	ret = copy_to_user(arg, label, len);
4985 
4986 	return ret ? -EFAULT : 0;
4987 }
4988 
4989 static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
4990 {
4991 	struct inode *inode = file_inode(file);
4992 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4993 	struct btrfs_root *root = BTRFS_I(inode)->root;
4994 	struct btrfs_super_block *super_block = fs_info->super_copy;
4995 	struct btrfs_trans_handle *trans;
4996 	char label[BTRFS_LABEL_SIZE];
4997 	int ret;
4998 
4999 	if (!capable(CAP_SYS_ADMIN))
5000 		return -EPERM;
5001 
5002 	if (copy_from_user(label, arg, sizeof(label)))
5003 		return -EFAULT;
5004 
5005 	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
5006 		btrfs_err(fs_info,
5007 			  "unable to set label with more than %d bytes",
5008 			  BTRFS_LABEL_SIZE - 1);
5009 		return -EINVAL;
5010 	}
5011 
5012 	ret = mnt_want_write_file(file);
5013 	if (ret)
5014 		return ret;
5015 
5016 	trans = btrfs_start_transaction(root, 0);
5017 	if (IS_ERR(trans)) {
5018 		ret = PTR_ERR(trans);
5019 		goto out_unlock;
5020 	}
5021 
5022 	spin_lock(&fs_info->super_lock);
5023 	strcpy(super_block->label, label);
5024 	spin_unlock(&fs_info->super_lock);
5025 	ret = btrfs_commit_transaction(trans);
5026 
5027 out_unlock:
5028 	mnt_drop_write_file(file);
5029 	return ret;
5030 }
5031 
5032 #define INIT_FEATURE_FLAGS(suffix) \
5033 	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
5034 	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
5035 	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
5036 
5037 int btrfs_ioctl_get_supported_features(void __user *arg)
5038 {
5039 	static const struct btrfs_ioctl_feature_flags features[3] = {
5040 		INIT_FEATURE_FLAGS(SUPP),
5041 		INIT_FEATURE_FLAGS(SAFE_SET),
5042 		INIT_FEATURE_FLAGS(SAFE_CLEAR)
5043 	};
5044 
5045 	if (copy_to_user(arg, &features, sizeof(features)))
5046 		return -EFAULT;
5047 
5048 	return 0;
5049 }
5050 
5051 static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
5052 					void __user *arg)
5053 {
5054 	struct btrfs_super_block *super_block = fs_info->super_copy;
5055 	struct btrfs_ioctl_feature_flags features;
5056 
5057 	features.compat_flags = btrfs_super_compat_flags(super_block);
5058 	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
5059 	features.incompat_flags = btrfs_super_incompat_flags(super_block);
5060 
5061 	if (copy_to_user(arg, &features, sizeof(features)))
5062 		return -EFAULT;
5063 
5064 	return 0;
5065 }
5066 
5067 static int check_feature_bits(struct btrfs_fs_info *fs_info,
5068 			      enum btrfs_feature_set set,
5069 			      u64 change_mask, u64 flags, u64 supported_flags,
5070 			      u64 safe_set, u64 safe_clear)
5071 {
5072 	const char *type = btrfs_feature_set_name(set);
5073 	char *names;
5074 	u64 disallowed, unsupported;
5075 	u64 set_mask = flags & change_mask;
5076 	u64 clear_mask = ~flags & change_mask;
5077 
5078 	unsupported = set_mask & ~supported_flags;
5079 	if (unsupported) {
5080 		names = btrfs_printable_features(set, unsupported);
5081 		if (names) {
5082 			btrfs_warn(fs_info,
5083 				   "this kernel does not support the %s feature bit%s",
5084 				   names, strchr(names, ',') ? "s" : "");
5085 			kfree(names);
5086 		} else
5087 			btrfs_warn(fs_info,
5088 				   "this kernel does not support %s bits 0x%llx",
5089 				   type, unsupported);
5090 		return -EOPNOTSUPP;
5091 	}
5092 
5093 	disallowed = set_mask & ~safe_set;
5094 	if (disallowed) {
5095 		names = btrfs_printable_features(set, disallowed);
5096 		if (names) {
5097 			btrfs_warn(fs_info,
5098 				   "can't set the %s feature bit%s while mounted",
5099 				   names, strchr(names, ',') ? "s" : "");
5100 			kfree(names);
5101 		} else
5102 			btrfs_warn(fs_info,
5103 				   "can't set %s bits 0x%llx while mounted",
5104 				   type, disallowed);
5105 		return -EPERM;
5106 	}
5107 
5108 	disallowed = clear_mask & ~safe_clear;
5109 	if (disallowed) {
5110 		names = btrfs_printable_features(set, disallowed);
5111 		if (names) {
5112 			btrfs_warn(fs_info,
5113 				   "can't clear the %s feature bit%s while mounted",
5114 				   names, strchr(names, ',') ? "s" : "");
5115 			kfree(names);
5116 		} else
5117 			btrfs_warn(fs_info,
5118 				   "can't clear %s bits 0x%llx while mounted",
5119 				   type, disallowed);
5120 		return -EPERM;
5121 	}
5122 
5123 	return 0;
5124 }
5125 
5126 #define check_feature(fs_info, change_mask, flags, mask_base)	\
5127 check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
5128 		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
5129 		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
5130 		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
5131 
5132 static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
5133 {
5134 	struct inode *inode = file_inode(file);
5135 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5136 	struct btrfs_root *root = BTRFS_I(inode)->root;
5137 	struct btrfs_super_block *super_block = fs_info->super_copy;
5138 	struct btrfs_ioctl_feature_flags flags[2];
5139 	struct btrfs_trans_handle *trans;
5140 	u64 newflags;
5141 	int ret;
5142 
5143 	if (!capable(CAP_SYS_ADMIN))
5144 		return -EPERM;
5145 
5146 	if (copy_from_user(flags, arg, sizeof(flags)))
5147 		return -EFAULT;
5148 
5149 	/* Nothing to do */
5150 	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
5151 	    !flags[0].incompat_flags)
5152 		return 0;
5153 
5154 	ret = check_feature(fs_info, flags[0].compat_flags,
5155 			    flags[1].compat_flags, COMPAT);
5156 	if (ret)
5157 		return ret;
5158 
5159 	ret = check_feature(fs_info, flags[0].compat_ro_flags,
5160 			    flags[1].compat_ro_flags, COMPAT_RO);
5161 	if (ret)
5162 		return ret;
5163 
5164 	ret = check_feature(fs_info, flags[0].incompat_flags,
5165 			    flags[1].incompat_flags, INCOMPAT);
5166 	if (ret)
5167 		return ret;
5168 
5169 	ret = mnt_want_write_file(file);
5170 	if (ret)
5171 		return ret;
5172 
5173 	trans = btrfs_start_transaction(root, 0);
5174 	if (IS_ERR(trans)) {
5175 		ret = PTR_ERR(trans);
5176 		goto out_drop_write;
5177 	}
5178 
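	/*
	 * flags[0] selects which feature bits to change and flags[1] holds
	 * their new values: set the bits that are requested and enabled, clear
	 * the bits that are requested but disabled.
	 */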
5179 	spin_lock(&fs_info->super_lock);
5180 	newflags = btrfs_super_compat_flags(super_block);
5181 	newflags |= flags[0].compat_flags & flags[1].compat_flags;
5182 	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
5183 	btrfs_set_super_compat_flags(super_block, newflags);
5184 
5185 	newflags = btrfs_super_compat_ro_flags(super_block);
5186 	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
5187 	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
5188 	btrfs_set_super_compat_ro_flags(super_block, newflags);
5189 
5190 	newflags = btrfs_super_incompat_flags(super_block);
5191 	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
5192 	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
5193 	btrfs_set_super_incompat_flags(super_block, newflags);
5194 	spin_unlock(&fs_info->super_lock);
5195 
5196 	ret = btrfs_commit_transaction(trans);
5197 out_drop_write:
5198 	mnt_drop_write_file(file);
5199 
5200 	return ret;
5201 }
5202 
5203 static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat)
5204 {
5205 	struct btrfs_ioctl_send_args *arg;
5206 	int ret;
5207 
5208 	if (compat) {
5209 #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
5210 		struct btrfs_ioctl_send_args_32 args32;
5211 
5212 		ret = copy_from_user(&args32, argp, sizeof(args32));
5213 		if (ret)
5214 			return -EFAULT;
5215 		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
5216 		if (!arg)
5217 			return -ENOMEM;
5218 		arg->send_fd = args32.send_fd;
5219 		arg->clone_sources_count = args32.clone_sources_count;
5220 		arg->clone_sources = compat_ptr(args32.clone_sources);
5221 		arg->parent_root = args32.parent_root;
5222 		arg->flags = args32.flags;
5223 		memcpy(arg->reserved, args32.reserved,
5224 		       sizeof(args32.reserved));
5225 #else
5226 		return -ENOTTY;
5227 #endif
5228 	} else {
5229 		arg = memdup_user(argp, sizeof(*arg));
5230 		if (IS_ERR(arg))
5231 			return PTR_ERR(arg);
5232 	}
5233 	ret = btrfs_ioctl_send(inode, arg);
5234 	kfree(arg);
5235 	return ret;
5236 }
5237 
5238 static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
5239 				    bool compat)
5240 {
5241 	struct btrfs_ioctl_encoded_io_args args = { 0 };
5242 	size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args,
5243 					     flags);
5244 	size_t copy_end;
5245 	struct iovec iovstack[UIO_FASTIOV];
5246 	struct iovec *iov = iovstack;
5247 	struct iov_iter iter;
5248 	loff_t pos;
5249 	struct kiocb kiocb;
5250 	ssize_t ret;
5251 
5252 	if (!capable(CAP_SYS_ADMIN)) {
5253 		ret = -EPERM;
5254 		goto out_acct;
5255 	}
5256 
5257 	if (compat) {
5258 #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
5259 		struct btrfs_ioctl_encoded_io_args_32 args32;
5260 
5261 		copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32,
5262 				       flags);
5263 		if (copy_from_user(&args32, argp, copy_end)) {
5264 			ret = -EFAULT;
5265 			goto out_acct;
5266 		}
5267 		args.iov = compat_ptr(args32.iov);
5268 		args.iovcnt = args32.iovcnt;
5269 		args.offset = args32.offset;
5270 		args.flags = args32.flags;
5271 #else
5272 		return -ENOTTY;
5273 #endif
5274 	} else {
5275 		copy_end = copy_end_kernel;
5276 		if (copy_from_user(&args, argp, copy_end)) {
5277 			ret = -EFAULT;
5278 			goto out_acct;
5279 		}
5280 	}
5281 	if (args.flags != 0) {
5282 		ret = -EINVAL;
5283 		goto out_acct;
5284 	}
5285 
5286 	ret = import_iovec(READ, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
5287 			   &iov, &iter);
5288 	if (ret < 0)
5289 		goto out_acct;
5290 
5291 	if (iov_iter_count(&iter) == 0) {
5292 		ret = 0;
5293 		goto out_iov;
5294 	}
5295 	pos = args.offset;
5296 	ret = rw_verify_area(READ, file, &pos, args.len);
5297 	if (ret < 0)
5298 		goto out_iov;
5299 
5300 	init_sync_kiocb(&kiocb, file);
5301 	kiocb.ki_pos = pos;
5302 
5303 	ret = btrfs_encoded_read(&kiocb, &iter, &args);
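	/*
	 * On success copy back only the output fields, i.e. everything after
	 * 'flags'; the input part of the structure is left untouched.
	 */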
5304 	if (ret >= 0) {
5305 		fsnotify_access(file);
5306 		if (copy_to_user(argp + copy_end,
5307 				 (char *)&args + copy_end_kernel,
5308 				 sizeof(args) - copy_end_kernel))
5309 			ret = -EFAULT;
5310 	}
5311 
5312 out_iov:
5313 	kfree(iov);
5314 out_acct:
5315 	if (ret > 0)
5316 		add_rchar(current, ret);
5317 	inc_syscr(current);
5318 	return ret;
5319 }
5320 
5321 static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool compat)
5322 {
5323 	struct btrfs_ioctl_encoded_io_args args;
5324 	struct iovec iovstack[UIO_FASTIOV];
5325 	struct iovec *iov = iovstack;
5326 	struct iov_iter iter;
5327 	loff_t pos;
5328 	struct kiocb kiocb;
5329 	ssize_t ret;
5330 
5331 	if (!capable(CAP_SYS_ADMIN)) {
5332 		ret = -EPERM;
5333 		goto out_acct;
5334 	}
5335 
5336 	if (!(file->f_mode & FMODE_WRITE)) {
5337 		ret = -EBADF;
5338 		goto out_acct;
5339 	}
5340 
5341 	if (compat) {
5342 #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
5343 		struct btrfs_ioctl_encoded_io_args_32 args32;
5344 
5345 		if (copy_from_user(&args32, argp, sizeof(args32))) {
5346 			ret = -EFAULT;
5347 			goto out_acct;
5348 		}
5349 		args.iov = compat_ptr(args32.iov);
5350 		args.iovcnt = args32.iovcnt;
5351 		args.offset = args32.offset;
5352 		args.flags = args32.flags;
5353 		args.len = args32.len;
5354 		args.unencoded_len = args32.unencoded_len;
5355 		args.unencoded_offset = args32.unencoded_offset;
5356 		args.compression = args32.compression;
5357 		args.encryption = args32.encryption;
5358 		memcpy(args.reserved, args32.reserved, sizeof(args.reserved));
5359 #else
5360 		return -ENOTTY;
5361 #endif
5362 	} else {
5363 		if (copy_from_user(&args, argp, sizeof(args))) {
5364 			ret = -EFAULT;
5365 			goto out_acct;
5366 		}
5367 	}
5368 
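	/* Validate the user-supplied encoding description before doing any I/O. */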
5369 	ret = -EINVAL;
5370 	if (args.flags != 0)
5371 		goto out_acct;
5372 	if (memchr_inv(args.reserved, 0, sizeof(args.reserved)))
5373 		goto out_acct;
5374 	if (args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE &&
5375 	    args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE)
5376 		goto out_acct;
5377 	if (args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES ||
5378 	    args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES)
5379 		goto out_acct;
5380 	if (args.unencoded_offset > args.unencoded_len)
5381 		goto out_acct;
5382 	if (args.len > args.unencoded_len - args.unencoded_offset)
5383 		goto out_acct;
5384 
	ret = import_iovec(WRITE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		goto out_acct;

	file_start_write(file);

	if (iov_iter_count(&iter) == 0) {
		ret = 0;
		goto out_end_write;
	}
	pos = args.offset;
	ret = rw_verify_area(WRITE, file, &pos, args.len);
	if (ret < 0)
		goto out_end_write;

	init_sync_kiocb(&kiocb, file);
	ret = kiocb_set_rw_flags(&kiocb, 0);
	if (ret)
		goto out_end_write;
	kiocb.ki_pos = pos;

	ret = btrfs_do_write_iter(&kiocb, &iter, &args);
	if (ret > 0)
		fsnotify_modify(file);

out_end_write:
	file_end_write(file);
	kfree(iov);
out_acct:
	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

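/*
 * Main btrfs ioctl entry point: dispatch each supported command to its
 * handler and return -ENOTTY for anything unrecognized.
 */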
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(inode, argp);
	case FS_IOC_GETFSLABEL:
		return btrfs_ioctl_get_fslabel(fs_info, argp);
	case FS_IOC_SETFSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(fs_info, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp, false);
	case BTRFS_IOC_SNAP_DESTROY_V2:
		return btrfs_ioctl_snap_destroy(file, argp, true);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(inode, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(inode, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(inode, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(root, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(inode, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(inode, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(fs_info, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(fs_info, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(inode, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(root, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	case FS_IOC_ENABLE_VERITY:
		return fsverity_ioctl_enable(file, (const void __user *)argp);
	case FS_IOC_MEASURE_VERITY:
		return fsverity_ioctl_measure(file, argp);
	case BTRFS_IOC_ENCODED_READ:
		return btrfs_ioctl_encoded_read(file, argp, false);
	case BTRFS_IOC_ENCODED_WRITE:
		return btrfs_ioctl_encoded_write(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_ENCODED_READ_32:
		return btrfs_ioctl_encoded_read(file, argp, true);
	case BTRFS_IOC_ENCODED_WRITE_32:
		return btrfs_ioctl_encoded_write(file, argp, true);
#endif
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
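/*
 * 32-bit compat entry point: remap command numbers that differ from the
 * native ones, then hand off to btrfs_ioctl().
 */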
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif