1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6 #include <linux/blkdev.h>
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/pagemap.h>
10 #include <linux/highmem.h>
11 #include <linux/time.h>
12 #include <linux/init.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/backing-dev.h>
16 #include <linux/mount.h>
17 #include <linux/writeback.h>
18 #include <linux/statfs.h>
19 #include <linux/compat.h>
20 #include <linux/parser.h>
21 #include <linux/ctype.h>
22 #include <linux/namei.h>
23 #include <linux/miscdevice.h>
24 #include <linux/magic.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/crc32c.h>
28 #include <linux/btrfs.h>
29 #include "delayed-inode.h"
30 #include "ctree.h"
31 #include "disk-io.h"
32 #include "transaction.h"
33 #include "btrfs_inode.h"
34 #include "print-tree.h"
35 #include "props.h"
36 #include "xattr.h"
37 #include "volumes.h"
38 #include "export.h"
39 #include "compression.h"
40 #include "rcu-string.h"
41 #include "dev-replace.h"
42 #include "free-space-cache.h"
43 #include "backref.h"
44 #include "space-info.h"
45 #include "sysfs.h"
46 #include "zoned.h"
47 #include "tests/btrfs-tests.h"
48 #include "block-group.h"
49 #include "discard.h"
50 #include "qgroup.h"
51 #include "raid56.h"
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/btrfs.h>
54
55 static const struct super_operations btrfs_super_ops;
56
57 /*
58 * Types for mounting the default subvolume and a subvolume explicitly
59 * requested by subvol=/path. That way the callchain is straightforward and we
60 * don't have to play tricks with the mount options and recursive calls to
61 * btrfs_mount.
62 *
63 * The new btrfs_root_fs_type also serves as a tag for the bdev_holder.
64 */
65 static struct file_system_type btrfs_fs_type;
66 static struct file_system_type btrfs_root_fs_type;
67
68 static int btrfs_remount(struct super_block *sb, int *flags, char *data);
69
70 #ifdef CONFIG_PRINTK
71
72 #define STATE_STRING_PREFACE ": state "
73 #define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT)
74
75 /*
76 * Characters to print to indicate error conditions or uncommon filesystem state.
77 * RO is not an error.
78 */
79 static const char fs_state_chars[] = {
80 [BTRFS_FS_STATE_ERROR] = 'E',
81 [BTRFS_FS_STATE_REMOUNTING] = 'M',
82 [BTRFS_FS_STATE_RO] = 0,
83 [BTRFS_FS_STATE_TRANS_ABORTED] = 'A',
84 [BTRFS_FS_STATE_DEV_REPLACING] = 'R',
85 [BTRFS_FS_STATE_DUMMY_FS_INFO] = 0,
86 [BTRFS_FS_STATE_NO_CSUMS] = 'C',
87 [BTRFS_FS_STATE_LOG_CLEANUP_ERROR] = 'L',
88 };
89
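/*
 * Render the fs_state bits of @info into @buf as a short ": state XY..."
 * suffix for log messages, or an empty string if nothing notable is set.
 */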
90 static void btrfs_state_to_string(const struct btrfs_fs_info *info, char *buf)
91 {
92 unsigned int bit;
93 bool states_printed = false;
94 unsigned long fs_state = READ_ONCE(info->fs_state);
95 char *curr = buf;
96
97 memcpy(curr, STATE_STRING_PREFACE, sizeof(STATE_STRING_PREFACE));
98 curr += sizeof(STATE_STRING_PREFACE) - 1;
99
100 for_each_set_bit(bit, &fs_state, sizeof(fs_state)) {
101 WARN_ON_ONCE(bit >= BTRFS_FS_STATE_COUNT);
102 if ((bit < BTRFS_FS_STATE_COUNT) && fs_state_chars[bit]) {
103 *curr++ = fs_state_chars[bit];
104 states_printed = true;
105 }
106 }
107
108 /* If no states were printed, reset the buffer */
109 if (!states_printed)
110 curr = buf;
111
112 *curr++ = 0;
113 }
114 #endif
115
116 /*
117 * Generally the error codes correspond to their respective errors, but there
118 * are a few special cases.
119 *
120 * EUCLEAN: Any sort of corruption that we encounter. The tree-checker for
121 * instance will return EUCLEAN if any of the blocks are corrupted in
122 * a way that is problematic. We want to reserve EUCLEAN for these
123 * sort of corruptions.
124 *
125 * EROFS: If we check BTRFS_FS_STATE_ERROR and fail out with a return error, we
126 * need to use EROFS for this case. We will have no idea of the
127 * original failure, that will have been reported at the time we tripped
128 * over the error. Each subsequent error that doesn't have any context
129 * of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR.
130 */
131 const char * __attribute_const__ btrfs_decode_error(int errno)
132 {
133 char *errstr = "unknown";
134
135 switch (errno) {
136 case -ENOENT: /* -2 */
137 errstr = "No such entry";
138 break;
139 case -EIO: /* -5 */
140 errstr = "IO failure";
141 break;
142 case -ENOMEM: /* -12*/
143 errstr = "Out of memory";
144 break;
145 case -EEXIST: /* -17 */
146 errstr = "Object already exists";
147 break;
148 case -ENOSPC: /* -28 */
149 errstr = "No space left";
150 break;
151 case -EROFS: /* -30 */
152 errstr = "Readonly filesystem";
153 break;
154 case -EOPNOTSUPP: /* -95 */
155 errstr = "Operation not supported";
156 break;
157 case -EUCLEAN: /* -117 */
158 errstr = "Filesystem corrupted";
159 break;
160 case -EDQUOT: /* -122 */
161 errstr = "Quota exceeded";
162 break;
163 }
164
165 return errstr;
166 }
167
168 /*
169 * __btrfs_handle_fs_error decodes expected errors from the caller and
170 * invokes the appropriate error response.
171 */
172 __cold
173 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
174 unsigned int line, int errno, const char *fmt, ...)
175 {
176 struct super_block *sb = fs_info->sb;
177 #ifdef CONFIG_PRINTK
178 char statestr[STATE_STRING_BUF_LEN];
179 const char *errstr;
180 #endif
181
182 /*
183 * Special case: if the error is EROFS, and we're already
184 * under SB_RDONLY, then it is safe here.
185 */
186 if (errno == -EROFS && sb_rdonly(sb))
187 return;
188
189 #ifdef CONFIG_PRINTK
190 errstr = btrfs_decode_error(errno);
191 btrfs_state_to_string(fs_info, statestr);
192 if (fmt) {
193 struct va_format vaf;
194 va_list args;
195
196 va_start(args, fmt);
197 vaf.fmt = fmt;
198 vaf.va = &args;
199
200 pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s (%pV)\n",
201 sb->s_id, statestr, function, line, errno, errstr, &vaf);
202 va_end(args);
203 } else {
204 pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s\n",
205 sb->s_id, statestr, function, line, errno, errstr);
206 }
207 #endif
208
209 /*
210 * Today we only save the error info to memory. Long term we'll
211 * also send it down to the disk
212 */
213 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
214
215 /* Don't go through full error handling during mount */
216 if (!(sb->s_flags & SB_BORN))
217 return;
218
219 if (sb_rdonly(sb))
220 return;
221
222 btrfs_discard_stop(fs_info);
223
224 /* Btrfs handles errors by forcing the filesystem readonly. */
225 btrfs_set_sb_rdonly(sb);
226 btrfs_info(fs_info, "forced readonly");
227 /*
228 * Note that a running device replace operation is not canceled here
229 * although there is no way to update the progress. It would add the
230 * risk of a deadlock, therefore the canceling is omitted. The only
231 * penalty is that some I/O remains active until the procedure
232 * completes. The next time when the filesystem is mounted writable
233 * again, the device replace operation continues.
234 */
235 }
236
237 #ifdef CONFIG_PRINTK
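/* Human readable names for the printk log levels, indexed by the level digit. */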
238 static const char * const logtypes[] = {
239 "emergency",
240 "alert",
241 "critical",
242 "error",
243 "warning",
244 "notice",
245 "info",
246 "debug",
247 };
248
249
250 /*
251 * Use one ratelimit state per log level so that a flood of less important
252 * messages doesn't cause more important ones to be dropped.
253 */
254 static struct ratelimit_state printk_limits[] = {
255 RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
256 RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
257 RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
258 RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
259 RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
260 RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
261 RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
262 RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
263 };
264
265 void __cold _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
266 {
267 char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
268 struct va_format vaf;
269 va_list args;
270 int kern_level;
271 const char *type = logtypes[4];
272 struct ratelimit_state *ratelimit = &printk_limits[4];
273
274 va_start(args, fmt);
275
276 while ((kern_level = printk_get_level(fmt)) != 0) {
277 size_t size = printk_skip_level(fmt) - fmt;
278
279 if (kern_level >= '0' && kern_level <= '7') {
280 memcpy(lvl, fmt, size);
281 lvl[size] = '\0';
282 type = logtypes[kern_level - '0'];
283 ratelimit = &printk_limits[kern_level - '0'];
284 }
285 fmt += size;
286 }
287
288 vaf.fmt = fmt;
289 vaf.va = &args;
290
291 if (__ratelimit(ratelimit)) {
292 if (fs_info) {
293 char statestr[STATE_STRING_BUF_LEN];
294
295 btrfs_state_to_string(fs_info, statestr);
296 _printk("%sBTRFS %s (device %s%s): %pV\n", lvl, type,
297 fs_info->sb->s_id, statestr, &vaf);
298 } else {
299 _printk("%sBTRFS %s: %pV\n", lvl, type, &vaf);
300 }
301 }
302
303 va_end(args);
304 }
305 #endif
306
307 #if BITS_PER_LONG == 32
308 void __cold btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info)
309 {
310 if (!test_and_set_bit(BTRFS_FS_32BIT_WARN, &fs_info->flags)) {
311 btrfs_warn(fs_info, "reaching 32bit limit for logical addresses");
312 btrfs_warn(fs_info,
313 "due to page cache limit on 32bit systems, btrfs can't access metadata at or beyond %lluT",
314 BTRFS_32BIT_MAX_FILE_SIZE >> 40);
315 btrfs_warn(fs_info,
316 "please consider upgrading to 64bit kernel/hardware");
317 }
318 }
319
320 void __cold btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info)
321 {
322 if (!test_and_set_bit(BTRFS_FS_32BIT_ERROR, &fs_info->flags)) {
323 btrfs_err(fs_info, "reached 32bit limit for logical addresses");
324 btrfs_err(fs_info,
325 "due to page cache limit on 32bit systems, metadata beyond %lluT can't be accessed",
326 BTRFS_32BIT_MAX_FILE_SIZE >> 40);
327 btrfs_err(fs_info,
328 "please consider upgrading to 64bit kernel/hardware");
329 }
330 }
331 #endif
332
333 /*
334 * We only mark the transaction aborted and then set the file system read-only.
335 * This will prevent new transactions from starting or trying to join this
336 * one.
337 *
338 * This means that error recovery at the call site is limited to freeing
339 * any local memory allocations and passing the error code up without
340 * further cleanup. The transaction should complete as it normally would
341 * in the call path but will return -EIO.
342 *
343 * We'll complete the cleanup in btrfs_end_transaction and
344 * btrfs_commit_transaction.
345 */
346 __cold
347 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
348 const char *function,
349 unsigned int line, int errno, bool first_hit)
350 {
351 struct btrfs_fs_info *fs_info = trans->fs_info;
352
353 WRITE_ONCE(trans->aborted, errno);
354 WRITE_ONCE(trans->transaction->aborted, errno);
355 if (first_hit && errno == -ENOSPC)
356 btrfs_dump_space_info_for_trans_abort(fs_info);
357 /* Wake up anybody who may be waiting on this transaction */
358 wake_up(&fs_info->transaction_wait);
359 wake_up(&fs_info->transaction_blocked_wait);
360 __btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
361 }
362 /*
363 * __btrfs_panic decodes unexpected, fatal errors from the caller,
364 * issues an alert, and either panics or BUGs, depending on mount options.
365 */
366 __cold
367 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
368 unsigned int line, int errno, const char *fmt, ...)
369 {
370 char *s_id = "<unknown>";
371 const char *errstr;
372 struct va_format vaf = { .fmt = fmt };
373 va_list args;
374
375 if (fs_info)
376 s_id = fs_info->sb->s_id;
377
378 va_start(args, fmt);
379 vaf.va = &args;
380
381 errstr = btrfs_decode_error(errno);
382 if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
383 panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
384 s_id, function, line, &vaf, errno, errstr);
385
386 btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
387 function, line, &vaf, errno, errstr);
388 va_end(args);
389 /* Caller calls BUG() */
390 }
391
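/* Called by the VFS at unmount time; tears down all in-memory state via close_ctree(). */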
392 static void btrfs_put_super(struct super_block *sb)
393 {
394 close_ctree(btrfs_sb(sb));
395 }
396
397 enum {
398 Opt_acl, Opt_noacl,
399 Opt_clear_cache,
400 Opt_commit_interval,
401 Opt_compress,
402 Opt_compress_force,
403 Opt_compress_force_type,
404 Opt_compress_type,
405 Opt_degraded,
406 Opt_device,
407 Opt_fatal_errors,
408 Opt_flushoncommit, Opt_noflushoncommit,
409 Opt_max_inline,
410 Opt_barrier, Opt_nobarrier,
411 Opt_datacow, Opt_nodatacow,
412 Opt_datasum, Opt_nodatasum,
413 Opt_defrag, Opt_nodefrag,
414 Opt_discard, Opt_nodiscard,
415 Opt_discard_mode,
416 Opt_norecovery,
417 Opt_ratio,
418 Opt_rescan_uuid_tree,
419 Opt_skip_balance,
420 Opt_space_cache, Opt_no_space_cache,
421 Opt_space_cache_version,
422 Opt_ssd, Opt_nossd,
423 Opt_ssd_spread, Opt_nossd_spread,
424 Opt_subvol,
425 Opt_subvol_empty,
426 Opt_subvolid,
427 Opt_thread_pool,
428 Opt_treelog, Opt_notreelog,
429 Opt_user_subvol_rm_allowed,
430
431 /* Rescue options */
432 Opt_rescue,
433 Opt_usebackuproot,
434 Opt_nologreplay,
435 Opt_ignorebadroots,
436 Opt_ignoredatacsums,
437 Opt_rescue_all,
438
439 /* Deprecated options */
440 Opt_recovery,
441 Opt_inode_cache, Opt_noinode_cache,
442
443 /* Debugging options */
444 Opt_check_integrity,
445 Opt_check_integrity_including_extent_data,
446 Opt_check_integrity_print_mask,
447 Opt_enospc_debug, Opt_noenospc_debug,
448 #ifdef CONFIG_BTRFS_DEBUG
449 Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
450 #endif
451 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
452 Opt_ref_verify,
453 #endif
454 Opt_err,
455 };
456
457 static const match_table_t tokens = {
458 {Opt_acl, "acl"},
459 {Opt_noacl, "noacl"},
460 {Opt_clear_cache, "clear_cache"},
461 {Opt_commit_interval, "commit=%u"},
462 {Opt_compress, "compress"},
463 {Opt_compress_type, "compress=%s"},
464 {Opt_compress_force, "compress-force"},
465 {Opt_compress_force_type, "compress-force=%s"},
466 {Opt_degraded, "degraded"},
467 {Opt_device, "device=%s"},
468 {Opt_fatal_errors, "fatal_errors=%s"},
469 {Opt_flushoncommit, "flushoncommit"},
470 {Opt_noflushoncommit, "noflushoncommit"},
471 {Opt_inode_cache, "inode_cache"},
472 {Opt_noinode_cache, "noinode_cache"},
473 {Opt_max_inline, "max_inline=%s"},
474 {Opt_barrier, "barrier"},
475 {Opt_nobarrier, "nobarrier"},
476 {Opt_datacow, "datacow"},
477 {Opt_nodatacow, "nodatacow"},
478 {Opt_datasum, "datasum"},
479 {Opt_nodatasum, "nodatasum"},
480 {Opt_defrag, "autodefrag"},
481 {Opt_nodefrag, "noautodefrag"},
482 {Opt_discard, "discard"},
483 {Opt_discard_mode, "discard=%s"},
484 {Opt_nodiscard, "nodiscard"},
485 {Opt_norecovery, "norecovery"},
486 {Opt_ratio, "metadata_ratio=%u"},
487 {Opt_rescan_uuid_tree, "rescan_uuid_tree"},
488 {Opt_skip_balance, "skip_balance"},
489 {Opt_space_cache, "space_cache"},
490 {Opt_no_space_cache, "nospace_cache"},
491 {Opt_space_cache_version, "space_cache=%s"},
492 {Opt_ssd, "ssd"},
493 {Opt_nossd, "nossd"},
494 {Opt_ssd_spread, "ssd_spread"},
495 {Opt_nossd_spread, "nossd_spread"},
496 {Opt_subvol, "subvol=%s"},
497 {Opt_subvol_empty, "subvol="},
498 {Opt_subvolid, "subvolid=%s"},
499 {Opt_thread_pool, "thread_pool=%u"},
500 {Opt_treelog, "treelog"},
501 {Opt_notreelog, "notreelog"},
502 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
503
504 /* Rescue options */
505 {Opt_rescue, "rescue=%s"},
506 /* Deprecated, with alias rescue=nologreplay */
507 {Opt_nologreplay, "nologreplay"},
508 /* Deprecated, with alias rescue=usebackuproot */
509 {Opt_usebackuproot, "usebackuproot"},
510
511 /* Deprecated options */
512 {Opt_recovery, "recovery"},
513
514 /* Debugging options */
515 {Opt_check_integrity, "check_int"},
516 {Opt_check_integrity_including_extent_data, "check_int_data"},
517 {Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
518 {Opt_enospc_debug, "enospc_debug"},
519 {Opt_noenospc_debug, "noenospc_debug"},
520 #ifdef CONFIG_BTRFS_DEBUG
521 {Opt_fragment_data, "fragment=data"},
522 {Opt_fragment_metadata, "fragment=metadata"},
523 {Opt_fragment_all, "fragment=all"},
524 #endif
525 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
526 {Opt_ref_verify, "ref_verify"},
527 #endif
528 {Opt_err, NULL},
529 };
530
531 static const match_table_t rescue_tokens = {
532 {Opt_usebackuproot, "usebackuproot"},
533 {Opt_nologreplay, "nologreplay"},
534 {Opt_ignorebadroots, "ignorebadroots"},
535 {Opt_ignorebadroots, "ibadroots"},
536 {Opt_ignoredatacsums, "ignoredatacsums"},
537 {Opt_ignoredatacsums, "idatacsums"},
538 {Opt_rescue_all, "all"},
539 {Opt_err, NULL},
540 };
541
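/*
 * Return true (and log an error) if @opt is set in mount_opt, since these
 * options are only allowed together with a read-only mount.
 */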
542 static bool check_ro_option(struct btrfs_fs_info *fs_info, unsigned long opt,
543 const char *opt_name)
544 {
545 if (fs_info->mount_opt & opt) {
546 btrfs_err(fs_info, "%s must be used with ro mount option",
547 opt_name);
548 return true;
549 }
550 return false;
551 }
552
553 static int parse_rescue_options(struct btrfs_fs_info *info, const char *options)
554 {
555 char *opts;
556 char *orig;
557 char *p;
558 substring_t args[MAX_OPT_ARGS];
559 int ret = 0;
560
561 opts = kstrdup(options, GFP_KERNEL);
562 if (!opts)
563 return -ENOMEM;
564 orig = opts;
565
566 while ((p = strsep(&opts, ":")) != NULL) {
567 int token;
568
569 if (!*p)
570 continue;
571 token = match_token(p, rescue_tokens, args);
572 switch (token){
573 case Opt_usebackuproot:
574 btrfs_info(info,
575 "trying to use backup root at mount time");
576 btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
577 break;
578 case Opt_nologreplay:
579 btrfs_set_and_info(info, NOLOGREPLAY,
580 "disabling log replay at mount time");
581 break;
582 case Opt_ignorebadroots:
583 btrfs_set_and_info(info, IGNOREBADROOTS,
584 "ignoring bad roots");
585 break;
586 case Opt_ignoredatacsums:
587 btrfs_set_and_info(info, IGNOREDATACSUMS,
588 "ignoring data csums");
589 break;
590 case Opt_rescue_all:
591 btrfs_info(info, "enabling all of the rescue options");
592 btrfs_set_and_info(info, IGNOREDATACSUMS,
593 "ignoring data csums");
594 btrfs_set_and_info(info, IGNOREBADROOTS,
595 "ignoring bad roots");
596 btrfs_set_and_info(info, NOLOGREPLAY,
597 "disabling log replay at mount time");
598 break;
599 case Opt_err:
600 btrfs_info(info, "unrecognized rescue option '%s'", p);
601 ret = -EINVAL;
602 goto out;
603 default:
604 break;
605 }
606
607 }
608 out:
609 kfree(orig);
610 return ret;
611 }
612
613 /*
614 * Regular mount options parser. Everything that is needed only when
615 * reading in a new superblock is parsed here.
616 * XXX JDM: This needs to be cleaned up for remount.
617 */
618 int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
619 unsigned long new_flags)
620 {
621 substring_t args[MAX_OPT_ARGS];
622 char *p, *num;
623 int intarg;
624 int ret = 0;
625 char *compress_type;
626 bool compress_force = false;
627 enum btrfs_compression_type saved_compress_type;
628 int saved_compress_level;
629 bool saved_compress_force;
630 int no_compress = 0;
631 const bool remounting = test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state);
632
633 if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
634 btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
635 else if (btrfs_free_space_cache_v1_active(info)) {
636 if (btrfs_is_zoned(info)) {
637 btrfs_info(info,
638 "zoned: clearing existing space cache");
639 btrfs_set_super_cache_generation(info->super_copy, 0);
640 } else {
641 btrfs_set_opt(info->mount_opt, SPACE_CACHE);
642 }
643 }
644
645 /*
646 * Even if the options are empty, we still need to do extra checks
647 * against the new flags
648 */
649 if (!options)
650 goto check;
651
652 while ((p = strsep(&options, ",")) != NULL) {
653 int token;
654 if (!*p)
655 continue;
656
657 token = match_token(p, tokens, args);
658 switch (token) {
659 case Opt_degraded:
660 btrfs_info(info, "allowing degraded mounts");
661 btrfs_set_opt(info->mount_opt, DEGRADED);
662 break;
663 case Opt_subvol:
664 case Opt_subvol_empty:
665 case Opt_subvolid:
666 case Opt_device:
667 /*
668 * These are parsed by btrfs_parse_subvol_options or
669 * btrfs_parse_device_options and can be ignored here.
670 */
671 break;
672 case Opt_nodatasum:
673 btrfs_set_and_info(info, NODATASUM,
674 "setting nodatasum");
675 break;
676 case Opt_datasum:
677 if (btrfs_test_opt(info, NODATASUM)) {
678 if (btrfs_test_opt(info, NODATACOW))
679 btrfs_info(info,
680 "setting datasum, datacow enabled");
681 else
682 btrfs_info(info, "setting datasum");
683 }
684 btrfs_clear_opt(info->mount_opt, NODATACOW);
685 btrfs_clear_opt(info->mount_opt, NODATASUM);
686 break;
687 case Opt_nodatacow:
688 if (!btrfs_test_opt(info, NODATACOW)) {
689 if (!btrfs_test_opt(info, COMPRESS) ||
690 !btrfs_test_opt(info, FORCE_COMPRESS)) {
691 btrfs_info(info,
692 "setting nodatacow, compression disabled");
693 } else {
694 btrfs_info(info, "setting nodatacow");
695 }
696 }
697 btrfs_clear_opt(info->mount_opt, COMPRESS);
698 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
699 btrfs_set_opt(info->mount_opt, NODATACOW);
700 btrfs_set_opt(info->mount_opt, NODATASUM);
701 break;
702 case Opt_datacow:
703 btrfs_clear_and_info(info, NODATACOW,
704 "setting datacow");
705 break;
706 case Opt_compress_force:
707 case Opt_compress_force_type:
708 compress_force = true;
709 fallthrough;
710 case Opt_compress:
711 case Opt_compress_type:
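/*
 * Remember the current compression settings so we only log a
 * message below when something actually changes.
 */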
712 saved_compress_type = btrfs_test_opt(info,
713 COMPRESS) ?
714 info->compress_type : BTRFS_COMPRESS_NONE;
715 saved_compress_force =
716 btrfs_test_opt(info, FORCE_COMPRESS);
717 saved_compress_level = info->compress_level;
718 if (token == Opt_compress ||
719 token == Opt_compress_force ||
720 strncmp(args[0].from, "zlib", 4) == 0) {
721 compress_type = "zlib";
722
723 info->compress_type = BTRFS_COMPRESS_ZLIB;
724 info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
725 /*
726 * args[0] contains uninitialized data since
727 * for these tokens we don't expect any
728 * parameter.
729 */
730 if (token != Opt_compress &&
731 token != Opt_compress_force)
732 info->compress_level =
733 btrfs_compress_str2level(
734 BTRFS_COMPRESS_ZLIB,
735 args[0].from + 4);
736 btrfs_set_opt(info->mount_opt, COMPRESS);
737 btrfs_clear_opt(info->mount_opt, NODATACOW);
738 btrfs_clear_opt(info->mount_opt, NODATASUM);
739 no_compress = 0;
740 } else if (strncmp(args[0].from, "lzo", 3) == 0) {
741 compress_type = "lzo";
742 info->compress_type = BTRFS_COMPRESS_LZO;
743 info->compress_level = 0;
744 btrfs_set_opt(info->mount_opt, COMPRESS);
745 btrfs_clear_opt(info->mount_opt, NODATACOW);
746 btrfs_clear_opt(info->mount_opt, NODATASUM);
747 btrfs_set_fs_incompat(info, COMPRESS_LZO);
748 no_compress = 0;
749 } else if (strncmp(args[0].from, "zstd", 4) == 0) {
750 compress_type = "zstd";
751 info->compress_type = BTRFS_COMPRESS_ZSTD;
752 info->compress_level =
753 btrfs_compress_str2level(
754 BTRFS_COMPRESS_ZSTD,
755 args[0].from + 4);
756 btrfs_set_opt(info->mount_opt, COMPRESS);
757 btrfs_clear_opt(info->mount_opt, NODATACOW);
758 btrfs_clear_opt(info->mount_opt, NODATASUM);
759 btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
760 no_compress = 0;
761 } else if (strncmp(args[0].from, "no", 2) == 0) {
762 compress_type = "no";
763 info->compress_level = 0;
764 info->compress_type = 0;
765 btrfs_clear_opt(info->mount_opt, COMPRESS);
766 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
767 compress_force = false;
768 no_compress++;
769 } else {
770 btrfs_err(info, "unrecognized compression value %s",
771 args[0].from);
772 ret = -EINVAL;
773 goto out;
774 }
775
776 if (compress_force) {
777 btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
778 } else {
779 /*
780 * If we remount from compress-force=xxx to
781 * compress=xxx, we need to clear the FORCE_COMPRESS
782 * flag; otherwise there is no way for users
783 * to disable forced compression separately.
784 */
785 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
786 }
787 if (no_compress == 1) {
788 btrfs_info(info, "use no compression");
789 } else if ((info->compress_type != saved_compress_type) ||
790 (compress_force != saved_compress_force) ||
791 (info->compress_level != saved_compress_level)) {
792 btrfs_info(info, "%s %s compression, level %d",
793 (compress_force) ? "force" : "use",
794 compress_type, info->compress_level);
795 }
796 compress_force = false;
797 break;
798 case Opt_ssd:
799 btrfs_set_and_info(info, SSD,
800 "enabling ssd optimizations");
801 btrfs_clear_opt(info->mount_opt, NOSSD);
802 break;
803 case Opt_ssd_spread:
804 btrfs_set_and_info(info, SSD,
805 "enabling ssd optimizations");
806 btrfs_set_and_info(info, SSD_SPREAD,
807 "using spread ssd allocation scheme");
808 btrfs_clear_opt(info->mount_opt, NOSSD);
809 break;
810 case Opt_nossd:
811 btrfs_set_opt(info->mount_opt, NOSSD);
812 btrfs_clear_and_info(info, SSD,
813 "not using ssd optimizations");
814 fallthrough;
815 case Opt_nossd_spread:
816 btrfs_clear_and_info(info, SSD_SPREAD,
817 "not using spread ssd allocation scheme");
818 break;
819 case Opt_barrier:
820 btrfs_clear_and_info(info, NOBARRIER,
821 "turning on barriers");
822 break;
823 case Opt_nobarrier:
824 btrfs_set_and_info(info, NOBARRIER,
825 "turning off barriers");
826 break;
827 case Opt_thread_pool:
828 ret = match_int(&args[0], &intarg);
829 if (ret) {
830 btrfs_err(info, "unrecognized thread_pool value %s",
831 args[0].from);
832 goto out;
833 } else if (intarg == 0) {
834 btrfs_err(info, "invalid value 0 for thread_pool");
835 ret = -EINVAL;
836 goto out;
837 }
838 info->thread_pool_size = intarg;
839 break;
840 case Opt_max_inline:
841 num = match_strdup(&args[0]);
842 if (num) {
843 info->max_inline = memparse(num, NULL);
844 kfree(num);
845
846 if (info->max_inline) {
847 info->max_inline = min_t(u64,
848 info->max_inline,
849 info->sectorsize);
850 }
851 btrfs_info(info, "max_inline at %llu",
852 info->max_inline);
853 } else {
854 ret = -ENOMEM;
855 goto out;
856 }
857 break;
858 case Opt_acl:
859 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
860 info->sb->s_flags |= SB_POSIXACL;
861 break;
862 #else
863 btrfs_err(info, "support for ACL not compiled in!");
864 ret = -EINVAL;
865 goto out;
866 #endif
867 case Opt_noacl:
868 info->sb->s_flags &= ~SB_POSIXACL;
869 break;
870 case Opt_notreelog:
871 btrfs_set_and_info(info, NOTREELOG,
872 "disabling tree log");
873 break;
874 case Opt_treelog:
875 btrfs_clear_and_info(info, NOTREELOG,
876 "enabling tree log");
877 break;
878 case Opt_norecovery:
879 case Opt_nologreplay:
880 btrfs_warn(info,
881 "'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
882 btrfs_set_and_info(info, NOLOGREPLAY,
883 "disabling log replay at mount time");
884 break;
885 case Opt_flushoncommit:
886 btrfs_set_and_info(info, FLUSHONCOMMIT,
887 "turning on flush-on-commit");
888 break;
889 case Opt_noflushoncommit:
890 btrfs_clear_and_info(info, FLUSHONCOMMIT,
891 "turning off flush-on-commit");
892 break;
893 case Opt_ratio:
894 ret = match_int(&args[0], &intarg);
895 if (ret) {
896 btrfs_err(info, "unrecognized metadata_ratio value %s",
897 args[0].from);
898 goto out;
899 }
900 info->metadata_ratio = intarg;
901 btrfs_info(info, "metadata ratio %u",
902 info->metadata_ratio);
903 break;
904 case Opt_discard:
905 case Opt_discard_mode:
906 if (token == Opt_discard ||
907 strcmp(args[0].from, "sync") == 0) {
908 btrfs_clear_opt(info->mount_opt, DISCARD_ASYNC);
909 btrfs_set_and_info(info, DISCARD_SYNC,
910 "turning on sync discard");
911 } else if (strcmp(args[0].from, "async") == 0) {
912 btrfs_clear_opt(info->mount_opt, DISCARD_SYNC);
913 btrfs_set_and_info(info, DISCARD_ASYNC,
914 "turning on async discard");
915 } else {
916 btrfs_err(info, "unrecognized discard mode value %s",
917 args[0].from);
918 ret = -EINVAL;
919 goto out;
920 }
921 break;
922 case Opt_nodiscard:
923 btrfs_clear_and_info(info, DISCARD_SYNC,
924 "turning off discard");
925 btrfs_clear_and_info(info, DISCARD_ASYNC,
926 "turning off async discard");
927 break;
928 case Opt_space_cache:
929 case Opt_space_cache_version:
930 /*
931 * We already set FREE_SPACE_TREE above because we have
932 * compat_ro(FREE_SPACE_TREE) set, and we aren't going
933 * to allow v1 to be set for extent tree v2, so simply
934 * ignore this setting if we're on extent tree v2.
935 */
936 if (btrfs_fs_incompat(info, EXTENT_TREE_V2))
937 break;
938 if (token == Opt_space_cache ||
939 strcmp(args[0].from, "v1") == 0) {
940 btrfs_clear_opt(info->mount_opt,
941 FREE_SPACE_TREE);
942 btrfs_set_and_info(info, SPACE_CACHE,
943 "enabling disk space caching");
944 } else if (strcmp(args[0].from, "v2") == 0) {
945 btrfs_clear_opt(info->mount_opt,
946 SPACE_CACHE);
947 btrfs_set_and_info(info, FREE_SPACE_TREE,
948 "enabling free space tree");
949 } else {
950 btrfs_err(info, "unrecognized space_cache value %s",
951 args[0].from);
952 ret = -EINVAL;
953 goto out;
954 }
955 break;
956 case Opt_rescan_uuid_tree:
957 btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
958 break;
959 case Opt_no_space_cache:
960 /*
961 * We cannot operate without the free space tree with
962 * extent tree v2, ignore this option.
963 */
964 if (btrfs_fs_incompat(info, EXTENT_TREE_V2))
965 break;
966 if (btrfs_test_opt(info, SPACE_CACHE)) {
967 btrfs_clear_and_info(info, SPACE_CACHE,
968 "disabling disk space caching");
969 }
970 if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
971 btrfs_clear_and_info(info, FREE_SPACE_TREE,
972 "disabling free space tree");
973 }
974 break;
975 case Opt_inode_cache:
976 case Opt_noinode_cache:
977 btrfs_warn(info,
978 "the 'inode_cache' option is deprecated and has no effect since 5.11");
979 break;
980 case Opt_clear_cache:
981 /*
982 * We cannot clear the free space tree with extent tree
983 * v2, ignore this option.
984 */
985 if (btrfs_fs_incompat(info, EXTENT_TREE_V2))
986 break;
987 btrfs_set_and_info(info, CLEAR_CACHE,
988 "force clearing of disk cache");
989 break;
990 case Opt_user_subvol_rm_allowed:
991 btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
992 break;
993 case Opt_enospc_debug:
994 btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
995 break;
996 case Opt_noenospc_debug:
997 btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
998 break;
999 case Opt_defrag:
1000 btrfs_set_and_info(info, AUTO_DEFRAG,
1001 "enabling auto defrag");
1002 break;
1003 case Opt_nodefrag:
1004 btrfs_clear_and_info(info, AUTO_DEFRAG,
1005 "disabling auto defrag");
1006 break;
1007 case Opt_recovery:
1008 case Opt_usebackuproot:
1009 btrfs_warn(info,
1010 "'%s' is deprecated, use 'rescue=usebackuproot' instead",
1011 token == Opt_recovery ? "recovery" :
1012 "usebackuproot");
1013 btrfs_info(info,
1014 "trying to use backup root at mount time");
1015 btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
1016 break;
1017 case Opt_skip_balance:
1018 btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
1019 break;
1020 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1021 case Opt_check_integrity_including_extent_data:
1022 btrfs_info(info,
1023 "enabling check integrity including extent data");
1024 btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY_DATA);
1025 btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
1026 break;
1027 case Opt_check_integrity:
1028 btrfs_info(info, "enabling check integrity");
1029 btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
1030 break;
1031 case Opt_check_integrity_print_mask:
1032 ret = match_int(&args[0], &intarg);
1033 if (ret) {
1034 btrfs_err(info,
1035 "unrecognized check_integrity_print_mask value %s",
1036 args[0].from);
1037 goto out;
1038 }
1039 info->check_integrity_print_mask = intarg;
1040 btrfs_info(info, "check_integrity_print_mask 0x%x",
1041 info->check_integrity_print_mask);
1042 break;
1043 #else
1044 case Opt_check_integrity_including_extent_data:
1045 case Opt_check_integrity:
1046 case Opt_check_integrity_print_mask:
1047 btrfs_err(info,
1048 "support for check_integrity* not compiled in!");
1049 ret = -EINVAL;
1050 goto out;
1051 #endif
1052 case Opt_fatal_errors:
1053 if (strcmp(args[0].from, "panic") == 0) {
1054 btrfs_set_opt(info->mount_opt,
1055 PANIC_ON_FATAL_ERROR);
1056 } else if (strcmp(args[0].from, "bug") == 0) {
1057 btrfs_clear_opt(info->mount_opt,
1058 PANIC_ON_FATAL_ERROR);
1059 } else {
1060 btrfs_err(info, "unrecognized fatal_errors value %s",
1061 args[0].from);
1062 ret = -EINVAL;
1063 goto out;
1064 }
1065 break;
1066 case Opt_commit_interval:
1067 intarg = 0;
1068 ret = match_int(&args[0], &intarg);
1069 if (ret) {
1070 btrfs_err(info, "unrecognized commit_interval value %s",
1071 args[0].from);
1072 ret = -EINVAL;
1073 goto out;
1074 }
1075 if (intarg == 0) {
1076 btrfs_info(info,
1077 "using default commit interval %us",
1078 BTRFS_DEFAULT_COMMIT_INTERVAL);
1079 intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
1080 } else if (intarg > 300) {
1081 btrfs_warn(info, "excessive commit interval %d",
1082 intarg);
1083 }
1084 info->commit_interval = intarg;
1085 break;
1086 case Opt_rescue:
1087 ret = parse_rescue_options(info, args[0].from);
1088 if (ret < 0) {
1089 btrfs_err(info, "unrecognized rescue value %s",
1090 args[0].from);
1091 goto out;
1092 }
1093 break;
1094 #ifdef CONFIG_BTRFS_DEBUG
1095 case Opt_fragment_all:
1096 btrfs_info(info, "fragmenting all space");
1097 btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
1098 btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
1099 break;
1100 case Opt_fragment_metadata:
1101 btrfs_info(info, "fragmenting metadata");
1102 btrfs_set_opt(info->mount_opt,
1103 FRAGMENT_METADATA);
1104 break;
1105 case Opt_fragment_data:
1106 btrfs_info(info, "fragmenting data");
1107 btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
1108 break;
1109 #endif
1110 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
1111 case Opt_ref_verify:
1112 btrfs_info(info, "doing ref verification");
1113 btrfs_set_opt(info->mount_opt, REF_VERIFY);
1114 break;
1115 #endif
1116 case Opt_err:
1117 btrfs_err(info, "unrecognized mount option '%s'", p);
1118 ret = -EINVAL;
1119 goto out;
1120 default:
1121 break;
1122 }
1123 }
1124 check:
1125 /* We're read-only, don't have to check. */
1126 if (new_flags & SB_RDONLY)
1127 goto out;
1128
1129 if (check_ro_option(info, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
1130 check_ro_option(info, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
1131 check_ro_option(info, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums"))
1132 ret = -EINVAL;
1133 out:
1134 if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
1135 !btrfs_test_opt(info, FREE_SPACE_TREE) &&
1136 !btrfs_test_opt(info, CLEAR_CACHE)) {
1137 btrfs_err(info, "cannot disable free space tree");
1138 ret = -EINVAL;
1139
1140 }
1141 if (!ret)
1142 ret = btrfs_check_mountopts_zoned(info);
1143 if (!ret && !remounting) {
1144 if (btrfs_test_opt(info, SPACE_CACHE))
1145 btrfs_info(info, "disk space caching is enabled");
1146 if (btrfs_test_opt(info, FREE_SPACE_TREE))
1147 btrfs_info(info, "using free space tree");
1148 }
1149 return ret;
1150 }
1151
1152 /*
1153 * Parse mount options that are required early in the mount process.
1154 *
1155 * All other options will be parsed much later in the mount process and
1156 * only when we need to allocate a new super block.
1157 */
1158 static int btrfs_parse_device_options(const char *options, fmode_t flags,
1159 void *holder)
1160 {
1161 substring_t args[MAX_OPT_ARGS];
1162 char *device_name, *opts, *orig, *p;
1163 struct btrfs_device *device = NULL;
1164 int error = 0;
1165
1166 lockdep_assert_held(&uuid_mutex);
1167
1168 if (!options)
1169 return 0;
1170
1171 /*
1172 * strsep changes the string, duplicate it because btrfs_parse_options
1173 * gets called later
1174 */
1175 opts = kstrdup(options, GFP_KERNEL);
1176 if (!opts)
1177 return -ENOMEM;
1178 orig = opts;
1179
1180 while ((p = strsep(&opts, ",")) != NULL) {
1181 int token;
1182
1183 if (!*p)
1184 continue;
1185
1186 token = match_token(p, tokens, args);
1187 if (token == Opt_device) {
1188 device_name = match_strdup(&args[0]);
1189 if (!device_name) {
1190 error = -ENOMEM;
1191 goto out;
1192 }
1193 device = btrfs_scan_one_device(device_name, flags,
1194 holder);
1195 kfree(device_name);
1196 if (IS_ERR(device)) {
1197 error = PTR_ERR(device);
1198 goto out;
1199 }
1200 }
1201 }
1202
1203 out:
1204 kfree(orig);
1205 return error;
1206 }
1207
1208 /*
1209 * Parse mount options that are related to subvolume id
1210 *
1211 * The value is later passed to mount_subvol()
1212 */
1213 static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
1214 u64 *subvol_objectid)
1215 {
1216 substring_t args[MAX_OPT_ARGS];
1217 char *opts, *orig, *p;
1218 int error = 0;
1219 u64 subvolid;
1220
1221 if (!options)
1222 return 0;
1223
1224 /*
1225 * strsep changes the string, duplicate it because
1226 * btrfs_parse_device_options gets called later
1227 */
1228 opts = kstrdup(options, GFP_KERNEL);
1229 if (!opts)
1230 return -ENOMEM;
1231 orig = opts;
1232
1233 while ((p = strsep(&opts, ",")) != NULL) {
1234 int token;
1235 if (!*p)
1236 continue;
1237
1238 token = match_token(p, tokens, args);
1239 switch (token) {
1240 case Opt_subvol:
1241 kfree(*subvol_name);
1242 *subvol_name = match_strdup(&args[0]);
1243 if (!*subvol_name) {
1244 error = -ENOMEM;
1245 goto out;
1246 }
1247 break;
1248 case Opt_subvolid:
1249 error = match_u64(&args[0], &subvolid);
1250 if (error)
1251 goto out;
1252
1253 /* we want the original fs_tree */
1254 if (subvolid == 0)
1255 subvolid = BTRFS_FS_TREE_OBJECTID;
1256
1257 *subvol_objectid = subvolid;
1258 break;
1259 default:
1260 break;
1261 }
1262 }
1263
1264 out:
1265 kfree(orig);
1266 return error;
1267 }
1268
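/*
 * Resolve @subvol_objectid to its full path relative to the top-level
 * subvolume by walking root backrefs and inode refs upwards. The returned
 * string is kmalloc'ed and must be freed by the caller.
 */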
1269 char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1270 u64 subvol_objectid)
1271 {
1272 struct btrfs_root *root = fs_info->tree_root;
1273 struct btrfs_root *fs_root = NULL;
1274 struct btrfs_root_ref *root_ref;
1275 struct btrfs_inode_ref *inode_ref;
1276 struct btrfs_key key;
1277 struct btrfs_path *path = NULL;
1278 char *name = NULL, *ptr;
1279 u64 dirid;
1280 int len;
1281 int ret;
1282
1283 path = btrfs_alloc_path();
1284 if (!path) {
1285 ret = -ENOMEM;
1286 goto err;
1287 }
1288
1289 name = kmalloc(PATH_MAX, GFP_KERNEL);
1290 if (!name) {
1291 ret = -ENOMEM;
1292 goto err;
1293 }
1294 ptr = name + PATH_MAX - 1;
1295 ptr[0] = '\0';
1296
1297 /*
1298 * Walk up the subvolume trees in the tree of tree roots by root
1299 * backrefs until we hit the top-level subvolume.
1300 */
1301 while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
1302 key.objectid = subvol_objectid;
1303 key.type = BTRFS_ROOT_BACKREF_KEY;
1304 key.offset = (u64)-1;
1305
1306 ret = btrfs_search_backwards(root, &key, path);
1307 if (ret < 0) {
1308 goto err;
1309 } else if (ret > 0) {
1310 ret = -ENOENT;
1311 goto err;
1312 }
1313
1314 subvol_objectid = key.offset;
1315
1316 root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1317 struct btrfs_root_ref);
1318 len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
1319 ptr -= len + 1;
1320 if (ptr < name) {
1321 ret = -ENAMETOOLONG;
1322 goto err;
1323 }
1324 read_extent_buffer(path->nodes[0], ptr + 1,
1325 (unsigned long)(root_ref + 1), len);
1326 ptr[0] = '/';
1327 dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
1328 btrfs_release_path(path);
1329
1330 fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true);
1331 if (IS_ERR(fs_root)) {
1332 ret = PTR_ERR(fs_root);
1333 fs_root = NULL;
1334 goto err;
1335 }
1336
1337 /*
1338 * Walk up the filesystem tree by inode refs until we hit the
1339 * root directory.
1340 */
1341 while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
1342 key.objectid = dirid;
1343 key.type = BTRFS_INODE_REF_KEY;
1344 key.offset = (u64)-1;
1345
1346 ret = btrfs_search_backwards(fs_root, &key, path);
1347 if (ret < 0) {
1348 goto err;
1349 } else if (ret > 0) {
1350 ret = -ENOENT;
1351 goto err;
1352 }
1353
1354 dirid = key.offset;
1355
1356 inode_ref = btrfs_item_ptr(path->nodes[0],
1357 path->slots[0],
1358 struct btrfs_inode_ref);
1359 len = btrfs_inode_ref_name_len(path->nodes[0],
1360 inode_ref);
1361 ptr -= len + 1;
1362 if (ptr < name) {
1363 ret = -ENAMETOOLONG;
1364 goto err;
1365 }
1366 read_extent_buffer(path->nodes[0], ptr + 1,
1367 (unsigned long)(inode_ref + 1), len);
1368 ptr[0] = '/';
1369 btrfs_release_path(path);
1370 }
1371 btrfs_put_root(fs_root);
1372 fs_root = NULL;
1373 }
1374
1375 btrfs_free_path(path);
1376 if (ptr == name + PATH_MAX - 1) {
1377 name[0] = '/';
1378 name[1] = '\0';
1379 } else {
1380 memmove(name, ptr, name + PATH_MAX - ptr);
1381 }
1382 return name;
1383
1384 err:
1385 btrfs_put_root(fs_root);
1386 btrfs_free_path(path);
1387 kfree(name);
1388 return ERR_PTR(ret);
1389 }
1390
1391 static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
1392 {
1393 struct btrfs_root *root = fs_info->tree_root;
1394 struct btrfs_dir_item *di;
1395 struct btrfs_path *path;
1396 struct btrfs_key location;
1397 u64 dir_id;
1398
1399 path = btrfs_alloc_path();
1400 if (!path)
1401 return -ENOMEM;
1402
1403 /*
1404 * Find the "default" dir item which points to the root item that we
1405 * will mount by default if we haven't been given a specific subvolume
1406 * to mount.
1407 */
1408 dir_id = btrfs_super_root_dir(fs_info->super_copy);
1409 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
1410 if (IS_ERR(di)) {
1411 btrfs_free_path(path);
1412 return PTR_ERR(di);
1413 }
1414 if (!di) {
1415 /*
1416 * Ok the default dir item isn't there. This is weird since
1417 * it's always been there, but don't freak out, just try and
1418 * mount the top-level subvolume.
1419 */
1420 btrfs_free_path(path);
1421 *objectid = BTRFS_FS_TREE_OBJECTID;
1422 return 0;
1423 }
1424
1425 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1426 btrfs_free_path(path);
1427 *objectid = location.objectid;
1428 return 0;
1429 }
1430
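/*
 * Fill in the superblock: set up the VFS operations and limits, open the
 * trees via open_ctree() and instantiate the root dentry.
 */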
1431 static int btrfs_fill_super(struct super_block *sb,
1432 struct btrfs_fs_devices *fs_devices,
1433 void *data)
1434 {
1435 struct inode *inode;
1436 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1437 int err;
1438
1439 sb->s_maxbytes = MAX_LFS_FILESIZE;
1440 sb->s_magic = BTRFS_SUPER_MAGIC;
1441 sb->s_op = &btrfs_super_ops;
1442 sb->s_d_op = &btrfs_dentry_operations;
1443 sb->s_export_op = &btrfs_export_ops;
1444 #ifdef CONFIG_FS_VERITY
1445 sb->s_vop = &btrfs_verityops;
1446 #endif
1447 sb->s_xattr = btrfs_xattr_handlers;
1448 sb->s_time_gran = 1;
1449 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
1450 sb->s_flags |= SB_POSIXACL;
1451 #endif
1452 sb->s_flags |= SB_I_VERSION;
1453 sb->s_iflags |= SB_I_CGROUPWB;
1454
1455 err = super_setup_bdi(sb);
1456 if (err) {
1457 btrfs_err(fs_info, "super_setup_bdi failed");
1458 return err;
1459 }
1460
1461 err = open_ctree(sb, fs_devices, (char *)data);
1462 if (err) {
1463 btrfs_err(fs_info, "open_ctree failed");
1464 return err;
1465 }
1466
1467 inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
1468 if (IS_ERR(inode)) {
1469 err = PTR_ERR(inode);
1470 goto fail_close;
1471 }
1472
1473 sb->s_root = d_make_root(inode);
1474 if (!sb->s_root) {
1475 err = -ENOMEM;
1476 goto fail_close;
1477 }
1478
1479 sb->s_flags |= SB_ACTIVE;
1480 return 0;
1481
1482 fail_close:
1483 close_ctree(fs_info);
1484 return err;
1485 }
1486
1487 int btrfs_sync_fs(struct super_block *sb, int wait)
1488 {
1489 struct btrfs_trans_handle *trans;
1490 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1491 struct btrfs_root *root = fs_info->tree_root;
1492
1493 trace_btrfs_sync_fs(fs_info, wait);
1494
1495 if (!wait) {
1496 filemap_flush(fs_info->btree_inode->i_mapping);
1497 return 0;
1498 }
1499
1500 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1501
1502 trans = btrfs_attach_transaction_barrier(root);
1503 if (IS_ERR(trans)) {
1504 /* no transaction, don't bother */
1505 if (PTR_ERR(trans) == -ENOENT) {
1506 /*
1507 * Exit unless we have some pending changes
1508 * that need to go through commit
1509 */
1510 if (fs_info->pending_changes == 0)
1511 return 0;
1512 /*
1513 * A non-blocking test if the fs is frozen. We must not
1514 * start a new transaction here otherwise a deadlock
1515 * happens. The pending operations are delayed to the
1516 * next commit after thawing.
1517 */
1518 if (sb_start_write_trylock(sb))
1519 sb_end_write(sb);
1520 else
1521 return 0;
1522 trans = btrfs_start_transaction(root, 0);
1523 }
1524 if (IS_ERR(trans))
1525 return PTR_ERR(trans);
1526 }
1527 return btrfs_commit_transaction(trans);
1528 }
1529
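/* Print one rescue= sub-option; the ",rescue=" prefix is emitted only once. */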
1530 static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed)
1531 {
1532 seq_printf(seq, "%s%s", (*printed) ? ":" : ",rescue=", s);
1533 *printed = true;
1534 }
1535
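/* Emit the active mount options for /proc/mounts. */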
1536 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1537 {
1538 struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1539 const char *compress_type;
1540 const char *subvol_name;
1541 bool printed = false;
1542
1543 if (btrfs_test_opt(info, DEGRADED))
1544 seq_puts(seq, ",degraded");
1545 if (btrfs_test_opt(info, NODATASUM))
1546 seq_puts(seq, ",nodatasum");
1547 if (btrfs_test_opt(info, NODATACOW))
1548 seq_puts(seq, ",nodatacow");
1549 if (btrfs_test_opt(info, NOBARRIER))
1550 seq_puts(seq, ",nobarrier");
1551 if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1552 seq_printf(seq, ",max_inline=%llu", info->max_inline);
1553 if (info->thread_pool_size != min_t(unsigned long,
1554 num_online_cpus() + 2, 8))
1555 seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1556 if (btrfs_test_opt(info, COMPRESS)) {
1557 compress_type = btrfs_compress_type2str(info->compress_type);
1558 if (btrfs_test_opt(info, FORCE_COMPRESS))
1559 seq_printf(seq, ",compress-force=%s", compress_type);
1560 else
1561 seq_printf(seq, ",compress=%s", compress_type);
1562 if (info->compress_level)
1563 seq_printf(seq, ":%d", info->compress_level);
1564 }
1565 if (btrfs_test_opt(info, NOSSD))
1566 seq_puts(seq, ",nossd");
1567 if (btrfs_test_opt(info, SSD_SPREAD))
1568 seq_puts(seq, ",ssd_spread");
1569 else if (btrfs_test_opt(info, SSD))
1570 seq_puts(seq, ",ssd");
1571 if (btrfs_test_opt(info, NOTREELOG))
1572 seq_puts(seq, ",notreelog");
1573 if (btrfs_test_opt(info, NOLOGREPLAY))
1574 print_rescue_option(seq, "nologreplay", &printed);
1575 if (btrfs_test_opt(info, USEBACKUPROOT))
1576 print_rescue_option(seq, "usebackuproot", &printed);
1577 if (btrfs_test_opt(info, IGNOREBADROOTS))
1578 print_rescue_option(seq, "ignorebadroots", &printed);
1579 if (btrfs_test_opt(info, IGNOREDATACSUMS))
1580 print_rescue_option(seq, "ignoredatacsums", &printed);
1581 if (btrfs_test_opt(info, FLUSHONCOMMIT))
1582 seq_puts(seq, ",flushoncommit");
1583 if (btrfs_test_opt(info, DISCARD_SYNC))
1584 seq_puts(seq, ",discard");
1585 if (btrfs_test_opt(info, DISCARD_ASYNC))
1586 seq_puts(seq, ",discard=async");
1587 if (!(info->sb->s_flags & SB_POSIXACL))
1588 seq_puts(seq, ",noacl");
1589 if (btrfs_free_space_cache_v1_active(info))
1590 seq_puts(seq, ",space_cache");
1591 else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
1592 seq_puts(seq, ",space_cache=v2");
1593 else
1594 seq_puts(seq, ",nospace_cache");
1595 if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1596 seq_puts(seq, ",rescan_uuid_tree");
1597 if (btrfs_test_opt(info, CLEAR_CACHE))
1598 seq_puts(seq, ",clear_cache");
1599 if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1600 seq_puts(seq, ",user_subvol_rm_allowed");
1601 if (btrfs_test_opt(info, ENOSPC_DEBUG))
1602 seq_puts(seq, ",enospc_debug");
1603 if (btrfs_test_opt(info, AUTO_DEFRAG))
1604 seq_puts(seq, ",autodefrag");
1605 if (btrfs_test_opt(info, SKIP_BALANCE))
1606 seq_puts(seq, ",skip_balance");
1607 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1608 if (btrfs_test_opt(info, CHECK_INTEGRITY_DATA))
1609 seq_puts(seq, ",check_int_data");
1610 else if (btrfs_test_opt(info, CHECK_INTEGRITY))
1611 seq_puts(seq, ",check_int");
1612 if (info->check_integrity_print_mask)
1613 seq_printf(seq, ",check_int_print_mask=%d",
1614 info->check_integrity_print_mask);
1615 #endif
1616 if (info->metadata_ratio)
1617 seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1618 if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1619 seq_puts(seq, ",fatal_errors=panic");
1620 if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1621 seq_printf(seq, ",commit=%u", info->commit_interval);
1622 #ifdef CONFIG_BTRFS_DEBUG
1623 if (btrfs_test_opt(info, FRAGMENT_DATA))
1624 seq_puts(seq, ",fragment=data");
1625 if (btrfs_test_opt(info, FRAGMENT_METADATA))
1626 seq_puts(seq, ",fragment=metadata");
1627 #endif
1628 if (btrfs_test_opt(info, REF_VERIFY))
1629 seq_puts(seq, ",ref_verify");
1630 seq_printf(seq, ",subvolid=%llu",
1631 BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1632 subvol_name = btrfs_get_subvol_name_from_objectid(info,
1633 BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1634 if (!IS_ERR(subvol_name)) {
1635 seq_puts(seq, ",subvol=");
1636 seq_escape(seq, subvol_name, " \t\n\\");
1637 kfree(subvol_name);
1638 }
1639 return 0;
1640 }
1641
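/* sget() match callback: two supers are the same if they share fs_devices. */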
1642 static int btrfs_test_super(struct super_block *s, void *data)
1643 {
1644 struct btrfs_fs_info *p = data;
1645 struct btrfs_fs_info *fs_info = btrfs_sb(s);
1646
1647 return fs_info->fs_devices == p->fs_devices;
1648 }
1649
1650 static int btrfs_set_super(struct super_block *s, void *data)
1651 {
1652 int err = set_anon_super(s, data);
1653 if (!err)
1654 s->s_fs_info = data;
1655 return err;
1656 }
1657
1658 /*
1659 * subvolumes are identified by ino 256
1660 */
1661 static inline int is_subvolume_inode(struct inode *inode)
1662 {
1663 if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1664 return 1;
1665 return 0;
1666 }
1667
1668 static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1669 struct vfsmount *mnt)
1670 {
1671 struct dentry *root;
1672 int ret;
1673
1674 if (!subvol_name) {
1675 if (!subvol_objectid) {
1676 ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1677 &subvol_objectid);
1678 if (ret) {
1679 root = ERR_PTR(ret);
1680 goto out;
1681 }
1682 }
1683 subvol_name = btrfs_get_subvol_name_from_objectid(
1684 btrfs_sb(mnt->mnt_sb), subvol_objectid);
1685 if (IS_ERR(subvol_name)) {
1686 root = ERR_CAST(subvol_name);
1687 subvol_name = NULL;
1688 goto out;
1689 }
1690
1691 }
1692
1693 root = mount_subtree(mnt, subvol_name);
1694 /* mount_subtree() drops our reference on the vfsmount. */
1695 mnt = NULL;
1696
1697 if (!IS_ERR(root)) {
1698 struct super_block *s = root->d_sb;
1699 struct btrfs_fs_info *fs_info = btrfs_sb(s);
1700 struct inode *root_inode = d_inode(root);
1701 u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1702
1703 ret = 0;
1704 if (!is_subvolume_inode(root_inode)) {
1705 btrfs_err(fs_info, "'%s' is not a valid subvolume",
1706 subvol_name);
1707 ret = -EINVAL;
1708 }
1709 if (subvol_objectid && root_objectid != subvol_objectid) {
1710 /*
1711 * This will also catch a race condition where a
1712 * subvolume which was passed by ID is renamed and
1713 * another subvolume is renamed over the old location.
1714 */
1715 btrfs_err(fs_info,
1716 "subvol '%s' does not match subvolid %llu",
1717 subvol_name, subvol_objectid);
1718 ret = -EINVAL;
1719 }
1720 if (ret) {
1721 dput(root);
1722 root = ERR_PTR(ret);
1723 deactivate_locked_super(s);
1724 }
1725 }
1726
1727 out:
1728 mntput(mnt);
1729 kfree(subvol_name);
1730 return root;
1731 }
1732
1733 /*
1734 * Find a superblock for the given device / mount point.
1735 *
1736 * Note: This is based on mount_bdev from fs/super.c with a few additions
1737 * for multiple device setup. Make sure to keep it in sync.
1738 */
1739 static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
1740 int flags, const char *device_name, void *data)
1741 {
1742 struct block_device *bdev = NULL;
1743 struct super_block *s;
1744 struct btrfs_device *device = NULL;
1745 struct btrfs_fs_devices *fs_devices = NULL;
1746 struct btrfs_fs_info *fs_info = NULL;
1747 void *new_sec_opts = NULL;
1748 fmode_t mode = FMODE_READ;
1749 int error = 0;
1750
1751 if (!(flags & SB_RDONLY))
1752 mode |= FMODE_WRITE;
1753
1754 if (data) {
1755 error = security_sb_eat_lsm_opts(data, &new_sec_opts);
1756 if (error)
1757 return ERR_PTR(error);
1758 }
1759
1760 /*
1761 * Setup a dummy root and fs_info for test/set super. This is because
1762 * we don't actually fill this stuff out until open_ctree, but we need it
1763 * for finding an existing super; open_ctree will properly initialize the
1764 * file system specific settings later. btrfs_init_fs_info initializes the static elements
1765 * of the fs_info (locks and such) to make cleanup easier if we find a
1766 * superblock with our given fs_devices later on at sget() time.
1767 */
1768 fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
1769 if (!fs_info) {
1770 error = -ENOMEM;
1771 goto error_sec_opts;
1772 }
1773 btrfs_init_fs_info(fs_info);
1774
1775 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1776 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1777 if (!fs_info->super_copy || !fs_info->super_for_commit) {
1778 error = -ENOMEM;
1779 goto error_fs_info;
1780 }
1781
1782 mutex_lock(&uuid_mutex);
1783 error = btrfs_parse_device_options(data, mode, fs_type);
1784 if (error) {
1785 mutex_unlock(&uuid_mutex);
1786 goto error_fs_info;
1787 }
1788
1789 device = btrfs_scan_one_device(device_name, mode, fs_type);
1790 if (IS_ERR(device)) {
1791 mutex_unlock(&uuid_mutex);
1792 error = PTR_ERR(device);
1793 goto error_fs_info;
1794 }
1795
1796 fs_devices = device->fs_devices;
1797 fs_info->fs_devices = fs_devices;
1798
1799 error = btrfs_open_devices(fs_devices, mode, fs_type);
1800 mutex_unlock(&uuid_mutex);
1801 if (error)
1802 goto error_fs_info;
1803
1804 if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1805 error = -EACCES;
1806 goto error_close_devices;
1807 }
1808
1809 bdev = fs_devices->latest_dev->bdev;
1810 s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
1811 fs_info);
1812 if (IS_ERR(s)) {
1813 error = PTR_ERR(s);
1814 goto error_close_devices;
1815 }
1816
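/*
 * If sget() found an existing super for these devices, drop our
 * temporary fs_info and device handles; a ro/rw flag mismatch with the
 * existing mount is rejected with -EBUSY.
 */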
1817 if (s->s_root) {
1818 btrfs_close_devices(fs_devices);
1819 btrfs_free_fs_info(fs_info);
1820 if ((flags ^ s->s_flags) & SB_RDONLY)
1821 error = -EBUSY;
1822 } else {
1823 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1824 shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
1825 s->s_id);
1826 btrfs_sb(s)->bdev_holder = fs_type;
1827 if (!strstr(crc32c_impl(), "generic"))
1828 set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1829 error = btrfs_fill_super(s, fs_devices, data);
1830 }
1831 if (!error)
1832 error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL);
1833 security_free_mnt_opts(&new_sec_opts);
1834 if (error) {
1835 deactivate_locked_super(s);
1836 return ERR_PTR(error);
1837 }
1838
1839 return dget(s->s_root);
1840
1841 error_close_devices:
1842 btrfs_close_devices(fs_devices);
1843 error_fs_info:
1844 btrfs_free_fs_info(fs_info);
1845 error_sec_opts:
1846 security_free_mnt_opts(&new_sec_opts);
1847 return ERR_PTR(error);
1848 }
1849
1850 /*
1851 * Mount function which is called by VFS layer.
1852 *
1853 * In order to allow mounting a subvolume directly, btrfs uses mount_subtree(),
1854 * which needs the vfsmount* of the device's root (/). This means the device's
1855 * root has to be mounted internally in any case.
1856 *
1857 * Operation flow:
1858 * 1. Parse subvol id related options for later use in mount_subvol().
1859 *
1860 * 2. Mount device's root (/) by calling vfs_kern_mount().
1861 *
1862 * NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
1863 * first place. In order to avoid calling btrfs_mount() again, we use a
1864 * different file_system_type which is not registered with the VFS by
1865 * register_filesystem() (btrfs_root_fs_type). As a result,
1866 * btrfs_mount_root() is called. The return value will be used by
1867 * mount_subtree() in mount_subvol().
1868 *
1869 * 3. Call mount_subvol() to get the dentry of the subvolume. Since
1870 * "btrfs subvolume set-default" exists, mount_subvol() is always called.
1871 */
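/*
 * For example, "mount -o subvol=snap /dev/sda1 /mnt" follows this flow:
 * btrfs_parse_subvol_options() extracts "snap", vfs_kern_mount() on
 * btrfs_root_fs_type mounts the top-level subvolume via btrfs_mount_root(),
 * and mount_subvol() then returns the dentry of "snap" through
 * mount_subtree().
 */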
1872 static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1873 const char *device_name, void *data)
1874 {
1875 struct vfsmount *mnt_root;
1876 struct dentry *root;
1877 char *subvol_name = NULL;
1878 u64 subvol_objectid = 0;
1879 int error = 0;
1880
1881 error = btrfs_parse_subvol_options(data, &subvol_name,
1882 &subvol_objectid);
1883 if (error) {
1884 kfree(subvol_name);
1885 return ERR_PTR(error);
1886 }
1887
1888 /* mount device's root (/) */
1889 mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
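/*
 * -EBUSY from btrfs_mount_root() means the device is already mounted with
 * the opposite ro/rw state. Retry with matching flags so the existing
 * superblock is reused; if read-write was requested, the read-only mount
 * is remounted read-write below.
 */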
1890 if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
1891 if (flags & SB_RDONLY) {
1892 mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1893 flags & ~SB_RDONLY, device_name, data);
1894 } else {
1895 mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1896 flags | SB_RDONLY, device_name, data);
1897 if (IS_ERR(mnt_root)) {
1898 root = ERR_CAST(mnt_root);
1899 kfree(subvol_name);
1900 goto out;
1901 }
1902
1903 down_write(&mnt_root->mnt_sb->s_umount);
1904 error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
1905 up_write(&mnt_root->mnt_sb->s_umount);
1906 if (error < 0) {
1907 root = ERR_PTR(error);
1908 mntput(mnt_root);
1909 kfree(subvol_name);
1910 goto out;
1911 }
1912 }
1913 }
1914 if (IS_ERR(mnt_root)) {
1915 root = ERR_CAST(mnt_root);
1916 kfree(subvol_name);
1917 goto out;
1918 }
1919
1920 /* mount_subvol() will free subvol_name and mnt_root */
1921 root = mount_subvol(subvol_name, subvol_objectid, mnt_root);
1922
1923 out:
1924 return root;
1925 }
1926
1927 static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1928 u32 new_pool_size, u32 old_pool_size)
1929 {
1930 if (new_pool_size == old_pool_size)
1931 return;
1932
1933 fs_info->thread_pool_size = new_pool_size;
1934
1935 btrfs_info(fs_info, "resize thread pool %d -> %d",
1936 old_pool_size, new_pool_size);
1937
1938 btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1939 btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
1940 btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1941 btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1942 btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1943 btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1944 btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1945 }
1946
1947 static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1948 unsigned long old_opts, int flags)
1949 {
1950 if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1951 (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1952 (flags & SB_RDONLY))) {
1953 /* wait for any defraggers to finish */
1954 wait_event(fs_info->transaction_wait,
1955 (atomic_read(&fs_info->defrag_running) == 0));
1956 if (flags & SB_RDONLY)
1957 sync_filesystem(fs_info->sb);
1958 }
1959 }
1960
1961 static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1962 unsigned long old_opts)
1963 {
1964 const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
1965
1966 /*
1967 * We need to clean up all defraggable inodes if auto defrag has been
1968 * turned off or the filesystem is now read-only.
1969 */
1970 if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1971 (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1972 btrfs_cleanup_defrag_inodes(fs_info);
1973 }
1974
1975 /* If we toggled discard async */
1976 if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1977 btrfs_test_opt(fs_info, DISCARD_ASYNC))
1978 btrfs_discard_resume(fs_info);
1979 else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1980 !btrfs_test_opt(fs_info, DISCARD_ASYNC))
1981 btrfs_discard_cleanup(fs_info);
1982
1983 /* If we toggled space cache */
1984 if (cache_opt != btrfs_free_space_cache_v1_active(fs_info))
1985 btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
1986 }
1987
1988 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1989 {
1990 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1991 unsigned old_flags = sb->s_flags;
1992 unsigned long old_opts = fs_info->mount_opt;
1993 unsigned long old_compress_type = fs_info->compress_type;
1994 u64 old_max_inline = fs_info->max_inline;
1995 u32 old_thread_pool_size = fs_info->thread_pool_size;
1996 u32 old_metadata_ratio = fs_info->metadata_ratio;
1997 int ret;
1998
1999 sync_filesystem(sb);
2000 set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
2001
2002 if (data) {
2003 void *new_sec_opts = NULL;
2004
2005 ret = security_sb_eat_lsm_opts(data, &new_sec_opts);
2006 if (!ret)
2007 ret = security_sb_remount(sb, new_sec_opts);
2008 security_free_mnt_opts(&new_sec_opts);
2009 if (ret)
2010 goto restore;
2011 }
2012
2013 ret = btrfs_parse_options(fs_info, data, *flags);
2014 if (ret)
2015 goto restore;
2016
2017 ret = btrfs_check_features(fs_info, !(*flags & SB_RDONLY));
2018 if (ret < 0)
2019 goto restore;
2020
2021 btrfs_remount_begin(fs_info, old_opts, *flags);
2022 btrfs_resize_thread_pool(fs_info,
2023 fs_info->thread_pool_size, old_thread_pool_size);
2024
2025 if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
2026 (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2027 (!sb_rdonly(sb) || (*flags & SB_RDONLY))) {
2028 btrfs_warn(fs_info,
2029 "remount supports changing free space tree only from ro to rw");
2030 /* Make sure free space cache options match the state on disk */
2031 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2032 btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
2033 btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
2034 }
2035 if (btrfs_free_space_cache_v1_active(fs_info)) {
2036 btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE);
2037 btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
2038 }
2039 }
2040
2041 if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
2042 goto out;
2043
2044 if (*flags & SB_RDONLY) {
2045 /*
2046 * this also happens on 'umount -rf' or on shutdown, when
2047 * the filesystem is busy.
2048 */
2049 cancel_work_sync(&fs_info->async_reclaim_work);
2050 cancel_work_sync(&fs_info->async_data_reclaim_work);
2051
2052 btrfs_discard_cleanup(fs_info);
2053
2054 /* wait for the uuid_scan task to finish */
2055 down(&fs_info->uuid_tree_rescan_sem);
2056 /* Avoid complaints from lockdep et al. */
2057 up(&fs_info->uuid_tree_rescan_sem);
2058
2059 btrfs_set_sb_rdonly(sb);
2060
2061 /*
2062 * Setting SB_RDONLY will put the cleaner thread to
2063 * sleep at the next loop if it's already active.
2064 * If it's already asleep, we'll leave unused block
2065 * groups on disk until we're mounted read-write again
2066 * unless we clean them up here.
2067 */
2068 btrfs_delete_unused_bgs(fs_info);
2069
2070 /*
2071 * The cleaner task could be already running before we set the
2072 * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
2073 * We must make sure that after we finish the remount, i.e. after
2074 * we call btrfs_commit_super(), the cleaner can no longer start
2075 * a transaction - either because it was dropping a dead root,
2076 * running delayed iputs or deleting an unused block group (the
2077 * cleaner picked a block group from the list of unused block
2078 * groups before we were able to in the previous call to
2079 * btrfs_delete_unused_bgs()).
2080 */
2081 wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
2082 TASK_UNINTERRUPTIBLE);
2083
2084 /*
2085 * We've set the superblock to RO mode, so we might have made
2086 * the cleaner task sleep without running all pending delayed
2087 * iputs. Go through all the delayed iputs here, so that if an
2088 * unmount happens without remounting RW we don't end up at
2089 * finishing close_ctree() with a non-empty list of delayed
2090 * iputs.
2091 */
2092 btrfs_run_delayed_iputs(fs_info);
2093
2094 btrfs_dev_replace_suspend_for_unmount(fs_info);
2095 btrfs_scrub_cancel(fs_info);
2096 btrfs_pause_balance(fs_info);
2097
2098 /*
2099 * Pause the qgroup rescan worker if it is running. We don't want it
2100 * to still be running after we are in RO mode, as after that,
2101 * by the time we unmount, it might have left a transaction open,
2102 * so we would leak the transaction and/or crash.
2103 */
2104 btrfs_qgroup_wait_for_completion(fs_info, false);
2105
2106 ret = btrfs_commit_super(fs_info);
2107 if (ret)
2108 goto restore;
2109 } else {
2110 if (BTRFS_FS_ERROR(fs_info)) {
2111 btrfs_err(fs_info,
2112 "Remounting read-write after error is not allowed");
2113 ret = -EINVAL;
2114 goto restore;
2115 }
2116 if (fs_info->fs_devices->rw_devices == 0) {
2117 ret = -EACCES;
2118 goto restore;
2119 }
2120
2121 if (!btrfs_check_rw_degradable(fs_info, NULL)) {
2122 btrfs_warn(fs_info,
2123 "too many missing devices, writable remount is not allowed");
2124 ret = -EACCES;
2125 goto restore;
2126 }
2127
2128 if (btrfs_super_log_root(fs_info->super_copy) != 0) {
2129 btrfs_warn(fs_info,
2130 "mount required to replay tree-log, cannot remount read-write");
2131 ret = -EINVAL;
2132 goto restore;
2133 }
2134
2135 /*
2136 * NOTE: when remounting with a change that does writes, don't
2137 * put it anywhere above this point, as it is not safe to write
2138 * until we have passed the above checks.
2139 */
2140 ret = btrfs_start_pre_rw_mount(fs_info);
2141 if (ret)
2142 goto restore;
2143
2144 btrfs_clear_sb_rdonly(sb);
2145
2146 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
2147 }
2148 out:
2149 /*
2150 * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS,
2151 * since the absence of the flag means it can be toggled off by remount.
2152 */
2153 *flags |= SB_I_VERSION;
2154
2155 wake_up_process(fs_info->transaction_kthread);
2156 btrfs_remount_cleanup(fs_info, old_opts);
2157 btrfs_clear_oneshot_options(fs_info);
2158 clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
2159
2160 return 0;
2161
2162 restore:
2163 /* We've hit an error - don't reset SB_RDONLY */
2164 if (sb_rdonly(sb))
2165 old_flags |= SB_RDONLY;
2166 if (!(old_flags & SB_RDONLY))
2167 clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2168 sb->s_flags = old_flags;
2169 fs_info->mount_opt = old_opts;
2170 fs_info->compress_type = old_compress_type;
2171 fs_info->max_inline = old_max_inline;
2172 btrfs_resize_thread_pool(fs_info,
2173 old_thread_pool_size, fs_info->thread_pool_size);
2174 fs_info->metadata_ratio = old_metadata_ratio;
2175 btrfs_remount_cleanup(fs_info, old_opts);
2176 clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
2177
2178 return ret;
2179 }
2180
2181 /* Used to sort the devices by max_avail (descending sort) */
2182 static int btrfs_cmp_device_free_bytes(const void *a, const void *b)
2183 {
2184 const struct btrfs_device_info *dev_info1 = a;
2185 const struct btrfs_device_info *dev_info2 = b;
2186
2187 if (dev_info1->max_avail > dev_info2->max_avail)
2188 return -1;
2189 else if (dev_info1->max_avail < dev_info2->max_avail)
2190 return 1;
2191 return 0;
2192 }
2193
2194 /*
2195 * Sort the devices by max_avail, in which the max free extent size of each
2196 * device is stored (descending sort).
2197 */
2198 static inline void btrfs_descending_sort_devices(
2199 struct btrfs_device_info *devices,
2200 size_t nr_devices)
2201 {
2202 sort(devices, nr_devices, sizeof(struct btrfs_device_info),
2203 btrfs_cmp_device_free_bytes, NULL);
2204 }
2205
2206 /*
2207 * Helper to calculate the free space on the devices that can be used to store
2208 * file data.
2209 */
2210 static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
2211 u64 *free_bytes)
2212 {
2213 struct btrfs_device_info *devices_info;
2214 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2215 struct btrfs_device *device;
2216 u64 type;
2217 u64 avail_space;
2218 u64 min_stripe_size;
2219 int num_stripes = 1;
2220 int i = 0, nr_devices;
2221 const struct btrfs_raid_attr *rattr;
2222
2223 /*
2224 * We aren't under the device list lock, so this is racy-ish, but good
2225 * enough for our purposes.
2226 */
2227 nr_devices = fs_info->fs_devices->open_devices;
2228 if (!nr_devices) {
2229 smp_mb();
2230 nr_devices = fs_info->fs_devices->open_devices;
2231 ASSERT(nr_devices);
2232 if (!nr_devices) {
2233 *free_bytes = 0;
2234 return 0;
2235 }
2236 }
2237
2238 devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
2239 GFP_KERNEL);
2240 if (!devices_info)
2241 return -ENOMEM;
2242
2243 /* Calculate the minimum number of stripes for data space allocation */
2244 type = btrfs_data_alloc_profile(fs_info);
2245 rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
2246
2247 if (type & BTRFS_BLOCK_GROUP_RAID0)
2248 num_stripes = nr_devices;
2249 else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK)
2250 num_stripes = rattr->ncopies;
2251 else if (type & BTRFS_BLOCK_GROUP_RAID10)
2252 num_stripes = 4;
2253
2254 /* Adjust for more than 1 stripe per device */
2255 min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
2256
2257 rcu_read_lock();
2258 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2259 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2260 &device->dev_state) ||
2261 !device->bdev ||
2262 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2263 continue;
2264
2265 if (i >= nr_devices)
2266 break;
2267
2268 avail_space = device->total_bytes - device->bytes_used;
2269
2270 /* align with stripe_len */
2271 avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
2272
2273 /*
2274 * Ensure we have at least min_stripe_size on top of the
2275 * reserved space on the device.
2276 */
2277 if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size)
2278 continue;
2279
2280 avail_space -= BTRFS_DEVICE_RANGE_RESERVED;
2281
2282 devices_info[i].dev = device;
2283 devices_info[i].max_avail = avail_space;
2284
2285 i++;
2286 }
2287 rcu_read_unlock();
2288
2289 nr_devices = i;
2290
2291 btrfs_descending_sort_devices(devices_info, nr_devices);
2292
2293 i = nr_devices - 1;
2294 avail_space = 0;
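/*
 * Simulate the chunk allocator: devices_info is sorted by free space in
 * descending order, so walk from the device with the least free space
 * upwards. Each step allocates a virtual chunk striped across num_stripes
 * devices, sized by the smallest member of the set, and subtracts that
 * amount from the participating devices.
 */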
2295 while (nr_devices >= rattr->devs_min) {
2296 num_stripes = min(num_stripes, nr_devices);
2297
2298 if (devices_info[i].max_avail >= min_stripe_size) {
2299 int j;
2300 u64 alloc_size;
2301
2302 avail_space += devices_info[i].max_avail * num_stripes;
2303 alloc_size = devices_info[i].max_avail;
2304 for (j = i + 1 - num_stripes; j <= i; j++)
2305 devices_info[j].max_avail -= alloc_size;
2306 }
2307 i--;
2308 nr_devices--;
2309 }
2310
2311 kfree(devices_info);
2312 *free_bytes = avail_space;
2313 return 0;
2314 }
2315
2316 /*
2317 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
2318 *
2319 * If there's a redundant raid level at DATA block groups, use the respective
2320 * multiplier to scale the sizes.
2321 *
2322 * Unused device space usage is based on simulating the chunk allocator
2323 * algorithm that respects the device sizes and order of allocations. This is
2324 * a close approximation of the actual use but there are other factors that may
2325 * change the result (like a new metadata chunk).
2326 *
2327 * If metadata is exhausted, f_bavail will be 0.
2328 */
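/*
 * For example, if DATA block groups use RAID1, btrfs_bg_type_to_factor()
 * returns 2, so both the total size and the free data space computed below
 * are reported at half of their raw byte values.
 */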
2329 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2330 {
2331 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
2332 struct btrfs_super_block *disk_super = fs_info->super_copy;
2333 struct btrfs_space_info *found;
2334 u64 total_used = 0;
2335 u64 total_free_data = 0;
2336 u64 total_free_meta = 0;
2337 u32 bits = fs_info->sectorsize_bits;
2338 __be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
2339 unsigned factor = 1;
2340 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
2341 int ret;
2342 u64 thresh = 0;
2343 int mixed = 0;
2344
2345 list_for_each_entry(found, &fs_info->space_info, list) {
2346 if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
2347 int i;
2348
2349 total_free_data += found->disk_total - found->disk_used;
2350 total_free_data -=
2351 btrfs_account_ro_block_groups_free_space(found);
2352
2353 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2354 if (!list_empty(&found->block_groups[i]))
2355 factor = btrfs_bg_type_to_factor(
2356 btrfs_raid_array[i].bg_flag);
2357 }
2358 }
2359
2360 /*
2361 * Metadata in mixed block group profiles is accounted for in data
2362 */
2363 if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
2364 if (found->flags & BTRFS_BLOCK_GROUP_DATA)
2365 mixed = 1;
2366 else
2367 total_free_meta += found->disk_total -
2368 found->disk_used;
2369 }
2370
2371 total_used += found->disk_used;
2372 }
2373
2374 buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
2375 buf->f_blocks >>= bits;
2376 buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
2377
2378 /* Account global block reserve as used, it's in logical size already */
2379 spin_lock(&block_rsv->lock);
2380 /* Mixed block groups accounting is not byte-accurate, avoid overflow */
2381 if (buf->f_bfree >= block_rsv->size >> bits)
2382 buf->f_bfree -= block_rsv->size >> bits;
2383 else
2384 buf->f_bfree = 0;
2385 spin_unlock(&block_rsv->lock);
2386
2387 buf->f_bavail = div_u64(total_free_data, factor);
2388 ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
2389 if (ret)
2390 return ret;
2391 buf->f_bavail += div_u64(total_free_data, factor);
2392 buf->f_bavail = buf->f_bavail >> bits;
2393
2394 /*
2395 * We calculate the remaining metadata space minus global reserve. If
2396 * this is (supposedly) smaller than zero, there's no space. But this
2397 * does not hold in practice, the exhausted state happens while there is
2398 * still some positive delta. So we apply some guesswork and compare the
2399 * delta to a 4M threshold. (Practically observed delta was ~2M.)
2400 *
2401 * We probably cannot calculate the exact threshold value because this
2402 * depends on the internal reservations requested by various
2403 * operations, so some operations that consume a small amount of metadata
2404 * will succeed even if the Avail is zero. But this is better than the other
2405 * way around.
2406 */
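/*
 * Illustrative example: with a 512M global reserve, Avail is reported as 0
 * once the free metadata space drops below roughly 516M while no further
 * metadata chunks can be allocated (space_info->full), see the check below.
 */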
2407 thresh = SZ_4M;
2408
2409 /*
2410 * We only want to claim there's no available space if we can no longer
2411 * allocate chunks for our metadata profile and our global reserve will
2412 * not fit in the free metadata space. If we aren't ->full then we
2413 * still can allocate chunks and thus are fine using the currently
2414 * calculated f_bavail.
2415 */
2416 if (!mixed && block_rsv->space_info->full &&
2417 total_free_meta - thresh < block_rsv->size)
2418 buf->f_bavail = 0;
2419
2420 buf->f_type = BTRFS_SUPER_MAGIC;
2421 buf->f_bsize = dentry->d_sb->s_blocksize;
2422 buf->f_namelen = BTRFS_NAME_LEN;
2423
2424 /* We treat it as constant endianness (it doesn't matter _which_),
2425 * because we want the fsid to come out the same whether mounted
2426 * on a big-endian or little-endian host. */
2427 buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
2428 buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
2429 /* Mask in the root object ID too, to disambiguate subvols */
2430 buf->f_fsid.val[0] ^=
2431 BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
2432 buf->f_fsid.val[1] ^=
2433 BTRFS_I(d_inode(dentry))->root->root_key.objectid;
2434
2435 return 0;
2436 }
2437
2438 static void btrfs_kill_super(struct super_block *sb)
2439 {
2440 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2441 kill_anon_super(sb);
2442 btrfs_free_fs_info(fs_info);
2443 }
2444
2445 static struct file_system_type btrfs_fs_type = {
2446 .owner = THIS_MODULE,
2447 .name = "btrfs",
2448 .mount = btrfs_mount,
2449 .kill_sb = btrfs_kill_super,
2450 .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2451 };
2452
2453 static struct file_system_type btrfs_root_fs_type = {
2454 .owner = THIS_MODULE,
2455 .name = "btrfs",
2456 .mount = btrfs_mount_root,
2457 .kill_sb = btrfs_kill_super,
2458 .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
2459 };
2460
2461 MODULE_ALIAS_FS("btrfs");
2462
2463 static int btrfs_control_open(struct inode *inode, struct file *file)
2464 {
2465 /*
2466 * The control file's private_data is used to hold the
2467 * transaction when it is started and is used to keep
2468 * track of whether a transaction is already in progress.
2469 */
2470 file->private_data = NULL;
2471 return 0;
2472 }
2473
2474 /*
2475 * Used by /dev/btrfs-control for device ioctls.
2476 */
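/*
 * A minimal user space sketch of how these ioctls are typically driven,
 * illustrative only, error handling omitted (needs <fcntl.h>, <string.h>,
 * <sys/ioctl.h> and <linux/btrfs.h>):
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *	int fd = open("/dev/btrfs-control", O_RDWR);
 *
 *	strncpy(args.name, "/dev/sda1", BTRFS_PATH_NAME_MAX);
 *	ioctl(fd, BTRFS_IOC_SCAN_DEV, &args);
 *	close(fd);
 */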
2477 static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2478 unsigned long arg)
2479 {
2480 struct btrfs_ioctl_vol_args *vol;
2481 struct btrfs_device *device = NULL;
2482 dev_t devt = 0;
2483 int ret = -ENOTTY;
2484
2485 if (!capable(CAP_SYS_ADMIN))
2486 return -EPERM;
2487
2488 vol = memdup_user((void __user *)arg, sizeof(*vol));
2489 if (IS_ERR(vol))
2490 return PTR_ERR(vol);
2491 vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2492
2493 switch (cmd) {
2494 case BTRFS_IOC_SCAN_DEV:
2495 mutex_lock(&uuid_mutex);
2496 device = btrfs_scan_one_device(vol->name, FMODE_READ,
2497 &btrfs_root_fs_type);
2498 ret = PTR_ERR_OR_ZERO(device);
2499 mutex_unlock(&uuid_mutex);
2500 break;
2501 case BTRFS_IOC_FORGET_DEV:
2502 if (vol->name[0] != 0) {
2503 ret = lookup_bdev(vol->name, &devt);
2504 if (ret)
2505 break;
2506 }
2507 ret = btrfs_forget_devices(devt);
2508 break;
2509 case BTRFS_IOC_DEVICES_READY:
2510 mutex_lock(&uuid_mutex);
2511 device = btrfs_scan_one_device(vol->name, FMODE_READ,
2512 &btrfs_root_fs_type);
2513 if (IS_ERR(device)) {
2514 mutex_unlock(&uuid_mutex);
2515 ret = PTR_ERR(device);
2516 break;
2517 }
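/*
 * Ready means all devices that belong to this filesystem have been
 * scanned: report 0 in that case and non-zero otherwise.
 */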
2518 ret = !(device->fs_devices->num_devices ==
2519 device->fs_devices->total_devices);
2520 mutex_unlock(&uuid_mutex);
2521 break;
2522 case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2523 ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2524 break;
2525 }
2526
2527 kfree(vol);
2528 return ret;
2529 }
2530
2531 static int btrfs_freeze(struct super_block *sb)
2532 {
2533 struct btrfs_trans_handle *trans;
2534 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2535 struct btrfs_root *root = fs_info->tree_root;
2536
2537 set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2538 /*
2539 * We don't need a barrier here, we'll wait for any transaction that
2540 * could be in progress on other threads (and do delayed iputs that
2541 * we want to avoid on a frozen filesystem), or do the commit
2542 * ourselves.
2543 */
2544 trans = btrfs_attach_transaction_barrier(root);
2545 if (IS_ERR(trans)) {
2546 /* no transaction, don't bother */
2547 if (PTR_ERR(trans) == -ENOENT)
2548 return 0;
2549 return PTR_ERR(trans);
2550 }
2551 return btrfs_commit_transaction(trans);
2552 }
2553
2554 static int check_dev_super(struct btrfs_device *dev)
2555 {
2556 struct btrfs_fs_info *fs_info = dev->fs_info;
2557 struct btrfs_super_block *sb;
2558 u16 csum_type;
2559 int ret = 0;
2560
2561 /* This should be called with fs still frozen. */
2562 ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags));
2563
2564 /* Missing dev, no need to check. */
2565 if (!dev->bdev)
2566 return 0;
2567
2568 /* Only need to check the primary super block. */
2569 sb = btrfs_read_dev_one_super(dev->bdev, 0, true);
2570 if (IS_ERR(sb))
2571 return PTR_ERR(sb);
2572
2573 /* Verify the checksum. */
2574 csum_type = btrfs_super_csum_type(sb);
2575 if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) {
2576 btrfs_err(fs_info, "csum type changed, has %u expect %u",
2577 csum_type, btrfs_super_csum_type(fs_info->super_copy));
2578 ret = -EUCLEAN;
2579 goto out;
2580 }
2581
2582 if (btrfs_check_super_csum(fs_info, sb)) {
2583 btrfs_err(fs_info, "csum for on-disk super block no longer matches");
2584 ret = -EUCLEAN;
2585 goto out;
2586 }
2587
2588 /* btrfs_validate_super() includes a fsid check against super->fsid. */
2589 ret = btrfs_validate_super(fs_info, sb, 0);
2590 if (ret < 0)
2591 goto out;
2592
2593 if (btrfs_super_generation(sb) != fs_info->last_trans_committed) {
2594 btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
2595 btrfs_super_generation(sb),
2596 fs_info->last_trans_committed);
2597 ret = -EUCLEAN;
2598 goto out;
2599 }
2600 out:
2601 btrfs_release_disk_super(sb);
2602 return ret;
2603 }
2604
2605 static int btrfs_unfreeze(struct super_block *sb)
2606 {
2607 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2608 struct btrfs_device *device;
2609 int ret = 0;
2610
2611 /*
2612 * Make sure the fs has not been changed by accident (e.g. hibernated and
2613 * then modified by another OS).
2614 * If we find anything wrong, we mark the fs with an error immediately.
2615 *
2616 * And since the fs is frozen, no one can modify it yet, thus
2617 * we don't need to hold the device_list_mutex.
2618 */
2619 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
2620 ret = check_dev_super(device);
2621 if (ret < 0) {
2622 btrfs_handle_fs_error(fs_info, ret,
2623 "super block on devid %llu got modified unexpectedly",
2624 device->devid);
2625 break;
2626 }
2627 }
2628 clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2629
2630 /*
2631 * We still return 0, to allow the VFS layer to unfreeze the fs even if the
2632 * above checks failed. Since the fs is either fine or read-only, we're
2633 * safe to continue, without causing further damage.
2634 */
2635 return 0;
2636 }
2637
2638 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2639 {
2640 struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2641
2642 /*
2643 * There should always be a valid pointer in latest_dev; it may be stale
2644 * for a short moment in case it's being deleted, but it is still valid
2645 * until the end of the RCU grace period.
2646 */
2647 rcu_read_lock();
2648 seq_escape(m, rcu_str_deref(fs_info->fs_devices->latest_dev->name), " \t\n\\");
2649 rcu_read_unlock();
2650
2651 return 0;
2652 }
2653
2654 static const struct super_operations btrfs_super_ops = {
2655 .drop_inode = btrfs_drop_inode,
2656 .evict_inode = btrfs_evict_inode,
2657 .put_super = btrfs_put_super,
2658 .sync_fs = btrfs_sync_fs,
2659 .show_options = btrfs_show_options,
2660 .show_devname = btrfs_show_devname,
2661 .alloc_inode = btrfs_alloc_inode,
2662 .destroy_inode = btrfs_destroy_inode,
2663 .free_inode = btrfs_free_inode,
2664 .statfs = btrfs_statfs,
2665 .remount_fs = btrfs_remount,
2666 .freeze_fs = btrfs_freeze,
2667 .unfreeze_fs = btrfs_unfreeze,
2668 };
2669
2670 static const struct file_operations btrfs_ctl_fops = {
2671 .open = btrfs_control_open,
2672 .unlocked_ioctl = btrfs_control_ioctl,
2673 .compat_ioctl = compat_ptr_ioctl,
2674 .owner = THIS_MODULE,
2675 .llseek = noop_llseek,
2676 };
2677
2678 static struct miscdevice btrfs_misc = {
2679 .minor = BTRFS_MINOR,
2680 .name = "btrfs-control",
2681 .fops = &btrfs_ctl_fops
2682 };
2683
2684 MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2685 MODULE_ALIAS("devname:btrfs-control");
2686
2687 static int __init btrfs_interface_init(void)
2688 {
2689 return misc_register(&btrfs_misc);
2690 }
2691
2692 static __cold void btrfs_interface_exit(void)
2693 {
2694 misc_deregister(&btrfs_misc);
2695 }
2696
2697 static void __init btrfs_print_mod_info(void)
2698 {
2699 static const char options[] = ""
2700 #ifdef CONFIG_BTRFS_DEBUG
2701 ", debug=on"
2702 #endif
2703 #ifdef CONFIG_BTRFS_ASSERT
2704 ", assert=on"
2705 #endif
2706 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2707 ", integrity-checker=on"
2708 #endif
2709 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
2710 ", ref-verify=on"
2711 #endif
2712 #ifdef CONFIG_BLK_DEV_ZONED
2713 ", zoned=yes"
2714 #else
2715 ", zoned=no"
2716 #endif
2717 #ifdef CONFIG_FS_VERITY
2718 ", fsverity=yes"
2719 #else
2720 ", fsverity=no"
2721 #endif
2722 ;
2723 pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
2724 }
2725
2726 static int __init init_btrfs_fs(void)
2727 {
2728 int err;
2729
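/*
 * Each initialization step below is undone in reverse order by the error
 * labels at the bottom of this function if a later step fails.
 */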
2730 btrfs_props_init();
2731
2732 err = btrfs_init_sysfs();
2733 if (err)
2734 return err;
2735
2736 btrfs_init_compress();
2737
2738 err = btrfs_init_cachep();
2739 if (err)
2740 goto free_compress;
2741
2742 err = extent_state_init_cachep();
2743 if (err)
2744 goto free_cachep;
2745
2746 err = extent_buffer_init_cachep();
2747 if (err)
2748 goto free_extent_cachep;
2749
2750 err = btrfs_bioset_init();
2751 if (err)
2752 goto free_eb_cachep;
2753
2754 err = extent_map_init();
2755 if (err)
2756 goto free_bioset;
2757
2758 err = ordered_data_init();
2759 if (err)
2760 goto free_extent_map;
2761
2762 err = btrfs_delayed_inode_init();
2763 if (err)
2764 goto free_ordered_data;
2765
2766 err = btrfs_auto_defrag_init();
2767 if (err)
2768 goto free_delayed_inode;
2769
2770 err = btrfs_delayed_ref_init();
2771 if (err)
2772 goto free_auto_defrag;
2773
2774 err = btrfs_prelim_ref_init();
2775 if (err)
2776 goto free_delayed_ref;
2777
2778 err = btrfs_interface_init();
2779 if (err)
2780 goto free_prelim_ref;
2781
2782 btrfs_print_mod_info();
2783
2784 err = btrfs_run_sanity_tests();
2785 if (err)
2786 goto unregister_ioctl;
2787
2788 err = register_filesystem(&btrfs_fs_type);
2789 if (err)
2790 goto unregister_ioctl;
2791
2792 return 0;
2793
2794 unregister_ioctl:
2795 btrfs_interface_exit();
2796 free_prelim_ref:
2797 btrfs_prelim_ref_exit();
2798 free_delayed_ref:
2799 btrfs_delayed_ref_exit();
2800 free_auto_defrag:
2801 btrfs_auto_defrag_exit();
2802 free_delayed_inode:
2803 btrfs_delayed_inode_exit();
2804 free_ordered_data:
2805 ordered_data_exit();
2806 free_extent_map:
2807 extent_map_exit();
2808 free_bioset:
2809 btrfs_bioset_exit();
2810 free_eb_cachep:
2811 extent_buffer_free_cachep();
2812 free_extent_cachep:
2813 extent_state_free_cachep();
2814 free_cachep:
2815 btrfs_destroy_cachep();
2816 free_compress:
2817 btrfs_exit_compress();
2818 btrfs_exit_sysfs();
2819
2820 return err;
2821 }
2822
2823 static void __exit exit_btrfs_fs(void)
2824 {
2825 btrfs_destroy_cachep();
2826 btrfs_delayed_ref_exit();
2827 btrfs_auto_defrag_exit();
2828 btrfs_delayed_inode_exit();
2829 btrfs_prelim_ref_exit();
2830 ordered_data_exit();
2831 extent_map_exit();
2832 btrfs_bioset_exit();
2833 extent_state_free_cachep();
2834 extent_buffer_free_cachep();
2835 btrfs_interface_exit();
2836 unregister_filesystem(&btrfs_fs_type);
2837 btrfs_exit_sysfs();
2838 btrfs_cleanup_fs_uuids();
2839 btrfs_exit_compress();
2840 }
2841
2842 late_initcall(init_btrfs_fs);
2843 module_exit(exit_btrfs_fs)
2844
2845 MODULE_LICENSE("GPL");
2846 MODULE_SOFTDEP("pre: crc32c");
2847 MODULE_SOFTDEP("pre: xxhash64");
2848 MODULE_SOFTDEP("pre: sha256");
2849 MODULE_SOFTDEP("pre: blake2b-256");
2850