// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

static struct bio_set btrfs_bioset;

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity        = 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity        = 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}
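
/*
 * For illustration only (hypothetical caller, not part of this file):
 * profile attributes are looked up by converting the block group flags
 * to an array index first, e.g.:
 *
 *	enum btrfs_raid_types idx;
 *
 *	idx = btrfs_bg_flags_to_raid_index(BTRFS_BLOCK_GROUP_RAID6);
 *	ASSERT(btrfs_raid_array[idx].nparity == 2);
 *	ASSERT(strcmp(btrfs_raid_array[idx].raid_name, "raid6") == 0);
 */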

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
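
/*
 * Example output (illustrative): for bg_flags == BTRFS_BLOCK_GROUP_DATA |
 * BTRFS_BLOCK_GROUP_RAID1 the buffer receives "data|raid1". Any unknown
 * bits left over after all known flags were consumed are emitted as a
 * trailing "0x..." hex value.
 */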

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op, u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     struct btrfs_io_stripe *smap,
			     int *mirror_num_ret, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects the post_commit_list
 * of individual devices, since they can be added to the transaction's
 * post_commit_list only with the chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of the Paused state, fs_info::exclusive_operation
 * remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
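
/*
 * A minimal sketch of the nesting order above (hypothetical call site,
 * for illustration only):
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Acquiring them in any other relative order risks an ABBA deadlock.
 */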

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or an
 * ERR_PTR() on failure. The returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
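
/*
 * Illustrative call pattern (hypothetical caller): the result must be
 * checked with IS_ERR() rather than against NULL, and an instance that
 * was never linked onto a list may simply be freed again:
 *
 *	fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
 *	if (IS_ERR(fs_devices))
 *		return PTR_ERR(fs_devices);
 *	...
 *	kfree(fs_devices);
 */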

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device has completed its fsid
	 * change but belongs to a fs_devices that was created by first
	 * scanning a device which hadn't had its fsid/metadata_uuid changed
	 * at all and had the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle the case where the scanned device has completed its fsid
	 * change but belongs to a fs_devices that was created by a device
	 * that has an outdated pair of fsid/metadata_uuid and the
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/**
 *  Search for and remove all stale devices (which are not mounted). When
 *  both inputs are NULL, it will search for and release all stale devices.
 *
 *  @devt:	Optional. When provided, release only unmounted devices
 *		matching this devt.
 *  @skip_device: Optional. Skip this device when searching for stale
 *		devices.
 *
 *  Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
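
/*
 * Example of the calling convention (see btrfs_forget_devices() further
 * below for the real user): the caller must hold uuid_mutex.
 *
 *	mutex_lock(&uuid_mutex);
 *	ret = btrfs_free_stale_devices(devt, NULL);
 *	mutex_unlock(&uuid_mutex);
 */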

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(bdev))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle a scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such a disk can belong to an fs which has its FSID changed or to
 * one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the currently scanned device
	 * didn't observe them, meaning its fsid will be different from theirs.
	 * We need to handle two subcases:
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *  are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time the fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
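
/*
 * Summary of how device_list_add() below chooses the fs_devices lookup
 * helper based on the scanned super block's flags (derived from the code
 * below, for quick reference):
 *
 *	CHANGING_FSID_V2  METADATA_UUID	 helper
 *	---------------------------------------------------------------
 *	set		  not set	 find_fsid_inprogress()
 *	set		  set		 find_fsid_changed()
 *	not set		  set		 find_fsid_with_metadata_uuid()
 *	not set		  not set	 find_fsid_reverted_metadata(),
 *					 then plain find_fsid()
 */
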
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs_devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			btrfs_err(NULL,
		"device %s belongs to fsid %pU, and the fs is already mounted",
				  path, fs_devices->fsid);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);
		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted:
		 * 1. If you are here and the device->name is NULL, that
		 *    means this device was missing at the time of FS mount.
		 * 2. If you are here and the device->name is different
		 *    from 'path', that means either
		 *      a. The same device disappeared and reappeared with a
		 *         different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions while it was away, and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow updates to btrfs_fs_device through the
		 * btrfs dev scan cli after the FS has been mounted.  We're
		 * still tracking a problem where systems fail mount by
		 * subvolume id when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid;
		 * make sure it's the same device if the device is mounted.
		 *
		 * NOTE: the device->fs_info may not be reliable here, so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero the
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know the devids belonging to this
 * filesystem, remove the devices that do not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be
	 * further committed. However, that error might be transient, and if
	 * we unmount the filesystem and mount it again, we should allow the
	 * mount to succeed (btrfs_check_rw_degradable() should not fail) - if
	 * after mounting the filesystem again we still get flush errors, then
	 * we will again abort any transaction and set the error state,
	 * guaranteeing no commits of unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by the uuid_mutex.
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
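
/*
 * Worked example for the page arithmetic above (assuming 4K pages): the
 * primary super block lives at bytenr 65536, so index = 65536 >>
 * PAGE_SHIFT = 16 and offset_in_page(65536) = 0. The struct must not
 * cross into page 17, which the straddle check above guarantees.
 */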

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called outside of the
 * mount path and we are not allowed to call set_blocksize during the scan.
 * The superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * We would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
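
/*
 * Worked example (illustrative numbers): with a search range of
 * [16M, 16M + 8M) and a CHUNK_ALLOCATED range recorded at [18M, 20M),
 * the in_range() checks above detect the overlap and *start is advanced
 * past the recorded range's end, so the caller re-evaluates the
 * remaining hole starting at 20M.
 */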

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device on which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending
				 * extents. Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
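
/*
 * Worked example (regular allocation policy, illustrative numbers):
 * given a hole of [16M, 24M) and a pending chunk at [18M, 20M),
 * contains_pending_extent() moves *hole_start past the pending range
 * and *hole_size shrinks to the 4M that remain up to hole_end, so the
 * caller then compares the reduced hole against num_bytes.
 */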

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extents freed in the current
 * transaction are not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
1712 
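/*
 * Delete the dev extent item covering @start on @device from the device
 * tree and return the extent's length via @dev_extent_len.
 */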
1713 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1714 			  struct btrfs_device *device,
1715 			  u64 start, u64 *dev_extent_len)
1716 {
1717 	struct btrfs_fs_info *fs_info = device->fs_info;
1718 	struct btrfs_root *root = fs_info->dev_root;
1719 	int ret;
1720 	struct btrfs_path *path;
1721 	struct btrfs_key key;
1722 	struct btrfs_key found_key;
1723 	struct extent_buffer *leaf = NULL;
1724 	struct btrfs_dev_extent *extent = NULL;
1725 
1726 	path = btrfs_alloc_path();
1727 	if (!path)
1728 		return -ENOMEM;
1729 
1730 	key.objectid = device->devid;
1731 	key.offset = start;
1732 	key.type = BTRFS_DEV_EXTENT_KEY;
1733 again:
1734 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1735 	if (ret > 0) {
1736 		ret = btrfs_previous_item(root, path, key.objectid,
1737 					  BTRFS_DEV_EXTENT_KEY);
1738 		if (ret)
1739 			goto out;
1740 		leaf = path->nodes[0];
1741 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1742 		extent = btrfs_item_ptr(leaf, path->slots[0],
1743 					struct btrfs_dev_extent);
1744 		BUG_ON(found_key.offset > start || found_key.offset +
1745 		       btrfs_dev_extent_length(leaf, extent) < start);
1746 		key = found_key;
1747 		btrfs_release_path(path);
1748 		goto again;
1749 	} else if (ret == 0) {
1750 		leaf = path->nodes[0];
1751 		extent = btrfs_item_ptr(leaf, path->slots[0],
1752 					struct btrfs_dev_extent);
1753 	} else {
1754 		goto out;
1755 	}
1756 
1757 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1758 
1759 	ret = btrfs_del_item(trans, root, path);
1760 	if (ret == 0)
1761 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1762 out:
1763 	btrfs_free_path(path);
1764 	return ret;
1765 }
1766 
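/*
 * Return the logical start offset right past the last chunk mapping, i.e.
 * the offset at which the next chunk can be created.
 */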
1767 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1768 {
1769 	struct extent_map_tree *em_tree;
1770 	struct extent_map *em;
1771 	struct rb_node *n;
1772 	u64 ret = 0;
1773 
1774 	em_tree = &fs_info->mapping_tree;
1775 	read_lock(&em_tree->lock);
1776 	n = rb_last(&em_tree->map.rb_root);
1777 	if (n) {
1778 		em = rb_entry(n, struct extent_map, rb_node);
1779 		ret = em->start + em->len;
1780 	}
1781 	read_unlock(&em_tree->lock);
1782 
1783 	return ret;
1784 }
1785 
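/*
 * Find the next available devid by locating the highest existing devid in
 * the chunk tree; *devid_ret is set to that devid plus one (or to 1 if no
 * device items exist yet).
 */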
1786 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1787 				    u64 *devid_ret)
1788 {
1789 	int ret;
1790 	struct btrfs_key key;
1791 	struct btrfs_key found_key;
1792 	struct btrfs_path *path;
1793 
1794 	path = btrfs_alloc_path();
1795 	if (!path)
1796 		return -ENOMEM;
1797 
1798 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1799 	key.type = BTRFS_DEV_ITEM_KEY;
1800 	key.offset = (u64)-1;
1801 
1802 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1803 	if (ret < 0)
1804 		goto error;
1805 
1806 	if (ret == 0) {
1807 		/* Corruption */
1808 		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1809 		ret = -EUCLEAN;
1810 		goto error;
1811 	}
1812 
1813 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1814 				  BTRFS_DEV_ITEMS_OBJECTID,
1815 				  BTRFS_DEV_ITEM_KEY);
1816 	if (ret) {
1817 		*devid_ret = 1;
1818 	} else {
1819 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1820 				      path->slots[0]);
1821 		*devid_ret = found_key.offset + 1;
1822 	}
1823 	ret = 0;
1824 error:
1825 	btrfs_free_path(path);
1826 	return ret;
1827 }
1828 
1829 /*
1830  * The device information is stored in the chunk root.
1831  * The btrfs_device struct should be fully filled in before calling this.
1832  */
1833 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1834 			    struct btrfs_device *device)
1835 {
1836 	int ret;
1837 	struct btrfs_path *path;
1838 	struct btrfs_dev_item *dev_item;
1839 	struct extent_buffer *leaf;
1840 	struct btrfs_key key;
1841 	unsigned long ptr;
1842 
1843 	path = btrfs_alloc_path();
1844 	if (!path)
1845 		return -ENOMEM;
1846 
1847 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1848 	key.type = BTRFS_DEV_ITEM_KEY;
1849 	key.offset = device->devid;
1850 
1851 	btrfs_reserve_chunk_metadata(trans, true);
1852 	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1853 				      &key, sizeof(*dev_item));
1854 	btrfs_trans_release_chunk_metadata(trans);
1855 	if (ret)
1856 		goto out;
1857 
1858 	leaf = path->nodes[0];
1859 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1860 
1861 	btrfs_set_device_id(leaf, dev_item, device->devid);
1862 	btrfs_set_device_generation(leaf, dev_item, 0);
1863 	btrfs_set_device_type(leaf, dev_item, device->type);
1864 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1865 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1866 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1867 	btrfs_set_device_total_bytes(leaf, dev_item,
1868 				     btrfs_device_get_disk_total_bytes(device));
1869 	btrfs_set_device_bytes_used(leaf, dev_item,
1870 				    btrfs_device_get_bytes_used(device));
1871 	btrfs_set_device_group(leaf, dev_item, 0);
1872 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1873 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1874 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1875 
1876 	ptr = btrfs_device_uuid(dev_item);
1877 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1878 	ptr = btrfs_device_fsid(dev_item);
1879 	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1880 			    ptr, BTRFS_FSID_SIZE);
1881 	btrfs_mark_buffer_dirty(leaf);
1882 
1883 	ret = 0;
1884 out:
1885 	btrfs_free_path(path);
1886 	return ret;
1887 }
1888 
1889 /*
1890  * Function to update ctime/mtime for a given device path.
1891  * Mainly used for ctime/mtime based probes by tools like libblkid.
1892  *
1893  * We don't care about errors here, this is just to be kind to userspace.
1894  */
1895 static void update_dev_time(const char *device_path)
1896 {
1897 	struct path path;
1898 	struct timespec64 now;
1899 	int ret;
1900 
1901 	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1902 	if (ret)
1903 		return;
1904 
1905 	now = current_time(d_inode(path.dentry));
1906 	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1907 	path_put(&path);
1908 }
1909 
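/*
 * Delete the device item of @device from the chunk tree.
 */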
1910 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1911 			     struct btrfs_device *device)
1912 {
1913 	struct btrfs_root *root = device->fs_info->chunk_root;
1914 	int ret;
1915 	struct btrfs_path *path;
1916 	struct btrfs_key key;
1917 
1918 	path = btrfs_alloc_path();
1919 	if (!path)
1920 		return -ENOMEM;
1921 
1922 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1923 	key.type = BTRFS_DEV_ITEM_KEY;
1924 	key.offset = device->devid;
1925 
1926 	btrfs_reserve_chunk_metadata(trans, false);
1927 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1928 	btrfs_trans_release_chunk_metadata(trans);
1929 	if (ret) {
1930 		if (ret > 0)
1931 			ret = -ENOENT;
1932 		goto out;
1933 	}
1934 
1935 	ret = btrfs_del_item(trans, root, path);
1936 out:
1937 	btrfs_free_path(path);
1938 	return ret;
1939 }
1940 
1941 /*
1942  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1943  * filesystem. It's up to the caller to adjust that number regarding e.g. device
1944  * replace.
1945  */
1946 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1947 		u64 num_devices)
1948 {
1949 	u64 all_avail;
1950 	unsigned seq;
1951 	int i;
1952 
1953 	do {
1954 		seq = read_seqbegin(&fs_info->profiles_lock);
1955 
1956 		all_avail = fs_info->avail_data_alloc_bits |
1957 			    fs_info->avail_system_alloc_bits |
1958 			    fs_info->avail_metadata_alloc_bits;
1959 	} while (read_seqretry(&fs_info->profiles_lock, seq));
1960 
1961 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1962 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1963 			continue;
1964 
1965 		if (num_devices < btrfs_raid_array[i].devs_min)
1966 			return btrfs_raid_array[i].mindev_error;
1967 	}
1968 
1969 	return 0;
1970 }
1971 
1972 static struct btrfs_device *btrfs_find_next_active_device(
1973 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1974 {
1975 	struct btrfs_device *next_device;
1976 
1977 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1978 		if (next_device != device &&
1979 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1980 		    && next_device->bdev)
1981 			return next_device;
1982 	}
1983 
1984 	return NULL;
1985 }
1986 
1987 /*
1988  * Helper function to check if the given device is part of s_bdev / latest_dev
1989  * and replace it with the provided or the next active device. In the context
1990  * where this function is called, there should always be another active
1991  * device (or next_device) available.
1992  */
1993 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1994 					    struct btrfs_device *next_device)
1995 {
1996 	struct btrfs_fs_info *fs_info = device->fs_info;
1997 
1998 	if (!next_device)
1999 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2000 							    device);
2001 	ASSERT(next_device);
2002 
2003 	if (fs_info->sb->s_bdev &&
2004 			(fs_info->sb->s_bdev == device->bdev))
2005 		fs_info->sb->s_bdev = next_device->bdev;
2006 
2007 	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2008 		fs_info->fs_devices->latest_dev = next_device;
2009 }
2010 
2011 /*
2012  * Return btrfs_fs_devices::num_devices excluding the device that is
2013  * currently being replaced.
2014  */
2015 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2016 {
2017 	u64 num_devices = fs_info->fs_devices->num_devices;
2018 
2019 	down_read(&fs_info->dev_replace.rwsem);
2020 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2021 		ASSERT(num_devices > 1);
2022 		num_devices--;
2023 	}
2024 	up_read(&fs_info->dev_replace.rwsem);
2025 
2026 	return num_devices;
2027 }
2028 
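/*
 * Clear the magic of every super block copy on @bdev (or reset the
 * superblock log zones on zoned block devices) so the device is no longer
 * recognized as a btrfs member, then notify udev and libblkid.
 */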
2029 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2030 			       struct block_device *bdev,
2031 			       const char *device_path)
2032 {
2033 	struct btrfs_super_block *disk_super;
2034 	int copy_num;
2035 
2036 	if (!bdev)
2037 		return;
2038 
2039 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2040 		struct page *page;
2041 		int ret;
2042 
2043 		disk_super = btrfs_read_dev_one_super(bdev, copy_num, false);
2044 		if (IS_ERR(disk_super))
2045 			continue;
2046 
2047 		if (bdev_is_zoned(bdev)) {
2048 			btrfs_reset_sb_log_zones(bdev, copy_num);
2049 			continue;
2050 		}
2051 
2052 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2053 
2054 		page = virt_to_page(disk_super);
2055 		set_page_dirty(page);
2056 		lock_page(page);
2057 		/* write_one_page() unlocks the page */
2058 		ret = write_one_page(page);
2059 		if (ret)
2060 			btrfs_warn(fs_info,
2061 				"error clearing superblock number %d (%d)",
2062 				copy_num, ret);
2063 		btrfs_release_disk_super(disk_super);
2064 
2065 	}
2066 
2067 	/* Notify udev that device has changed */
2068 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2069 
2070 	/* Update ctime/mtime for device path for libblkid */
2071 	update_dev_time(device_path);
2072 }
2073 
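/*
 * Remove the device looked up via @args from the filesystem: shrink it to
 * zero size, delete its device item, unlink it from the device lists and
 * scratch its super blocks.  The caller must do the final blkdev_put()
 * using the values returned via @bdev and @mode.
 */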
2074 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2075 		    struct btrfs_dev_lookup_args *args,
2076 		    struct block_device **bdev, fmode_t *mode)
2077 {
2078 	struct btrfs_trans_handle *trans;
2079 	struct btrfs_device *device;
2080 	struct btrfs_fs_devices *cur_devices;
2081 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2082 	u64 num_devices;
2083 	int ret = 0;
2084 
2085 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2086 		btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
2087 		return -EINVAL;
2088 	}
2089 
2090 	/*
2091 	 * The device list in fs_devices is accessed without locks (neither
2092 	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2093 	 * filesystem and another device rm cannot run.
2094 	 */
2095 	num_devices = btrfs_num_devices(fs_info);
2096 
2097 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2098 	if (ret)
2099 		return ret;
2100 
2101 	device = btrfs_find_device(fs_info->fs_devices, args);
2102 	if (!device) {
2103 		if (args->missing)
2104 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2105 		else
2106 			ret = -ENOENT;
2107 		return ret;
2108 	}
2109 
2110 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2111 		btrfs_warn_in_rcu(fs_info,
2112 		  "cannot remove device %s (devid %llu) due to active swapfile",
2113 				  rcu_str_deref(device->name), device->devid);
2114 		return -ETXTBSY;
2115 	}
2116 
2117 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2118 		return BTRFS_ERROR_DEV_TGT_REPLACE;
2119 
2120 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2121 	    fs_info->fs_devices->rw_devices == 1)
2122 		return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2123 
2124 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2125 		mutex_lock(&fs_info->chunk_mutex);
2126 		list_del_init(&device->dev_alloc_list);
2127 		device->fs_devices->rw_devices--;
2128 		mutex_unlock(&fs_info->chunk_mutex);
2129 	}
2130 
2131 	ret = btrfs_shrink_device(device, 0);
2132 	if (ret)
2133 		goto error_undo;
2134 
2135 	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2136 	if (IS_ERR(trans)) {
2137 		ret = PTR_ERR(trans);
2138 		goto error_undo;
2139 	}
2140 
2141 	ret = btrfs_rm_dev_item(trans, device);
2142 	if (ret) {
2143 		/* Any error in dev item removal is critical */
2144 		btrfs_crit(fs_info,
2145 			   "failed to remove device item for devid %llu: %d",
2146 			   device->devid, ret);
2147 		btrfs_abort_transaction(trans, ret);
2148 		btrfs_end_transaction(trans);
2149 		return ret;
2150 	}
2151 
2152 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2153 	btrfs_scrub_cancel_dev(device);
2154 
2155 	/*
2156 	 * the device list mutex makes sure that we don't change
2157 	 * the device list while someone else is writing out all
2158 	 * the device supers. Whoever is writing all supers should
2159 	 * lock the device list mutex before getting the number of
2160 	 * devices in the super block (super_copy). Conversely,
2161 	 * whoever updates the number of devices in the super block
2162 	 * (super_copy) should hold the device list mutex.
2163 	 */
2164 
2165 	/*
2166 	 * In normal cases cur_devices == fs_devices. But when deleting
2167 	 * a seed device, cur_devices should point to the seed's own
2168 	 * fs_devices listed under fs_devices->seed_list.
2169 	 */
2170 	cur_devices = device->fs_devices;
2171 	mutex_lock(&fs_devices->device_list_mutex);
2172 	list_del_rcu(&device->dev_list);
2173 
2174 	cur_devices->num_devices--;
2175 	cur_devices->total_devices--;
2176 	/* Update total_devices of the parent fs_devices if it's seed */
2177 	if (cur_devices != fs_devices)
2178 		fs_devices->total_devices--;
2179 
2180 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2181 		cur_devices->missing_devices--;
2182 
2183 	btrfs_assign_next_active_device(device, NULL);
2184 
2185 	if (device->bdev) {
2186 		cur_devices->open_devices--;
2187 		/* remove sysfs entry */
2188 		btrfs_sysfs_remove_device(device);
2189 	}
2190 
2191 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2192 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2193 	mutex_unlock(&fs_devices->device_list_mutex);
2194 
2195 	/*
2196 	 * At this point, the device is zero sized and detached from the
2197 	 * devices list.  All that's left is to zero out the old supers and
2198 	 * free the device.
2199 	 *
2200 	 * We cannot call btrfs_close_bdev() here because we're holding the sb
2201 	 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2202 	 * block device and its dependencies.  Instead just flush the device
2203 	 * and let the caller do the final blkdev_put.
2204 	 */
2205 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2206 		btrfs_scratch_superblocks(fs_info, device->bdev,
2207 					  device->name->str);
2208 		if (device->bdev) {
2209 			sync_blockdev(device->bdev);
2210 			invalidate_bdev(device->bdev);
2211 		}
2212 	}
2213 
2214 	*bdev = device->bdev;
2215 	*mode = device->mode;
2216 	synchronize_rcu();
2217 	btrfs_free_device(device);
2218 
2219 	/*
2220 	 * This can happen if cur_devices is the private seed devices list.  We
2221 	 * cannot call close_fs_devices() here because it expects the uuid_mutex
2222 	 * to be held, but in fact we don't need that for the private
2223 	 * seed_devices, we can simply decrement cur_devices->opened and then
2224 	 * remove it from our list and free the fs_devices.
2225 	 */
2226 	if (cur_devices->num_devices == 0) {
2227 		list_del_init(&cur_devices->seed_list);
2228 		ASSERT(cur_devices->opened == 1);
2229 		cur_devices->opened--;
2230 		free_fs_devices(cur_devices);
2231 	}
2232 
2233 	ret = btrfs_commit_transaction(trans);
2234 
2235 	return ret;
2236 
2237 error_undo:
2238 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2239 		mutex_lock(&fs_info->chunk_mutex);
2240 		list_add(&device->dev_alloc_list,
2241 			 &fs_devices->alloc_list);
2242 		device->fs_devices->rw_devices++;
2243 		mutex_unlock(&fs_info->chunk_mutex);
2244 	}
2245 	return ret;
2246 }
2247 
2248 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2249 {
2250 	struct btrfs_fs_devices *fs_devices;
2251 
2252 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2253 
2254 	/*
2255 	 * In the case of an fs with no seed, srcdev->fs_devices will point
2256 	 * to the fs_devices of fs_info. However, when the dev being replaced
2257 	 * is a seed dev it will point to the seed's local fs_devices. In
2258 	 * short, srcdev will have its correct fs_devices in both cases.
2259 	 */
2260 	fs_devices = srcdev->fs_devices;
2261 
2262 	list_del_rcu(&srcdev->dev_list);
2263 	list_del(&srcdev->dev_alloc_list);
2264 	fs_devices->num_devices--;
2265 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2266 		fs_devices->missing_devices--;
2267 
2268 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2269 		fs_devices->rw_devices--;
2270 
2271 	if (srcdev->bdev)
2272 		fs_devices->open_devices--;
2273 }
2274 
2275 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2276 {
2277 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2278 
2279 	mutex_lock(&uuid_mutex);
2280 
2281 	btrfs_close_bdev(srcdev);
2282 	synchronize_rcu();
2283 	btrfs_free_device(srcdev);
2284 
2285 	/* if there are no devices left we'd rather delete the fs_devices */
2286 	if (!fs_devices->num_devices) {
2287 		/*
2288 		 * On a mounted FS, num_devices can't be zero unless it's a
2289 		 * seed. In case of a seed device being replaced, the replace
2290 		 * target is added to the sprout FS, so there will be no
2291 		 * devices left under the seed FS.
2292 		 */
2293 		ASSERT(fs_devices->seeding);
2294 
2295 		list_del_init(&fs_devices->seed_list);
2296 		close_fs_devices(fs_devices);
2297 		free_fs_devices(fs_devices);
2298 	}
2299 	mutex_unlock(&uuid_mutex);
2300 }
2301 
2302 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2303 {
2304 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2305 
2306 	mutex_lock(&fs_devices->device_list_mutex);
2307 
2308 	btrfs_sysfs_remove_device(tgtdev);
2309 
2310 	if (tgtdev->bdev)
2311 		fs_devices->open_devices--;
2312 
2313 	fs_devices->num_devices--;
2314 
2315 	btrfs_assign_next_active_device(tgtdev, NULL);
2316 
2317 	list_del_rcu(&tgtdev->dev_list);
2318 
2319 	mutex_unlock(&fs_devices->device_list_mutex);
2320 
2321 	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2322 				  tgtdev->name->str);
2323 
2324 	btrfs_close_bdev(tgtdev);
2325 	synchronize_rcu();
2326 	btrfs_free_device(tgtdev);
2327 }
2328 
2329 /**
2330  * btrfs_get_dev_args_from_path - Populate args from device at path
2331  *
2332  * @fs_info:	the filesystem
2333  * @args:	the args to populate
2334  * @path:	the path to the device
2335  *
2336  * This will read the super block of the device at @path and populate @args with
2337  * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
2338  * lookup a device to operate on, but need to do it before we take any locks.
2339  * This properly handles the special case of "missing" that a user may pass in,
2340  * and does some basic sanity checks.  The caller must make sure that @path is
2341  * properly NUL terminated before calling in, and must call
2342  * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2343  * uuid buffers.
2344  *
2345  * Return: 0 for success, -errno for failure
2346  */
2347 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2348 				 struct btrfs_dev_lookup_args *args,
2349 				 const char *path)
2350 {
2351 	struct btrfs_super_block *disk_super;
2352 	struct block_device *bdev;
2353 	int ret;
2354 
2355 	if (!path || !path[0])
2356 		return -EINVAL;
2357 	if (!strcmp(path, "missing")) {
2358 		args->missing = true;
2359 		return 0;
2360 	}
2361 
2362 	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2363 	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2364 	if (!args->uuid || !args->fsid) {
2365 		btrfs_put_dev_args_from_path(args);
2366 		return -ENOMEM;
2367 	}
2368 
2369 	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2370 				    &bdev, &disk_super);
2371 	if (ret) {
2372 		btrfs_put_dev_args_from_path(args);
2373 		return ret;
2374 	}
2375 
2376 	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2377 	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2378 	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2379 		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2380 	else
2381 		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2382 	btrfs_release_disk_super(disk_super);
2383 	blkdev_put(bdev, FMODE_READ);
2384 	return 0;
2385 }
2386 
2387 /*
2388  * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2389  * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
2390  * that don't need to be freed.
2391  */
2392 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2393 {
2394 	kfree(args->uuid);
2395 	kfree(args->fsid);
2396 	args->uuid = NULL;
2397 	args->fsid = NULL;
2398 }
2399 
2400 struct btrfs_device *btrfs_find_device_by_devspec(
2401 		struct btrfs_fs_info *fs_info, u64 devid,
2402 		const char *device_path)
2403 {
2404 	BTRFS_DEV_LOOKUP_ARGS(args);
2405 	struct btrfs_device *device;
2406 	int ret;
2407 
2408 	if (devid) {
2409 		args.devid = devid;
2410 		device = btrfs_find_device(fs_info->fs_devices, &args);
2411 		if (!device)
2412 			return ERR_PTR(-ENOENT);
2413 		return device;
2414 	}
2415 
2416 	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2417 	if (ret)
2418 		return ERR_PTR(ret);
2419 	device = btrfs_find_device(fs_info->fs_devices, &args);
2420 	btrfs_put_dev_args_from_path(&args);
2421 	if (!device)
2422 		return ERR_PTR(-ENOENT);
2423 	return device;
2424 }
2425 
2426 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
2427 {
2428 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2429 	struct btrfs_fs_devices *old_devices;
2430 	struct btrfs_fs_devices *seed_devices;
2431 
2432 	lockdep_assert_held(&uuid_mutex);
2433 	if (!fs_devices->seeding)
2434 		return ERR_PTR(-EINVAL);
2435 
2436 	/*
2437 	 * Private copy of the seed devices, anchored at
2438 	 * fs_info->fs_devices->seed_list
2439 	 */
2440 	seed_devices = alloc_fs_devices(NULL, NULL);
2441 	if (IS_ERR(seed_devices))
2442 		return seed_devices;
2443 
2444 	/*
2445 	 * It's necessary to retain a copy of the original seed fs_devices in
2446 	 * fs_uuids so that filesystems which have been seeded can successfully
2447 	 * reference the seed device from open_seed_devices. This also supports
2448 	 * multiple fs seed.
2449 	 * multiple seed filesystems.
2450 	old_devices = clone_fs_devices(fs_devices);
2451 	if (IS_ERR(old_devices)) {
2452 		kfree(seed_devices);
2453 		return old_devices;
2454 	}
2455 
2456 	list_add(&old_devices->fs_list, &fs_uuids);
2457 
2458 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2459 	seed_devices->opened = 1;
2460 	INIT_LIST_HEAD(&seed_devices->devices);
2461 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2462 	mutex_init(&seed_devices->device_list_mutex);
2463 
2464 	return seed_devices;
2465 }
2466 
2467 /*
2468  * Splice seed devices into the sprout fs_devices.
2469  * Generate a new fsid for the sprouted read-write filesystem.
2470  */
2471 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
2472 			       struct btrfs_fs_devices *seed_devices)
2473 {
2474 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2475 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2476 	struct btrfs_device *device;
2477 	u64 super_flags;
2478 
2479 	/*
2480 	 * We are updating the fsid, and the thread leading to device_list_add()
2481 	 * could race with us, so uuid_mutex is needed.
2482 	 */
2483 	lockdep_assert_held(&uuid_mutex);
2484 
2485 	/*
2486 	 * The threads listed below may traverse dev_list but can do that without
2487 	 * device_list_mutex:
2488 	 * - All device ops and balance - as we are in btrfs_exclop_start.
2489 	 * - Various dev_list readers - are using RCU.
2490 	 * - btrfs_ioctl_fitrim() - is using RCU.
2491 	 *
2492 	 * The following read-only threads use device_list_mutex:
2493 	 * - Readonly scrub btrfs_scrub_dev()
2494 	 * - Readonly scrub btrfs_scrub_progress()
2495 	 * - btrfs_get_dev_stats()
2496 	 */
2497 	lockdep_assert_held(&fs_devices->device_list_mutex);
2498 
2499 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2500 			      synchronize_rcu);
2501 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2502 		device->fs_devices = seed_devices;
2503 
2504 	fs_devices->seeding = false;
2505 	fs_devices->num_devices = 0;
2506 	fs_devices->open_devices = 0;
2507 	fs_devices->missing_devices = 0;
2508 	fs_devices->rotating = false;
2509 	list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2510 
2511 	generate_random_uuid(fs_devices->fsid);
2512 	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2513 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2514 
2515 	super_flags = btrfs_super_flags(disk_super) &
2516 		      ~BTRFS_SUPER_FLAG_SEEDING;
2517 	btrfs_set_super_flags(disk_super, super_flags);
2518 }
2519 
2520 /*
2521  * Store the expected generation for seed devices in device items.
2522  */
2523 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2524 {
2525 	BTRFS_DEV_LOOKUP_ARGS(args);
2526 	struct btrfs_fs_info *fs_info = trans->fs_info;
2527 	struct btrfs_root *root = fs_info->chunk_root;
2528 	struct btrfs_path *path;
2529 	struct extent_buffer *leaf;
2530 	struct btrfs_dev_item *dev_item;
2531 	struct btrfs_device *device;
2532 	struct btrfs_key key;
2533 	u8 fs_uuid[BTRFS_FSID_SIZE];
2534 	u8 dev_uuid[BTRFS_UUID_SIZE];
2535 	int ret;
2536 
2537 	path = btrfs_alloc_path();
2538 	if (!path)
2539 		return -ENOMEM;
2540 
2541 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2542 	key.offset = 0;
2543 	key.type = BTRFS_DEV_ITEM_KEY;
2544 
2545 	while (1) {
2546 		btrfs_reserve_chunk_metadata(trans, false);
2547 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2548 		btrfs_trans_release_chunk_metadata(trans);
2549 		if (ret < 0)
2550 			goto error;
2551 
2552 		leaf = path->nodes[0];
2553 next_slot:
2554 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2555 			ret = btrfs_next_leaf(root, path);
2556 			if (ret > 0)
2557 				break;
2558 			if (ret < 0)
2559 				goto error;
2560 			leaf = path->nodes[0];
2561 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2562 			btrfs_release_path(path);
2563 			continue;
2564 		}
2565 
2566 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2567 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2568 		    key.type != BTRFS_DEV_ITEM_KEY)
2569 			break;
2570 
2571 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2572 					  struct btrfs_dev_item);
2573 		args.devid = btrfs_device_id(leaf, dev_item);
2574 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2575 				   BTRFS_UUID_SIZE);
2576 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2577 				   BTRFS_FSID_SIZE);
2578 		args.uuid = dev_uuid;
2579 		args.fsid = fs_uuid;
2580 		device = btrfs_find_device(fs_info->fs_devices, &args);
2581 		BUG_ON(!device); /* Logic error */
2582 
2583 		if (device->fs_devices->seeding) {
2584 			btrfs_set_device_generation(leaf, dev_item,
2585 						    device->generation);
2586 			btrfs_mark_buffer_dirty(leaf);
2587 		}
2588 
2589 		path->slots[0]++;
2590 		goto next_slot;
2591 	}
2592 	ret = 0;
2593 error:
2594 	btrfs_free_path(path);
2595 	return ret;
2596 }
2597 
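/*
 * Add the device at @device_path to the mounted filesystem.  When adding to
 * a seeding filesystem this also sprouts a new writable filesystem on top
 * of the read-only seed.
 */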
2598 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2599 {
2600 	struct btrfs_root *root = fs_info->dev_root;
2601 	struct btrfs_trans_handle *trans;
2602 	struct btrfs_device *device;
2603 	struct block_device *bdev;
2604 	struct super_block *sb = fs_info->sb;
2605 	struct rcu_string *name;
2606 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2607 	struct btrfs_fs_devices *seed_devices;
2608 	u64 orig_super_total_bytes;
2609 	u64 orig_super_num_devices;
2610 	int ret = 0;
2611 	bool seeding_dev = false;
2612 	bool locked = false;
2613 
2614 	if (sb_rdonly(sb) && !fs_devices->seeding)
2615 		return -EROFS;
2616 
2617 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2618 				  fs_info->bdev_holder);
2619 	if (IS_ERR(bdev))
2620 		return PTR_ERR(bdev);
2621 
2622 	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2623 		ret = -EINVAL;
2624 		goto error;
2625 	}
2626 
2627 	if (fs_devices->seeding) {
2628 		seeding_dev = true;
2629 		down_write(&sb->s_umount);
2630 		mutex_lock(&uuid_mutex);
2631 		locked = true;
2632 	}
2633 
2634 	sync_blockdev(bdev);
2635 
2636 	rcu_read_lock();
2637 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2638 		if (device->bdev == bdev) {
2639 			ret = -EEXIST;
2640 			rcu_read_unlock();
2641 			goto error;
2642 		}
2643 	}
2644 	rcu_read_unlock();
2645 
2646 	device = btrfs_alloc_device(fs_info, NULL, NULL);
2647 	if (IS_ERR(device)) {
2648 		/* we can safely leave the fs_devices entry around */
2649 		ret = PTR_ERR(device);
2650 		goto error;
2651 	}
2652 
2653 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2654 	if (!name) {
2655 		ret = -ENOMEM;
2656 		goto error_free_device;
2657 	}
2658 	rcu_assign_pointer(device->name, name);
2659 
2660 	device->fs_info = fs_info;
2661 	device->bdev = bdev;
2662 	ret = lookup_bdev(device_path, &device->devt);
2663 	if (ret)
2664 		goto error_free_device;
2665 
2666 	ret = btrfs_get_dev_zone_info(device, false);
2667 	if (ret)
2668 		goto error_free_device;
2669 
2670 	trans = btrfs_start_transaction(root, 0);
2671 	if (IS_ERR(trans)) {
2672 		ret = PTR_ERR(trans);
2673 		goto error_free_zone;
2674 	}
2675 
2676 	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2677 	device->generation = trans->transid;
2678 	device->io_width = fs_info->sectorsize;
2679 	device->io_align = fs_info->sectorsize;
2680 	device->sector_size = fs_info->sectorsize;
2681 	device->total_bytes =
2682 		round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
2683 	device->disk_total_bytes = device->total_bytes;
2684 	device->commit_total_bytes = device->total_bytes;
2685 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2686 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2687 	device->mode = FMODE_EXCL;
2688 	device->dev_stats_valid = 1;
2689 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2690 
2691 	if (seeding_dev) {
2692 		btrfs_clear_sb_rdonly(sb);
2693 
2694 		/* GFP_KERNEL allocation must not be under device_list_mutex */
2695 		seed_devices = btrfs_init_sprout(fs_info);
2696 		if (IS_ERR(seed_devices)) {
2697 			ret = PTR_ERR(seed_devices);
2698 			btrfs_abort_transaction(trans, ret);
2699 			goto error_trans;
2700 		}
2701 	}
2702 
2703 	mutex_lock(&fs_devices->device_list_mutex);
2704 	if (seeding_dev) {
2705 		btrfs_setup_sprout(fs_info, seed_devices);
2706 		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2707 						device);
2708 	}
2709 
2710 	device->fs_devices = fs_devices;
2711 
2712 	mutex_lock(&fs_info->chunk_mutex);
2713 	list_add_rcu(&device->dev_list, &fs_devices->devices);
2714 	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2715 	fs_devices->num_devices++;
2716 	fs_devices->open_devices++;
2717 	fs_devices->rw_devices++;
2718 	fs_devices->total_devices++;
2719 	fs_devices->total_rw_bytes += device->total_bytes;
2720 
2721 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2722 
2723 	if (!bdev_nonrot(bdev))
2724 		fs_devices->rotating = true;
2725 
2726 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2727 	btrfs_set_super_total_bytes(fs_info->super_copy,
2728 		round_down(orig_super_total_bytes + device->total_bytes,
2729 			   fs_info->sectorsize));
2730 
2731 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2732 	btrfs_set_super_num_devices(fs_info->super_copy,
2733 				    orig_super_num_devices + 1);
2734 
2735 	/*
2736 	 * we've got more storage, clear any full flags on the space
2737 	 * infos
2738 	 */
2739 	btrfs_clear_space_info_full(fs_info);
2740 
2741 	mutex_unlock(&fs_info->chunk_mutex);
2742 
2743 	/* Add sysfs device entry */
2744 	btrfs_sysfs_add_device(device);
2745 
2746 	mutex_unlock(&fs_devices->device_list_mutex);
2747 
2748 	if (seeding_dev) {
2749 		mutex_lock(&fs_info->chunk_mutex);
2750 		ret = init_first_rw_device(trans);
2751 		mutex_unlock(&fs_info->chunk_mutex);
2752 		if (ret) {
2753 			btrfs_abort_transaction(trans, ret);
2754 			goto error_sysfs;
2755 		}
2756 	}
2757 
2758 	ret = btrfs_add_dev_item(trans, device);
2759 	if (ret) {
2760 		btrfs_abort_transaction(trans, ret);
2761 		goto error_sysfs;
2762 	}
2763 
2764 	if (seeding_dev) {
2765 		ret = btrfs_finish_sprout(trans);
2766 		if (ret) {
2767 			btrfs_abort_transaction(trans, ret);
2768 			goto error_sysfs;
2769 		}
2770 
2771 		/*
2772 		 * fs_devices now represents the newly sprouted filesystem and
2773 		 * its fsid has been changed by btrfs_setup_sprout().
2774 		 */
2775 		btrfs_sysfs_update_sprout_fsid(fs_devices);
2776 	}
2777 
2778 	ret = btrfs_commit_transaction(trans);
2779 
2780 	if (seeding_dev) {
2781 		mutex_unlock(&uuid_mutex);
2782 		up_write(&sb->s_umount);
2783 		locked = false;
2784 
2785 		if (ret) /* transaction commit */
2786 			return ret;
2787 
2788 		ret = btrfs_relocate_sys_chunks(fs_info);
2789 		if (ret < 0)
2790 			btrfs_handle_fs_error(fs_info, ret,
2791 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2792 		trans = btrfs_attach_transaction(root);
2793 		if (IS_ERR(trans)) {
2794 			if (PTR_ERR(trans) == -ENOENT)
2795 				return 0;
2796 			ret = PTR_ERR(trans);
2797 			trans = NULL;
2798 			goto error_sysfs;
2799 		}
2800 		ret = btrfs_commit_transaction(trans);
2801 	}
2802 
2803 	/*
2804 	 * Now that we have written a new super block to this device, check all
2805 	 * other fs_devices lists to see if device_path alienates any other
2806 	 * scanned device.
2807 	 * We can ignore the return value as it typically returns -EINVAL and
2808 	 * only succeeds if the device was an alien.
2809 	 */
2810 	btrfs_forget_devices(device->devt);
2811 
2812 	/* Update ctime/mtime for blkid or udev */
2813 	update_dev_time(device_path);
2814 
2815 	return ret;
2816 
2817 error_sysfs:
2818 	btrfs_sysfs_remove_device(device);
2819 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2820 	mutex_lock(&fs_info->chunk_mutex);
2821 	list_del_rcu(&device->dev_list);
2822 	list_del(&device->dev_alloc_list);
2823 	fs_info->fs_devices->num_devices--;
2824 	fs_info->fs_devices->open_devices--;
2825 	fs_info->fs_devices->rw_devices--;
2826 	fs_info->fs_devices->total_devices--;
2827 	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2828 	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2829 	btrfs_set_super_total_bytes(fs_info->super_copy,
2830 				    orig_super_total_bytes);
2831 	btrfs_set_super_num_devices(fs_info->super_copy,
2832 				    orig_super_num_devices);
2833 	mutex_unlock(&fs_info->chunk_mutex);
2834 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2835 error_trans:
2836 	if (seeding_dev)
2837 		btrfs_set_sb_rdonly(sb);
2838 	if (trans)
2839 		btrfs_end_transaction(trans);
2840 error_free_zone:
2841 	btrfs_destroy_dev_zone_info(device);
2842 error_free_device:
2843 	btrfs_free_device(device);
2844 error:
2845 	blkdev_put(bdev, FMODE_EXCL);
2846 	if (locked) {
2847 		mutex_unlock(&uuid_mutex);
2848 		up_write(&sb->s_umount);
2849 	}
2850 	return ret;
2851 }
2852 
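/*
 * Write the current in-memory state of @device (type, alignment and sizes)
 * back into its device item in the chunk tree.
 */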
2853 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2854 					struct btrfs_device *device)
2855 {
2856 	int ret;
2857 	struct btrfs_path *path;
2858 	struct btrfs_root *root = device->fs_info->chunk_root;
2859 	struct btrfs_dev_item *dev_item;
2860 	struct extent_buffer *leaf;
2861 	struct btrfs_key key;
2862 
2863 	path = btrfs_alloc_path();
2864 	if (!path)
2865 		return -ENOMEM;
2866 
2867 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2868 	key.type = BTRFS_DEV_ITEM_KEY;
2869 	key.offset = device->devid;
2870 
2871 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2872 	if (ret < 0)
2873 		goto out;
2874 
2875 	if (ret > 0) {
2876 		ret = -ENOENT;
2877 		goto out;
2878 	}
2879 
2880 	leaf = path->nodes[0];
2881 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2882 
2883 	btrfs_set_device_id(leaf, dev_item, device->devid);
2884 	btrfs_set_device_type(leaf, dev_item, device->type);
2885 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2886 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2887 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2888 	btrfs_set_device_total_bytes(leaf, dev_item,
2889 				     btrfs_device_get_disk_total_bytes(device));
2890 	btrfs_set_device_bytes_used(leaf, dev_item,
2891 				    btrfs_device_get_bytes_used(device));
2892 	btrfs_mark_buffer_dirty(leaf);
2893 
2894 out:
2895 	btrfs_free_path(path);
2896 	return ret;
2897 }
2898 
2899 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2900 		      struct btrfs_device *device, u64 new_size)
2901 {
2902 	struct btrfs_fs_info *fs_info = device->fs_info;
2903 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2904 	u64 old_total;
2905 	u64 diff;
2906 	int ret;
2907 
2908 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2909 		return -EACCES;
2910 
2911 	new_size = round_down(new_size, fs_info->sectorsize);
2912 
2913 	mutex_lock(&fs_info->chunk_mutex);
2914 	old_total = btrfs_super_total_bytes(super_copy);
2915 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2916 
2917 	if (new_size <= device->total_bytes ||
2918 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2919 		mutex_unlock(&fs_info->chunk_mutex);
2920 		return -EINVAL;
2921 	}
2922 
2923 	btrfs_set_super_total_bytes(super_copy,
2924 			round_down(old_total + diff, fs_info->sectorsize));
2925 	device->fs_devices->total_rw_bytes += diff;
2926 
2927 	btrfs_device_set_total_bytes(device, new_size);
2928 	btrfs_device_set_disk_total_bytes(device, new_size);
2929 	btrfs_clear_space_info_full(device->fs_info);
2930 	if (list_empty(&device->post_commit_list))
2931 		list_add_tail(&device->post_commit_list,
2932 			      &trans->transaction->dev_update_list);
2933 	mutex_unlock(&fs_info->chunk_mutex);
2934 
2935 	btrfs_reserve_chunk_metadata(trans, false);
2936 	ret = btrfs_update_device(trans, device);
2937 	btrfs_trans_release_chunk_metadata(trans);
2938 
2939 	return ret;
2940 }
2941 
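/*
 * Delete the chunk item at @chunk_offset from the chunk tree.
 */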
2942 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2943 {
2944 	struct btrfs_fs_info *fs_info = trans->fs_info;
2945 	struct btrfs_root *root = fs_info->chunk_root;
2946 	int ret;
2947 	struct btrfs_path *path;
2948 	struct btrfs_key key;
2949 
2950 	path = btrfs_alloc_path();
2951 	if (!path)
2952 		return -ENOMEM;
2953 
2954 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2955 	key.offset = chunk_offset;
2956 	key.type = BTRFS_CHUNK_ITEM_KEY;
2957 
2958 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2959 	if (ret < 0)
2960 		goto out;
2961 	else if (ret > 0) { /* Logic error or corruption */
2962 		btrfs_handle_fs_error(fs_info, -ENOENT,
2963 				      "Failed lookup while freeing chunk.");
2964 		ret = -ENOENT;
2965 		goto out;
2966 	}
2967 
2968 	ret = btrfs_del_item(trans, root, path);
2969 	if (ret < 0)
2970 		btrfs_handle_fs_error(fs_info, ret,
2971 				      "Failed to delete chunk item.");
2972 out:
2973 	btrfs_free_path(path);
2974 	return ret;
2975 }
2976 
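/*
 * Remove the entry for the chunk at @chunk_offset from the superblock's
 * system chunk array, shifting the remaining entries down.
 */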
2977 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2978 {
2979 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2980 	struct btrfs_disk_key *disk_key;
2981 	struct btrfs_chunk *chunk;
2982 	u8 *ptr;
2983 	int ret = 0;
2984 	u32 num_stripes;
2985 	u32 array_size;
2986 	u32 len = 0;
2987 	u32 cur;
2988 	struct btrfs_key key;
2989 
2990 	lockdep_assert_held(&fs_info->chunk_mutex);
2991 	array_size = btrfs_super_sys_array_size(super_copy);
2992 
2993 	ptr = super_copy->sys_chunk_array;
2994 	cur = 0;
2995 
2996 	while (cur < array_size) {
2997 		disk_key = (struct btrfs_disk_key *)ptr;
2998 		btrfs_disk_key_to_cpu(&key, disk_key);
2999 
3000 		len = sizeof(*disk_key);
3001 
3002 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3003 			chunk = (struct btrfs_chunk *)(ptr + len);
3004 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
3005 			len += btrfs_chunk_item_size(num_stripes);
3006 		} else {
3007 			ret = -EIO;
3008 			break;
3009 		}
3010 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
3011 		    key.offset == chunk_offset) {
3012 			memmove(ptr, ptr + len, array_size - (cur + len));
3013 			array_size -= len;
3014 			btrfs_set_super_sys_array_size(super_copy, array_size);
3015 		} else {
3016 			ptr += len;
3017 			cur += len;
3018 		}
3019 	}
3020 	return ret;
3021 }
3022 
3023 /*
3024  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
3025  * @logical: Logical block offset in bytes.
3026  * @length: Length of extent in bytes.
3027  *
3028  * Return: Chunk mapping or ERR_PTR.
3029  */
3030 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3031 				       u64 logical, u64 length)
3032 {
3033 	struct extent_map_tree *em_tree;
3034 	struct extent_map *em;
3035 
3036 	em_tree = &fs_info->mapping_tree;
3037 	read_lock(&em_tree->lock);
3038 	em = lookup_extent_mapping(em_tree, logical, length);
3039 	read_unlock(&em_tree->lock);
3040 
3041 	if (!em) {
3042 		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
3043 			   logical, length);
3044 		return ERR_PTR(-EINVAL);
3045 	}
3046 
3047 	if (em->start > logical || em->start + em->len < logical) {
3048 		btrfs_crit(fs_info,
3049 			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3050 			   logical, length, em->start, em->start + em->len);
3051 		free_extent_map(em);
3052 		return ERR_PTR(-EINVAL);
3053 	}
3054 
3055 	/* callers are responsible for dropping em's ref. */
3056 	return em;
3057 }
3058 
3059 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3060 			     struct map_lookup *map, u64 chunk_offset)
3061 {
3062 	int i;
3063 
3064 	/*
3065 	 * Removing chunk items and updating the device items in the chunks btree
3066 	 * requires holding the chunk_mutex.
3067 	 * See the comment at btrfs_chunk_alloc() for the details.
3068 	 */
3069 	lockdep_assert_held(&trans->fs_info->chunk_mutex);
3070 
3071 	for (i = 0; i < map->num_stripes; i++) {
3072 		int ret;
3073 
3074 		ret = btrfs_update_device(trans, map->stripes[i].dev);
3075 		if (ret)
3076 			return ret;
3077 	}
3078 
3079 	return btrfs_free_chunk(trans, chunk_offset);
3080 }
3081 
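/*
 * Remove the chunk at @chunk_offset: free its device extents, delete the
 * chunk item (and, for system chunks, the system chunk array entry) and
 * finally remove the corresponding block group.
 */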
3082 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3083 {
3084 	struct btrfs_fs_info *fs_info = trans->fs_info;
3085 	struct extent_map *em;
3086 	struct map_lookup *map;
3087 	u64 dev_extent_len = 0;
3088 	int i, ret = 0;
3089 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3090 
3091 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3092 	if (IS_ERR(em)) {
3093 		/*
3094 		 * This is a logic error, but we don't want to just rely on the
3095 		 * user having built with ASSERT enabled, so if ASSERT doesn't
3096 		 * do anything we still error out.
3097 		 */
3098 		ASSERT(0);
3099 		return PTR_ERR(em);
3100 	}
3101 	map = em->map_lookup;
3102 
3103 	/*
3104 	 * First delete the device extent items from the devices btree.
3105 	 * We take the device_list_mutex to avoid racing with the finishing phase
3106 	 * of a device replace operation. See the comment below before acquiring
3107 	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3108 	 * because that can result in a deadlock when deleting the device extent
3109 	 * items from the devices btree - COWing an extent buffer from the btree
3110 	 * may result in allocating a new metadata chunk, which would attempt to
3111 	 * lock fs_info->chunk_mutex again.
3112 	 */
3113 	mutex_lock(&fs_devices->device_list_mutex);
3114 	for (i = 0; i < map->num_stripes; i++) {
3115 		struct btrfs_device *device = map->stripes[i].dev;
3116 		ret = btrfs_free_dev_extent(trans, device,
3117 					    map->stripes[i].physical,
3118 					    &dev_extent_len);
3119 		if (ret) {
3120 			mutex_unlock(&fs_devices->device_list_mutex);
3121 			btrfs_abort_transaction(trans, ret);
3122 			goto out;
3123 		}
3124 
3125 		if (device->bytes_used > 0) {
3126 			mutex_lock(&fs_info->chunk_mutex);
3127 			btrfs_device_set_bytes_used(device,
3128 					device->bytes_used - dev_extent_len);
3129 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3130 			btrfs_clear_space_info_full(fs_info);
3131 			mutex_unlock(&fs_info->chunk_mutex);
3132 		}
3133 	}
3134 	mutex_unlock(&fs_devices->device_list_mutex);
3135 
3136 	/*
3137 	 * We acquire fs_info->chunk_mutex for 2 reasons:
3138 	 *
3139 	 * 1) Just like with the first phase of the chunk allocation, we must
3140 	 *    reserve system space, do all chunk btree updates and deletions, and
3141 	 *    update the system chunk array in the superblock while holding this
3142 	 *    mutex. This is for similar reasons as explained on the comment at
3143 	 *    the top of btrfs_chunk_alloc();
3144 	 *
3145 	 * 2) Prevent races with the final phase of a device replace operation
3146 	 *    that replaces the device object associated with the map's stripes,
3147 	 *    because the device object's id can change at any time during that
3148 	 *    final phase of the device replace operation
3149 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3150 	 *    replaced device and then see it with an ID of
3151 	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3152 	 *    the device item, which does not exist in the chunk btree.
3153 	 *    The finishing phase of device replace acquires both the
3154 	 *    device_list_mutex and the chunk_mutex, in that order, so we are
3155 	 *    safe by just acquiring the chunk_mutex.
3156 	 */
3157 	trans->removing_chunk = true;
3158 	mutex_lock(&fs_info->chunk_mutex);
3159 
3160 	check_system_chunk(trans, map->type);
3161 
3162 	ret = remove_chunk_item(trans, map, chunk_offset);
3163 	/*
3164 	 * Normally we should not get -ENOSPC since we reserved space before
3165 	 * through the call to check_system_chunk().
3166 	 *
3167 	 * Despite our system space_info having enough free space, we may not
3168 	 * be able to allocate extents from its block groups, because all have
3169 	 * an incompatible profile, which will force us to allocate a new system
3170 	 * block group with the right profile, or right after we called
3171 	 * check_system_chunk() above, a scrub turned the only system block group
3172 	 * with enough free space into RO mode.
3173 	 * This is explained with more detail at do_chunk_alloc().
3174 	 *
3175 	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3176 	 */
3177 	if (ret == -ENOSPC) {
3178 		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3179 		struct btrfs_block_group *sys_bg;
3180 
3181 		sys_bg = btrfs_create_chunk(trans, sys_flags);
3182 		if (IS_ERR(sys_bg)) {
3183 			ret = PTR_ERR(sys_bg);
3184 			btrfs_abort_transaction(trans, ret);
3185 			goto out;
3186 		}
3187 
3188 		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3189 		if (ret) {
3190 			btrfs_abort_transaction(trans, ret);
3191 			goto out;
3192 		}
3193 
3194 		ret = remove_chunk_item(trans, map, chunk_offset);
3195 		if (ret) {
3196 			btrfs_abort_transaction(trans, ret);
3197 			goto out;
3198 		}
3199 	} else if (ret) {
3200 		btrfs_abort_transaction(trans, ret);
3201 		goto out;
3202 	}
3203 
3204 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3205 
3206 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3207 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3208 		if (ret) {
3209 			btrfs_abort_transaction(trans, ret);
3210 			goto out;
3211 		}
3212 	}
3213 
3214 	mutex_unlock(&fs_info->chunk_mutex);
3215 	trans->removing_chunk = false;
3216 
3217 	/*
3218 	 * We are done with chunk btree updates and deletions, so release the
3219 	 * system space we previously reserved (with check_system_chunk()).
3220 	 */
3221 	btrfs_trans_release_chunk_metadata(trans);
3222 
3223 	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3224 	if (ret) {
3225 		btrfs_abort_transaction(trans, ret);
3226 		goto out;
3227 	}
3228 
3229 out:
3230 	if (trans->removing_chunk) {
3231 		mutex_unlock(&fs_info->chunk_mutex);
3232 		trans->removing_chunk = false;
3233 	}
3234 	/* once for us */
3235 	free_extent_map(em);
3236 	return ret;
3237 }
3238 
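/*
 * Relocate all extents in the chunk at @chunk_offset to other chunks and
 * then remove the emptied chunk.  The caller must hold
 * fs_info->reclaim_bgs_lock.
 */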
3239 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3240 {
3241 	struct btrfs_root *root = fs_info->chunk_root;
3242 	struct btrfs_trans_handle *trans;
3243 	struct btrfs_block_group *block_group;
3244 	u64 length;
3245 	int ret;
3246 
3247 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3248 		btrfs_err(fs_info,
3249 			  "relocate: not supported on extent tree v2 yet");
3250 		return -EINVAL;
3251 	}
3252 
3253 	/*
3254 	 * Prevent races with automatic removal of unused block groups.
3255 	 * After we relocate and before we remove the chunk with offset
3256 	 * chunk_offset, automatic removal of the block group can kick in,
3257 	 * resulting in a failure when calling btrfs_remove_chunk() below.
3258 	 *
3259 	 * Make sure to acquire this mutex before doing a tree search (dev
3260 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3261 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3262 	 * we release the path used to search the chunk/dev tree and before
3263 	 * the current task acquires this mutex and calls us.
3264 	 */
3265 	lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3266 
3267 	/* step one, relocate all the extents inside this chunk */
3268 	btrfs_scrub_pause(fs_info);
3269 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3270 	btrfs_scrub_continue(fs_info);
3271 	if (ret)
3272 		return ret;
3273 
3274 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3275 	if (!block_group)
3276 		return -ENOENT;
3277 	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3278 	length = block_group->length;
3279 	btrfs_put_block_group(block_group);
3280 
3281 	/*
3282 	 * On a zoned file system, discard the whole block group, this will
3283 	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3284 	 * resetting the zone fails, don't treat it as a fatal problem from the
3285 	 * filesystem's point of view.
3286 	 */
3287 	if (btrfs_is_zoned(fs_info)) {
3288 		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3289 		if (ret)
3290 			btrfs_info(fs_info,
3291 				"failed to reset zone %llu after relocation",
3292 				chunk_offset);
3293 	}
3294 
3295 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3296 						     chunk_offset);
3297 	if (IS_ERR(trans)) {
3298 		ret = PTR_ERR(trans);
3299 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3300 		return ret;
3301 	}
3302 
3303 	/*
3304 	 * step two, delete the device extents and the
3305 	 * chunk tree entries
3306 	 */
3307 	ret = btrfs_remove_chunk(trans, chunk_offset);
3308 	btrfs_end_transaction(trans);
3309 	return ret;
3310 }
3311 
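/*
 * Relocate all system chunks, retrying the whole pass once for chunks that
 * failed with -ENOSPC (relocating other chunks may have freed space).
 */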
3312 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3313 {
3314 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3315 	struct btrfs_path *path;
3316 	struct extent_buffer *leaf;
3317 	struct btrfs_chunk *chunk;
3318 	struct btrfs_key key;
3319 	struct btrfs_key found_key;
3320 	u64 chunk_type;
3321 	bool retried = false;
3322 	int failed = 0;
3323 	int ret;
3324 
3325 	path = btrfs_alloc_path();
3326 	if (!path)
3327 		return -ENOMEM;
3328 
3329 again:
3330 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3331 	key.offset = (u64)-1;
3332 	key.type = BTRFS_CHUNK_ITEM_KEY;
3333 
3334 	while (1) {
3335 		mutex_lock(&fs_info->reclaim_bgs_lock);
3336 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3337 		if (ret < 0) {
3338 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3339 			goto error;
3340 		}
3341 		BUG_ON(ret == 0); /* Corruption */
3342 
3343 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3344 					  key.type);
3345 		if (ret)
3346 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3347 		if (ret < 0)
3348 			goto error;
3349 		if (ret > 0)
3350 			break;
3351 
3352 		leaf = path->nodes[0];
3353 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3354 
3355 		chunk = btrfs_item_ptr(leaf, path->slots[0],
3356 				       struct btrfs_chunk);
3357 		chunk_type = btrfs_chunk_type(leaf, chunk);
3358 		btrfs_release_path(path);
3359 
3360 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3361 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3362 			if (ret == -ENOSPC)
3363 				failed++;
3364 			else
3365 				BUG_ON(ret);
3366 		}
3367 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3368 
3369 		if (found_key.offset == 0)
3370 			break;
3371 		key.offset = found_key.offset - 1;
3372 	}
3373 	ret = 0;
3374 	if (failed && !retried) {
3375 		failed = 0;
3376 		retried = true;
3377 		goto again;
3378 	} else if (WARN_ON(failed && retried)) {
3379 		ret = -ENOSPC;
3380 	}
3381 error:
3382 	btrfs_free_path(path);
3383 	return ret;
3384 }
3385 
3386 /*
3387  * Return 1 : a data chunk was allocated successfully,
3388  * return <0: error while allocating a data chunk,
3389  * return 0 : no need to allocate a data chunk.
3390  */
3391 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3392 				      u64 chunk_offset)
3393 {
3394 	struct btrfs_block_group *cache;
3395 	u64 bytes_used;
3396 	u64 chunk_type;
3397 
3398 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3399 	ASSERT(cache);
3400 	chunk_type = cache->flags;
3401 	btrfs_put_block_group(cache);
3402 
3403 	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3404 		return 0;
3405 
3406 	spin_lock(&fs_info->data_sinfo->lock);
3407 	bytes_used = fs_info->data_sinfo->bytes_used;
3408 	spin_unlock(&fs_info->data_sinfo->lock);
3409 
3410 	if (!bytes_used) {
3411 		struct btrfs_trans_handle *trans;
3412 		int ret;
3413 
3414 		trans =	btrfs_join_transaction(fs_info->tree_root);
3415 		if (IS_ERR(trans))
3416 			return PTR_ERR(trans);
3417 
3418 		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3419 		btrfs_end_transaction(trans);
3420 		if (ret < 0)
3421 			return ret;
3422 		return 1;
3423 	}
3424 
3425 	return 0;
3426 }
3427 
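/*
 * Persist the balance control as a balance item in the tree root
 * (BTRFS_BALANCE_OBJECTID / BTRFS_TEMPORARY_ITEM_KEY), so an interrupted
 * balance can be found and resumed on the next mount.
 */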
3428 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3429 			       struct btrfs_balance_control *bctl)
3430 {
3431 	struct btrfs_root *root = fs_info->tree_root;
3432 	struct btrfs_trans_handle *trans;
3433 	struct btrfs_balance_item *item;
3434 	struct btrfs_disk_balance_args disk_bargs;
3435 	struct btrfs_path *path;
3436 	struct extent_buffer *leaf;
3437 	struct btrfs_key key;
3438 	int ret, err;
3439 
3440 	path = btrfs_alloc_path();
3441 	if (!path)
3442 		return -ENOMEM;
3443 
3444 	trans = btrfs_start_transaction(root, 0);
3445 	if (IS_ERR(trans)) {
3446 		btrfs_free_path(path);
3447 		return PTR_ERR(trans);
3448 	}
3449 
3450 	key.objectid = BTRFS_BALANCE_OBJECTID;
3451 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3452 	key.offset = 0;
3453 
3454 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3455 				      sizeof(*item));
3456 	if (ret)
3457 		goto out;
3458 
3459 	leaf = path->nodes[0];
3460 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3461 
3462 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3463 
3464 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3465 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3466 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3467 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3468 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3469 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3470 
3471 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3472 
3473 	btrfs_mark_buffer_dirty(leaf);
3474 out:
3475 	btrfs_free_path(path);
3476 	err = btrfs_commit_transaction(trans);
3477 	if (err && !ret)
3478 		ret = err;
3479 	return ret;
3480 }
3481 
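/* Remove the on-disk balance item from the tree root. */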
3482 static int del_balance_item(struct btrfs_fs_info *fs_info)
3483 {
3484 	struct btrfs_root *root = fs_info->tree_root;
3485 	struct btrfs_trans_handle *trans;
3486 	struct btrfs_path *path;
3487 	struct btrfs_key key;
3488 	int ret, err;
3489 
3490 	path = btrfs_alloc_path();
3491 	if (!path)
3492 		return -ENOMEM;
3493 
3494 	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3495 	if (IS_ERR(trans)) {
3496 		btrfs_free_path(path);
3497 		return PTR_ERR(trans);
3498 	}
3499 
3500 	key.objectid = BTRFS_BALANCE_OBJECTID;
3501 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3502 	key.offset = 0;
3503 
3504 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3505 	if (ret < 0)
3506 		goto out;
3507 	if (ret > 0) {
3508 		ret = -ENOENT;
3509 		goto out;
3510 	}
3511 
3512 	ret = btrfs_del_item(trans, root, path);
3513 out:
3514 	btrfs_free_path(path);
3515 	err = btrfs_commit_transaction(trans);
3516 	if (err && !ret)
3517 		ret = err;
3518 	return ret;
3519 }
3520 
3521 /*
3522  * This is a heuristic used to reduce the number of chunks balanced on
3523  * resume after balance was interrupted.
3524  */
3525 static void update_balance_args(struct btrfs_balance_control *bctl)
3526 {
3527 	/*
3528 	 * Turn on soft mode for chunk types that were being converted.
3529 	 */
3530 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3531 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3532 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3533 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3534 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3535 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3536 
3537 	/*
3538 	 * Turn on the usage filter if it is not already used.  The idea is
3539 	 * that chunks that we have already balanced should be
3540 	 * reasonably full.  Don't do it for chunks that are being
3541 	 * converted - that will keep us from relocating unconverted
3542 	 * (albeit full) chunks.
3543 	 */
3544 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3545 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3546 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3547 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3548 		bctl->data.usage = 90;
3549 	}
3550 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3551 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3552 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3553 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3554 		bctl->sys.usage = 90;
3555 	}
3556 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3557 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3558 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3559 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3560 		bctl->meta.usage = 90;
3561 	}
3562 }
3563 
3564 /*
3565  * Clear the balance status in fs_info and delete the balance item from disk.
3566  */
3567 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3568 {
3569 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3570 	int ret;
3571 
3572 	BUG_ON(!fs_info->balance_ctl);
3573 
3574 	spin_lock(&fs_info->balance_lock);
3575 	fs_info->balance_ctl = NULL;
3576 	spin_unlock(&fs_info->balance_lock);
3577 
3578 	kfree(bctl);
3579 	ret = del_balance_item(fs_info);
3580 	if (ret)
3581 		btrfs_handle_fs_error(fs_info, ret, NULL);
3582 }
3583 
3584 /*
3585  * Balance filters.  Return 1 if chunk should be filtered out
3586  * (should not be balanced).
3587  */
3588 static int chunk_profiles_filter(u64 chunk_type,
3589 				 struct btrfs_balance_args *bargs)
3590 {
3591 	chunk_type = chunk_to_extended(chunk_type) &
3592 				BTRFS_EXTENDED_PROFILE_MASK;
3593 
3594 	if (bargs->profiles & chunk_type)
3595 		return 0;
3596 
3597 	return 1;
3598 }
3599 
3600 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3601 			      struct btrfs_balance_args *bargs)
3602 {
3603 	struct btrfs_block_group *cache;
3604 	u64 chunk_used;
3605 	u64 user_thresh_min;
3606 	u64 user_thresh_max;
3607 	int ret = 1;
3608 
3609 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3610 	chunk_used = cache->used;
3611 
3612 	if (bargs->usage_min == 0)
3613 		user_thresh_min = 0;
3614 	else
3615 		user_thresh_min = div_factor_fine(cache->length,
3616 						  bargs->usage_min);
3617 
3618 	if (bargs->usage_max == 0)
3619 		user_thresh_max = 1;
3620 	else if (bargs->usage_max > 100)
3621 		user_thresh_max = cache->length;
3622 	else
3623 		user_thresh_max = div_factor_fine(cache->length,
3624 						  bargs->usage_max);
3625 
3626 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3627 		ret = 0;
3628 
3629 	btrfs_put_block_group(cache);
3630 	return ret;
3631 }
3632 
3633 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3634 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3635 {
3636 	struct btrfs_block_group *cache;
3637 	u64 chunk_used, user_thresh;
3638 	int ret = 1;
3639 
3640 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3641 	chunk_used = cache->used;
3642 
3643 	if (bargs->usage_min == 0)
3644 		user_thresh = 1;
3645 	else if (bargs->usage > 100)
3646 		user_thresh = cache->length;
3647 	else
3648 		user_thresh = div_factor_fine(cache->length, bargs->usage);
3649 
3650 	if (chunk_used < user_thresh)
3651 		ret = 0;
3652 
3653 	btrfs_put_block_group(cache);
3654 	return ret;
3655 }
3656 
3657 static int chunk_devid_filter(struct extent_buffer *leaf,
3658 			      struct btrfs_chunk *chunk,
3659 			      struct btrfs_balance_args *bargs)
3660 {
3661 	struct btrfs_stripe *stripe;
3662 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3663 	int i;
3664 
3665 	for (i = 0; i < num_stripes; i++) {
3666 		stripe = btrfs_stripe_nr(chunk, i);
3667 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3668 			return 0;
3669 	}
3670 
3671 	return 1;
3672 }
3673 
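/*
 * Number of stripes in a chunk that hold data: subtract the parity stripes
 * and divide the rest by the number of copies.
 */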
3674 static u64 calc_data_stripes(u64 type, int num_stripes)
3675 {
3676 	const int index = btrfs_bg_flags_to_raid_index(type);
3677 	const int ncopies = btrfs_raid_array[index].ncopies;
3678 	const int nparity = btrfs_raid_array[index].nparity;
3679 
3680 	return (num_stripes - nparity) / ncopies;
3681 }
3682 
3683 /* [pstart, pend) */
3684 static int chunk_drange_filter(struct extent_buffer *leaf,
3685 			       struct btrfs_chunk *chunk,
3686 			       struct btrfs_balance_args *bargs)
3687 {
3688 	struct btrfs_stripe *stripe;
3689 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3690 	u64 stripe_offset;
3691 	u64 stripe_length;
3692 	u64 type;
3693 	int factor;
3694 	int i;
3695 
3696 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3697 		return 0;
3698 
3699 	type = btrfs_chunk_type(leaf, chunk);
3700 	factor = calc_data_stripes(type, num_stripes);
3701 
3702 	for (i = 0; i < num_stripes; i++) {
3703 		stripe = btrfs_stripe_nr(chunk, i);
3704 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3705 			continue;
3706 
3707 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3708 		stripe_length = btrfs_chunk_length(leaf, chunk);
3709 		stripe_length = div_u64(stripe_length, factor);
3710 
3711 		if (stripe_offset < bargs->pend &&
3712 		    stripe_offset + stripe_length > bargs->pstart)
3713 			return 0;
3714 	}
3715 
3716 	return 1;
3717 }
3718 
3719 /* [vstart, vend) */
3720 static int chunk_vrange_filter(struct extent_buffer *leaf,
3721 			       struct btrfs_chunk *chunk,
3722 			       u64 chunk_offset,
3723 			       struct btrfs_balance_args *bargs)
3724 {
3725 	if (chunk_offset < bargs->vend &&
3726 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3727 		/* at least part of the chunk is inside this vrange */
3728 		return 0;
3729 
3730 	return 1;
3731 }
3732 
3733 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3734 			       struct btrfs_chunk *chunk,
3735 			       struct btrfs_balance_args *bargs)
3736 {
3737 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3738 
3739 	if (bargs->stripes_min <= num_stripes
3740 			&& num_stripes <= bargs->stripes_max)
3741 		return 0;
3742 
3743 	return 1;
3744 }
3745 
3746 static int chunk_soft_convert_filter(u64 chunk_type,
3747 				     struct btrfs_balance_args *bargs)
3748 {
3749 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3750 		return 0;
3751 
3752 	chunk_type = chunk_to_extended(chunk_type) &
3753 				BTRFS_EXTENDED_PROFILE_MASK;
3754 
3755 	if (bargs->target == chunk_type)
3756 		return 1;
3757 
3758 	return 0;
3759 }
3760 
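/*
 * Decide whether a chunk should be relocated by applying the configured
 * balance filters in order. Returns 1 to balance the chunk, 0 to skip it.
 * The limit filters are stateful (they decrement a counter) and therefore
 * must be evaluated last.
 */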
3761 static int should_balance_chunk(struct extent_buffer *leaf,
3762 				struct btrfs_chunk *chunk, u64 chunk_offset)
3763 {
3764 	struct btrfs_fs_info *fs_info = leaf->fs_info;
3765 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3766 	struct btrfs_balance_args *bargs = NULL;
3767 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3768 
3769 	/* type filter */
3770 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3771 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3772 		return 0;
3773 	}
3774 
3775 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3776 		bargs = &bctl->data;
3777 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3778 		bargs = &bctl->sys;
3779 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3780 		bargs = &bctl->meta;
3781 
3782 	/* profiles filter */
3783 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3784 	    chunk_profiles_filter(chunk_type, bargs)) {
3785 		return 0;
3786 	}
3787 
3788 	/* usage filter */
3789 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3790 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3791 		return 0;
3792 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3793 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3794 		return 0;
3795 	}
3796 
3797 	/* devid filter */
3798 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3799 	    chunk_devid_filter(leaf, chunk, bargs)) {
3800 		return 0;
3801 	}
3802 
3803 	/* drange filter, makes sense only with devid filter */
3804 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3805 	    chunk_drange_filter(leaf, chunk, bargs)) {
3806 		return 0;
3807 	}
3808 
3809 	/* vrange filter */
3810 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3811 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3812 		return 0;
3813 	}
3814 
3815 	/* stripes filter */
3816 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3817 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3818 		return 0;
3819 	}
3820 
3821 	/* soft profile changing mode */
3822 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3823 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3824 		return 0;
3825 	}
3826 
3827 	/*
3828 	 * limited by count, must be the last filter
3829 	 */
3830 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3831 		if (bargs->limit == 0)
3832 			return 0;
3833 		else
3834 			bargs->limit--;
3835 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3836 		/*
3837 		 * Same logic as the 'limit' filter; the minimum cannot be
3838 		 * determined here because we do not have the global information
3839 		 * about the count of all chunks that satisfy the filters.
3840 		 */
3841 		if (bargs->limit_max == 0)
3842 			return 0;
3843 		else
3844 			bargs->limit_max--;
3845 	}
3846 
3847 	return 1;
3848 }
3849 
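/*
 * The main balance loop. It runs over the chunk tree twice: a counting
 * pass that only records how many chunks are expected to be balanced, and
 * a second pass that relocates every chunk passing the filters.
 */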
3850 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3851 {
3852 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3853 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3854 	u64 chunk_type;
3855 	struct btrfs_chunk *chunk;
3856 	struct btrfs_path *path = NULL;
3857 	struct btrfs_key key;
3858 	struct btrfs_key found_key;
3859 	struct extent_buffer *leaf;
3860 	int slot;
3861 	int ret;
3862 	int enospc_errors = 0;
3863 	bool counting = true;
3864 	/* The single value limit and min/max limits share a union in the balance args, so save the original values */
3865 	u64 limit_data = bctl->data.limit;
3866 	u64 limit_meta = bctl->meta.limit;
3867 	u64 limit_sys = bctl->sys.limit;
3868 	u32 count_data = 0;
3869 	u32 count_meta = 0;
3870 	u32 count_sys = 0;
3871 	int chunk_reserved = 0;
3872 
3873 	path = btrfs_alloc_path();
3874 	if (!path) {
3875 		ret = -ENOMEM;
3876 		goto error;
3877 	}
3878 
3879 	/* zero out stat counters */
3880 	spin_lock(&fs_info->balance_lock);
3881 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3882 	spin_unlock(&fs_info->balance_lock);
3883 again:
3884 	if (!counting) {
3885 		/*
3886 		 * The single value limit and min/max limits use the same bytes
3887 		 * in the balance args (a union); restore the saved values here.
3888 		 */
3889 		bctl->data.limit = limit_data;
3890 		bctl->meta.limit = limit_meta;
3891 		bctl->sys.limit = limit_sys;
3892 	}
3893 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3894 	key.offset = (u64)-1;
3895 	key.type = BTRFS_CHUNK_ITEM_KEY;
3896 
3897 	while (1) {
3898 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3899 		    atomic_read(&fs_info->balance_cancel_req)) {
3900 			ret = -ECANCELED;
3901 			goto error;
3902 		}
3903 
3904 		mutex_lock(&fs_info->reclaim_bgs_lock);
3905 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3906 		if (ret < 0) {
3907 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3908 			goto error;
3909 		}
3910 
3911 		/*
3912 		 * This shouldn't happen; it means the last relocation
3913 		 * failed.
3914 		 */
3915 		if (ret == 0)
3916 			BUG(); /* FIXME break ? */
3917 
3918 		ret = btrfs_previous_item(chunk_root, path, 0,
3919 					  BTRFS_CHUNK_ITEM_KEY);
3920 		if (ret) {
3921 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3922 			ret = 0;
3923 			break;
3924 		}
3925 
3926 		leaf = path->nodes[0];
3927 		slot = path->slots[0];
3928 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3929 
3930 		if (found_key.objectid != key.objectid) {
3931 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3932 			break;
3933 		}
3934 
3935 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3936 		chunk_type = btrfs_chunk_type(leaf, chunk);
3937 
3938 		if (!counting) {
3939 			spin_lock(&fs_info->balance_lock);
3940 			bctl->stat.considered++;
3941 			spin_unlock(&fs_info->balance_lock);
3942 		}
3943 
3944 		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3945 
3946 		btrfs_release_path(path);
3947 		if (!ret) {
3948 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3949 			goto loop;
3950 		}
3951 
3952 		if (counting) {
3953 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3954 			spin_lock(&fs_info->balance_lock);
3955 			bctl->stat.expected++;
3956 			spin_unlock(&fs_info->balance_lock);
3957 
3958 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3959 				count_data++;
3960 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3961 				count_sys++;
3962 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3963 				count_meta++;
3964 
3965 			goto loop;
3966 		}
3967 
3968 		/*
3969 		 * Apply the limit_min filter. No need to check whether the
3970 		 * LIMITS filter is used, since limit_min is 0 by default.
3971 		 */
3972 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3973 					count_data < bctl->data.limit_min)
3974 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3975 					count_meta < bctl->meta.limit_min)
3976 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3977 					count_sys < bctl->sys.limit_min)) {
3978 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3979 			goto loop;
3980 		}
3981 
3982 		if (!chunk_reserved) {
3983 			/*
3984 			 * We may be relocating the only data chunk we have,
3985 			 * which could cause us to lose the data raid
3986 			 * profile, so allocate an empty data chunk in
3987 			 * advance.
3988 			 */
3989 			ret = btrfs_may_alloc_data_chunk(fs_info,
3990 							 found_key.offset);
3991 			if (ret < 0) {
3992 				mutex_unlock(&fs_info->reclaim_bgs_lock);
3993 				goto error;
3994 			} else if (ret == 1) {
3995 				chunk_reserved = 1;
3996 			}
3997 		}
3998 
3999 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
4000 		mutex_unlock(&fs_info->reclaim_bgs_lock);
4001 		if (ret == -ENOSPC) {
4002 			enospc_errors++;
4003 		} else if (ret == -ETXTBSY) {
4004 			btrfs_info(fs_info,
4005 	   "skipping relocation of block group %llu due to active swapfile",
4006 				   found_key.offset);
4007 			ret = 0;
4008 		} else if (ret) {
4009 			goto error;
4010 		} else {
4011 			spin_lock(&fs_info->balance_lock);
4012 			bctl->stat.completed++;
4013 			spin_unlock(&fs_info->balance_lock);
4014 		}
4015 loop:
4016 		if (found_key.offset == 0)
4017 			break;
4018 		key.offset = found_key.offset - 1;
4019 	}
4020 
4021 	if (counting) {
4022 		btrfs_release_path(path);
4023 		counting = false;
4024 		goto again;
4025 	}
4026 error:
4027 	btrfs_free_path(path);
4028 	if (enospc_errors) {
4029 		btrfs_info(fs_info, "%d enospc errors during balance",
4030 			   enospc_errors);
4031 		if (!ret)
4032 			ret = -ENOSPC;
4033 	}
4034 
4035 	return ret;
4036 }
4037 
4038 /**
4039  * alloc_profile_is_valid - see if a given profile is valid and reduced
4040  * @flags: profile to validate
4041  * @extended: if true @flags is treated as an extended profile
4042  */
4043 static int alloc_profile_is_valid(u64 flags, int extended)
4044 {
4045 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4046 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
4047 
4048 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4049 
4050 	/* 1) check that all other bits are zeroed */
4051 	if (flags & ~mask)
4052 		return 0;
4053 
4054 	/* 2) see if profile is reduced */
4055 	if (flags == 0)
4056 		return !extended; /* "0" is valid for usual profiles */
4057 
4058 	return has_single_bit_set(flags);
4059 }
4060 
4061 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4062 {
4063 	/* cancel requested || normal exit path */
4064 	return atomic_read(&fs_info->balance_cancel_req) ||
4065 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
4066 		 atomic_read(&fs_info->balance_cancel_req) == 0);
4067 }
4068 
4069 /*
4070  * Validate target profile against allowed profiles and return true if it's OK.
4071  * Otherwise print the error message and return false.
4072  */
4073 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4074 		const struct btrfs_balance_args *bargs,
4075 		u64 allowed, const char *type)
4076 {
4077 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4078 		return true;
4079 
4080 	/* Profile is valid and does not have bits outside of the allowed set */
4081 	if (alloc_profile_is_valid(bargs->target, 1) &&
4082 	    (bargs->target & ~allowed) == 0)
4083 		return true;
4084 
4085 	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4086 			type, btrfs_bg_type_to_raid_name(bargs->target));
4087 	return false;
4088 }
4089 
4090 /*
4091  * Fill @buf with textual description of balance filter flags @bargs, up to
4092  * @size_buf including the terminating null. The output may be trimmed if it
4093  * does not fit into the provided buffer.
4094  */
4095 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4096 				 u32 size_buf)
4097 {
4098 	int ret;
4099 	u32 size_bp = size_buf;
4100 	char *bp = buf;
4101 	u64 flags = bargs->flags;
4102 	char tmp_buf[128] = {'\0'};
4103 
4104 	if (!flags)
4105 		return;
4106 
4107 #define CHECK_APPEND_NOARG(a)						\
4108 	do {								\
4109 		ret = snprintf(bp, size_bp, (a));			\
4110 		if (ret < 0 || ret >= size_bp)				\
4111 			goto out_overflow;				\
4112 		size_bp -= ret;						\
4113 		bp += ret;						\
4114 	} while (0)
4115 
4116 #define CHECK_APPEND_1ARG(a, v1)					\
4117 	do {								\
4118 		ret = snprintf(bp, size_bp, (a), (v1));			\
4119 		if (ret < 0 || ret >= size_bp)				\
4120 			goto out_overflow;				\
4121 		size_bp -= ret;						\
4122 		bp += ret;						\
4123 	} while (0)
4124 
4125 #define CHECK_APPEND_2ARG(a, v1, v2)					\
4126 	do {								\
4127 		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
4128 		if (ret < 0 || ret >= size_bp)				\
4129 			goto out_overflow;				\
4130 		size_bp -= ret;						\
4131 		bp += ret;						\
4132 	} while (0)
4133 
4134 	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4135 		CHECK_APPEND_1ARG("convert=%s,",
4136 				  btrfs_bg_type_to_raid_name(bargs->target));
4137 
4138 	if (flags & BTRFS_BALANCE_ARGS_SOFT)
4139 		CHECK_APPEND_NOARG("soft,");
4140 
4141 	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4142 		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4143 					    sizeof(tmp_buf));
4144 		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4145 	}
4146 
4147 	if (flags & BTRFS_BALANCE_ARGS_USAGE)
4148 		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4149 
4150 	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4151 		CHECK_APPEND_2ARG("usage=%u..%u,",
4152 				  bargs->usage_min, bargs->usage_max);
4153 
4154 	if (flags & BTRFS_BALANCE_ARGS_DEVID)
4155 		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4156 
4157 	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4158 		CHECK_APPEND_2ARG("drange=%llu..%llu,",
4159 				  bargs->pstart, bargs->pend);
4160 
4161 	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4162 		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4163 				  bargs->vstart, bargs->vend);
4164 
4165 	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4166 		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4167 
4168 	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4169 		CHECK_APPEND_2ARG("limit=%u..%u,",
4170 				bargs->limit_min, bargs->limit_max);
4171 
4172 	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4173 		CHECK_APPEND_2ARG("stripes=%u..%u,",
4174 				  bargs->stripes_min, bargs->stripes_max);
4175 
4176 #undef CHECK_APPEND_2ARG
4177 #undef CHECK_APPEND_1ARG
4178 #undef CHECK_APPEND_NOARG
4179 
4180 out_overflow:
4181 
4182 	if (size_bp < size_buf)
4183 		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4184 	else
4185 		buf[0] = '\0';
4186 }
4187 
4188 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4189 {
4190 	u32 size_buf = 1024;
4191 	char tmp_buf[192] = {'\0'};
4192 	char *buf;
4193 	char *bp;
4194 	u32 size_bp = size_buf;
4195 	int ret;
4196 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4197 
4198 	buf = kzalloc(size_buf, GFP_KERNEL);
4199 	if (!buf)
4200 		return;
4201 
4202 	bp = buf;
4203 
4204 #define CHECK_APPEND_1ARG(a, v1)					\
4205 	do {								\
4206 		ret = snprintf(bp, size_bp, (a), (v1));			\
4207 		if (ret < 0 || ret >= size_bp)				\
4208 			goto out_overflow;				\
4209 		size_bp -= ret;						\
4210 		bp += ret;						\
4211 	} while (0)
4212 
4213 	if (bctl->flags & BTRFS_BALANCE_FORCE)
4214 		CHECK_APPEND_1ARG("%s", "-f ");
4215 
4216 	if (bctl->flags & BTRFS_BALANCE_DATA) {
4217 		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4218 		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4219 	}
4220 
4221 	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4222 		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4223 		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4224 	}
4225 
4226 	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4227 		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4228 		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4229 	}
4230 
4231 #undef CHECK_APPEND_1ARG
4232 
4233 out_overflow:
4234 
4235 	if (size_bp < size_buf)
4236 		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4237 	btrfs_info(fs_info, "balance: %s %s",
4238 		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4239 		   "resume" : "start", buf);
4240 
4241 	kfree(buf);
4242 }
4243 
4244 /*
4245  * Should be called with the balance mutex held.
4246  */
4247 int btrfs_balance(struct btrfs_fs_info *fs_info,
4248 		  struct btrfs_balance_control *bctl,
4249 		  struct btrfs_ioctl_balance_args *bargs)
4250 {
4251 	u64 meta_target, data_target;
4252 	u64 allowed;
4253 	int mixed = 0;
4254 	int ret;
4255 	u64 num_devices;
4256 	unsigned seq;
4257 	bool reducing_redundancy;
4258 	int i;
4259 
4260 	if (btrfs_fs_closing(fs_info) ||
4261 	    atomic_read(&fs_info->balance_pause_req) ||
4262 	    btrfs_should_cancel_balance(fs_info)) {
4263 		ret = -EINVAL;
4264 		goto out;
4265 	}
4266 
4267 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4268 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4269 		mixed = 1;
4270 
4271 	/*
4272 	 * In case of mixed groups both data and meta should be picked,
4273 	 * and identical options should be given for both of them.
4274 	 */
4275 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4276 	if (mixed && (bctl->flags & allowed)) {
4277 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4278 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4279 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4280 			btrfs_err(fs_info,
4281 	  "balance: mixed groups data and metadata options must be the same");
4282 			ret = -EINVAL;
4283 			goto out;
4284 		}
4285 	}
4286 
4287 	/*
4288 	 * rw_devices will not change at the moment, device add/delete/replace
4289 	 * are exclusive
4290 	 */
4291 	num_devices = fs_info->fs_devices->rw_devices;
4292 
4293 	/*
4294 	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4295 	 * special bit for it, to make it easier to distinguish.  Thus we need
4296 	 * to set it manually, or balance would refuse the profile.
4297 	 */
4298 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4299 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4300 		if (num_devices >= btrfs_raid_array[i].devs_min)
4301 			allowed |= btrfs_raid_array[i].bg_flag;
4302 
4303 	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4304 	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4305 	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4306 		ret = -EINVAL;
4307 		goto out;
4308 	}
4309 
4310 	/*
4311 	 * Allow to reduce metadata or system integrity only if force set for
4312 	 * profiles with redundancy (copies, parity)
4313 	 */
4314 	allowed = 0;
4315 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4316 		if (btrfs_raid_array[i].ncopies >= 2 ||
4317 		    btrfs_raid_array[i].tolerated_failures >= 1)
4318 			allowed |= btrfs_raid_array[i].bg_flag;
4319 	}
4320 	do {
4321 		seq = read_seqbegin(&fs_info->profiles_lock);
4322 
4323 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4324 		     (fs_info->avail_system_alloc_bits & allowed) &&
4325 		     !(bctl->sys.target & allowed)) ||
4326 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4327 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4328 		     !(bctl->meta.target & allowed)))
4329 			reducing_redundancy = true;
4330 		else
4331 			reducing_redundancy = false;
4332 
4333 		/* if we're not converting, the target field is uninitialized */
4334 		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4335 			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4336 		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4337 			bctl->data.target : fs_info->avail_data_alloc_bits;
4338 	} while (read_seqretry(&fs_info->profiles_lock, seq));
4339 
4340 	if (reducing_redundancy) {
4341 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4342 			btrfs_info(fs_info,
4343 			   "balance: force reducing metadata redundancy");
4344 		} else {
4345 			btrfs_err(fs_info,
4346 	"balance: reduces metadata redundancy, use --force if you want this");
4347 			ret = -EINVAL;
4348 			goto out;
4349 		}
4350 	}
4351 
4352 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4353 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4354 		btrfs_warn(fs_info,
4355 	"balance: metadata profile %s has lower redundancy than data profile %s",
4356 				btrfs_bg_type_to_raid_name(meta_target),
4357 				btrfs_bg_type_to_raid_name(data_target));
4358 	}
4359 
4360 	ret = insert_balance_item(fs_info, bctl);
4361 	if (ret && ret != -EEXIST)
4362 		goto out;
4363 
4364 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4365 		BUG_ON(ret == -EEXIST);
4366 		BUG_ON(fs_info->balance_ctl);
4367 		spin_lock(&fs_info->balance_lock);
4368 		fs_info->balance_ctl = bctl;
4369 		spin_unlock(&fs_info->balance_lock);
4370 	} else {
4371 		BUG_ON(ret != -EEXIST);
4372 		spin_lock(&fs_info->balance_lock);
4373 		update_balance_args(bctl);
4374 		spin_unlock(&fs_info->balance_lock);
4375 	}
4376 
4377 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4378 	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4379 	describe_balance_start_or_resume(fs_info);
4380 	mutex_unlock(&fs_info->balance_mutex);
4381 
4382 	ret = __btrfs_balance(fs_info);
4383 
4384 	mutex_lock(&fs_info->balance_mutex);
4385 	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
4386 		btrfs_info(fs_info, "balance: paused");
4387 		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
4388 	}
4389 	/*
4390 	 * Balance can be canceled by:
4391 	 *
4392 	 * - Regular cancel request
4393 	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4394 	 *
4395 	 * - Fatal signal to "btrfs" process
4396 	 *   Either the signal caught by wait_reserve_ticket() and callers
4397 	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4398 	 *   got -ECANCELED.
4399 	 *   Either way, in this case balance_cancel_req = 0, and
4400 	 *   ret == -EINTR or ret == -ECANCELED.
4401 	 *
4402 	 * So here we only check the return value to catch canceled balance.
4403 	 */
4404 	else if (ret == -ECANCELED || ret == -EINTR)
4405 		btrfs_info(fs_info, "balance: canceled");
4406 	else
4407 		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4408 
4409 	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4410 
4411 	if (bargs) {
4412 		memset(bargs, 0, sizeof(*bargs));
4413 		btrfs_update_ioctl_balance_args(fs_info, bargs);
4414 	}
4415 
4416 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4417 	    balance_need_close(fs_info)) {
4418 		reset_balance_state(fs_info);
4419 		btrfs_exclop_finish(fs_info);
4420 	}
4421 
4422 	wake_up(&fs_info->balance_wait_q);
4423 
4424 	return ret;
4425 out:
4426 	if (bctl->flags & BTRFS_BALANCE_RESUME)
4427 		reset_balance_state(fs_info);
4428 	else
4429 		kfree(bctl);
4430 	btrfs_exclop_finish(fs_info);
4431 
4432 	return ret;
4433 }
4434 
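/* Kthread body used to resume a previously interrupted balance. */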
4435 static int balance_kthread(void *data)
4436 {
4437 	struct btrfs_fs_info *fs_info = data;
4438 	int ret = 0;
4439 
4440 	sb_start_write(fs_info->sb);
4441 	mutex_lock(&fs_info->balance_mutex);
4442 	if (fs_info->balance_ctl)
4443 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4444 	mutex_unlock(&fs_info->balance_mutex);
4445 	sb_end_write(fs_info->sb);
4446 
4447 	return ret;
4448 }
4449 
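/*
 * Resume a paused balance asynchronously by starting the "btrfs-balance"
 * kthread, unless resume is disabled by the skip_balance mount option.
 */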
4450 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4451 {
4452 	struct task_struct *tsk;
4453 
4454 	mutex_lock(&fs_info->balance_mutex);
4455 	if (!fs_info->balance_ctl) {
4456 		mutex_unlock(&fs_info->balance_mutex);
4457 		return 0;
4458 	}
4459 	mutex_unlock(&fs_info->balance_mutex);
4460 
4461 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4462 		btrfs_info(fs_info, "balance: resume skipped");
4463 		return 0;
4464 	}
4465 
4466 	spin_lock(&fs_info->super_lock);
4467 	ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
4468 	fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
4469 	spin_unlock(&fs_info->super_lock);
4470 	/*
4471 	 * A ro->rw remount sequence should continue with the paused balance
4472 	 * regardless of who paused it, the system or the user, so set
4473 	 * the resume flag.
4474 	 */
4475 	spin_lock(&fs_info->balance_lock);
4476 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4477 	spin_unlock(&fs_info->balance_lock);
4478 
4479 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4480 	return PTR_ERR_OR_ZERO(tsk);
4481 }
4482 
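/*
 * Read the balance item from disk during mount and rebuild the in-memory
 * balance control so a previously interrupted balance can be resumed.
 */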
4483 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4484 {
4485 	struct btrfs_balance_control *bctl;
4486 	struct btrfs_balance_item *item;
4487 	struct btrfs_disk_balance_args disk_bargs;
4488 	struct btrfs_path *path;
4489 	struct extent_buffer *leaf;
4490 	struct btrfs_key key;
4491 	int ret;
4492 
4493 	path = btrfs_alloc_path();
4494 	if (!path)
4495 		return -ENOMEM;
4496 
4497 	key.objectid = BTRFS_BALANCE_OBJECTID;
4498 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4499 	key.offset = 0;
4500 
4501 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4502 	if (ret < 0)
4503 		goto out;
4504 	if (ret > 0) { /* ret = -ENOENT; */
4505 		ret = 0;
4506 		goto out;
4507 	}
4508 
4509 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4510 	if (!bctl) {
4511 		ret = -ENOMEM;
4512 		goto out;
4513 	}
4514 
4515 	leaf = path->nodes[0];
4516 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4517 
4518 	bctl->flags = btrfs_balance_flags(leaf, item);
4519 	bctl->flags |= BTRFS_BALANCE_RESUME;
4520 
4521 	btrfs_balance_data(leaf, item, &disk_bargs);
4522 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4523 	btrfs_balance_meta(leaf, item, &disk_bargs);
4524 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4525 	btrfs_balance_sys(leaf, item, &disk_bargs);
4526 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4527 
4528 	/*
4529 	 * This should never happen, as the paused balance state is recovered
4530 	 * during mount without any chance of other exclusive ops colliding.
4531 	 *
4532 	 * This gives the exclusive op status to balance and keeps it in paused
4533 	 * state until user intervention (cancel or umount). If the ownership
4534 	 * cannot be assigned, show a message but do not fail. The balance
4535 	 * is in a paused state and must have fs_info::balance_ctl properly
4536 	 * set up.
4537 	 */
4538 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
4539 		btrfs_warn(fs_info,
4540 	"balance: cannot set exclusive op status, resume manually");
4541 
4542 	btrfs_release_path(path);
4543 
4544 	mutex_lock(&fs_info->balance_mutex);
4545 	BUG_ON(fs_info->balance_ctl);
4546 	spin_lock(&fs_info->balance_lock);
4547 	fs_info->balance_ctl = bctl;
4548 	spin_unlock(&fs_info->balance_lock);
4549 	mutex_unlock(&fs_info->balance_mutex);
4550 out:
4551 	btrfs_free_path(path);
4552 	return ret;
4553 }
4554 
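/*
 * Pause a running balance: raise balance_pause_req and wait until the
 * balance loop clears BTRFS_FS_BALANCE_RUNNING. Returns -ENOTCONN when no
 * balance is in progress.
 */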
4555 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4556 {
4557 	int ret = 0;
4558 
4559 	mutex_lock(&fs_info->balance_mutex);
4560 	if (!fs_info->balance_ctl) {
4561 		mutex_unlock(&fs_info->balance_mutex);
4562 		return -ENOTCONN;
4563 	}
4564 
4565 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4566 		atomic_inc(&fs_info->balance_pause_req);
4567 		mutex_unlock(&fs_info->balance_mutex);
4568 
4569 		wait_event(fs_info->balance_wait_q,
4570 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4571 
4572 		mutex_lock(&fs_info->balance_mutex);
4573 		/* we are good with balance_ctl ripped off from under us */
4574 		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4575 		atomic_dec(&fs_info->balance_pause_req);
4576 	} else {
4577 		ret = -ENOTCONN;
4578 	}
4579 
4580 	mutex_unlock(&fs_info->balance_mutex);
4581 	return ret;
4582 }
4583 
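/*
 * Cancel a running or paused balance. A running balance notices
 * balance_cancel_req and deletes its own item; a paused one has its state
 * reset here.
 */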
4584 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4585 {
4586 	mutex_lock(&fs_info->balance_mutex);
4587 	if (!fs_info->balance_ctl) {
4588 		mutex_unlock(&fs_info->balance_mutex);
4589 		return -ENOTCONN;
4590 	}
4591 
4592 	/*
4593 	 * A paused balance with the item stored on disk can be resumed at
4594 	 * mount time if the mount is read-write. Otherwise it's still paused
4595 	 * and we must not allow cancelling as it deletes the item.
4596 	 */
4597 	if (sb_rdonly(fs_info->sb)) {
4598 		mutex_unlock(&fs_info->balance_mutex);
4599 		return -EROFS;
4600 	}
4601 
4602 	atomic_inc(&fs_info->balance_cancel_req);
4603 	/*
4604 	 * If balance is running, just wait for it to finish and return;
4605 	 * the balance item is deleted in btrfs_balance() in this case.
4606 	 */
4607 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4608 		mutex_unlock(&fs_info->balance_mutex);
4609 		wait_event(fs_info->balance_wait_q,
4610 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4611 		mutex_lock(&fs_info->balance_mutex);
4612 	} else {
4613 		mutex_unlock(&fs_info->balance_mutex);
4614 		/*
4615 		 * The lock was released to allow other waiters to continue;
4616 		 * re-examine the status after reacquiring it.
4617 		 */
4618 		mutex_lock(&fs_info->balance_mutex);
4619 
4620 		if (fs_info->balance_ctl) {
4621 			reset_balance_state(fs_info);
4622 			btrfs_exclop_finish(fs_info);
4623 			btrfs_info(fs_info, "balance: canceled");
4624 		}
4625 	}
4626 
4627 	BUG_ON(fs_info->balance_ctl ||
4628 		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4629 	atomic_dec(&fs_info->balance_cancel_req);
4630 	mutex_unlock(&fs_info->balance_mutex);
4631 	return 0;
4632 }
4633 
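/*
 * Walk all root items and insert any missing uuid tree entries for the
 * subvolume uuid and received_uuid; see btrfs_create_uuid_tree() below.
 */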
4634 int btrfs_uuid_scan_kthread(void *data)
4635 {
4636 	struct btrfs_fs_info *fs_info = data;
4637 	struct btrfs_root *root = fs_info->tree_root;
4638 	struct btrfs_key key;
4639 	struct btrfs_path *path = NULL;
4640 	int ret = 0;
4641 	struct extent_buffer *eb;
4642 	int slot;
4643 	struct btrfs_root_item root_item;
4644 	u32 item_size;
4645 	struct btrfs_trans_handle *trans = NULL;
4646 	bool closing = false;
4647 
4648 	path = btrfs_alloc_path();
4649 	if (!path) {
4650 		ret = -ENOMEM;
4651 		goto out;
4652 	}
4653 
4654 	key.objectid = 0;
4655 	key.type = BTRFS_ROOT_ITEM_KEY;
4656 	key.offset = 0;
4657 
4658 	while (1) {
4659 		if (btrfs_fs_closing(fs_info)) {
4660 			closing = true;
4661 			break;
4662 		}
4663 		ret = btrfs_search_forward(root, &key, path,
4664 				BTRFS_OLDEST_GENERATION);
4665 		if (ret) {
4666 			if (ret > 0)
4667 				ret = 0;
4668 			break;
4669 		}
4670 
4671 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4672 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4673 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4674 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4675 			goto skip;
4676 
4677 		eb = path->nodes[0];
4678 		slot = path->slots[0];
4679 		item_size = btrfs_item_size(eb, slot);
4680 		if (item_size < sizeof(root_item))
4681 			goto skip;
4682 
4683 		read_extent_buffer(eb, &root_item,
4684 				   btrfs_item_ptr_offset(eb, slot),
4685 				   (int)sizeof(root_item));
4686 		if (btrfs_root_refs(&root_item) == 0)
4687 			goto skip;
4688 
4689 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4690 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4691 			if (trans)
4692 				goto update_tree;
4693 
4694 			btrfs_release_path(path);
4695 			/*
4696 			 * 1 - subvol uuid item
4697 			 * 1 - received_subvol uuid item
4698 			 */
4699 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4700 			if (IS_ERR(trans)) {
4701 				ret = PTR_ERR(trans);
4702 				break;
4703 			}
4704 			continue;
4705 		} else {
4706 			goto skip;
4707 		}
4708 update_tree:
4709 		btrfs_release_path(path);
4710 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4711 			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4712 						  BTRFS_UUID_KEY_SUBVOL,
4713 						  key.objectid);
4714 			if (ret < 0) {
4715 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4716 					ret);
4717 				break;
4718 			}
4719 		}
4720 
4721 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4722 			ret = btrfs_uuid_tree_add(trans,
4723 						  root_item.received_uuid,
4724 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4725 						  key.objectid);
4726 			if (ret < 0) {
4727 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4728 					ret);
4729 				break;
4730 			}
4731 		}
4732 
4733 skip:
4734 		btrfs_release_path(path);
4735 		if (trans) {
4736 			ret = btrfs_end_transaction(trans);
4737 			trans = NULL;
4738 			if (ret)
4739 				break;
4740 		}
4741 
4742 		if (key.offset < (u64)-1) {
4743 			key.offset++;
4744 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4745 			key.offset = 0;
4746 			key.type = BTRFS_ROOT_ITEM_KEY;
4747 		} else if (key.objectid < (u64)-1) {
4748 			key.offset = 0;
4749 			key.type = BTRFS_ROOT_ITEM_KEY;
4750 			key.objectid++;
4751 		} else {
4752 			break;
4753 		}
4754 		cond_resched();
4755 	}
4756 
4757 out:
4758 	btrfs_free_path(path);
4759 	if (trans && !IS_ERR(trans))
4760 		btrfs_end_transaction(trans);
4761 	if (ret)
4762 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4763 	else if (!closing)
4764 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4765 	up(&fs_info->uuid_tree_rescan_sem);
4766 	return 0;
4767 }
4768 
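/*
 * Create the uuid tree and start the background scan that populates it
 * from the existing root items.
 */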
4769 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4770 {
4771 	struct btrfs_trans_handle *trans;
4772 	struct btrfs_root *tree_root = fs_info->tree_root;
4773 	struct btrfs_root *uuid_root;
4774 	struct task_struct *task;
4775 	int ret;
4776 
4777 	/*
4778 	 * 1 - root node
4779 	 * 1 - root item
4780 	 */
4781 	trans = btrfs_start_transaction(tree_root, 2);
4782 	if (IS_ERR(trans))
4783 		return PTR_ERR(trans);
4784 
4785 	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4786 	if (IS_ERR(uuid_root)) {
4787 		ret = PTR_ERR(uuid_root);
4788 		btrfs_abort_transaction(trans, ret);
4789 		btrfs_end_transaction(trans);
4790 		return ret;
4791 	}
4792 
4793 	fs_info->uuid_root = uuid_root;
4794 
4795 	ret = btrfs_commit_transaction(trans);
4796 	if (ret)
4797 		return ret;
4798 
4799 	down(&fs_info->uuid_tree_rescan_sem);
4800 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4801 	if (IS_ERR(task)) {
4802 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4803 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4804 		up(&fs_info->uuid_tree_rescan_sem);
4805 		return PTR_ERR(task);
4806 	}
4807 
4808 	return 0;
4809 }
4810 
4811 /*
4812  * Shrinking a device means finding all of the device extents past
4813  * the new size, and then following the back refs to the chunks.
4814  * The chunk relocation code actually frees the device extent.
4815  */
4816 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4817 {
4818 	struct btrfs_fs_info *fs_info = device->fs_info;
4819 	struct btrfs_root *root = fs_info->dev_root;
4820 	struct btrfs_trans_handle *trans;
4821 	struct btrfs_dev_extent *dev_extent = NULL;
4822 	struct btrfs_path *path;
4823 	u64 length;
4824 	u64 chunk_offset;
4825 	int ret;
4826 	int slot;
4827 	int failed = 0;
4828 	bool retried = false;
4829 	struct extent_buffer *l;
4830 	struct btrfs_key key;
4831 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4832 	u64 old_total = btrfs_super_total_bytes(super_copy);
4833 	u64 old_size = btrfs_device_get_total_bytes(device);
4834 	u64 diff;
4835 	u64 start;
4836 
4837 	new_size = round_down(new_size, fs_info->sectorsize);
4838 	start = new_size;
4839 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4840 
4841 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4842 		return -EINVAL;
4843 
4844 	path = btrfs_alloc_path();
4845 	if (!path)
4846 		return -ENOMEM;
4847 
4848 	path->reada = READA_BACK;
4849 
4850 	trans = btrfs_start_transaction(root, 0);
4851 	if (IS_ERR(trans)) {
4852 		btrfs_free_path(path);
4853 		return PTR_ERR(trans);
4854 	}
4855 
4856 	mutex_lock(&fs_info->chunk_mutex);
4857 
4858 	btrfs_device_set_total_bytes(device, new_size);
4859 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4860 		device->fs_devices->total_rw_bytes -= diff;
4861 		atomic64_sub(diff, &fs_info->free_chunk_space);
4862 	}
4863 
4864 	/*
4865 	 * Once the device's size has been set to the new size, ensure all
4866 	 * in-memory chunks are synced to disk so that the loop below sees them
4867 	 * and relocates them accordingly.
4868 	 */
4869 	if (contains_pending_extent(device, &start, diff)) {
4870 		mutex_unlock(&fs_info->chunk_mutex);
4871 		ret = btrfs_commit_transaction(trans);
4872 		if (ret)
4873 			goto done;
4874 	} else {
4875 		mutex_unlock(&fs_info->chunk_mutex);
4876 		btrfs_end_transaction(trans);
4877 	}
4878 
4879 again:
4880 	key.objectid = device->devid;
4881 	key.offset = (u64)-1;
4882 	key.type = BTRFS_DEV_EXTENT_KEY;
4883 
4884 	do {
4885 		mutex_lock(&fs_info->reclaim_bgs_lock);
4886 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4887 		if (ret < 0) {
4888 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4889 			goto done;
4890 		}
4891 
4892 		ret = btrfs_previous_item(root, path, 0, key.type);
4893 		if (ret) {
4894 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4895 			if (ret < 0)
4896 				goto done;
4897 			ret = 0;
4898 			btrfs_release_path(path);
4899 			break;
4900 		}
4901 
4902 		l = path->nodes[0];
4903 		slot = path->slots[0];
4904 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4905 
4906 		if (key.objectid != device->devid) {
4907 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4908 			btrfs_release_path(path);
4909 			break;
4910 		}
4911 
4912 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4913 		length = btrfs_dev_extent_length(l, dev_extent);
4914 
4915 		if (key.offset + length <= new_size) {
4916 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4917 			btrfs_release_path(path);
4918 			break;
4919 		}
4920 
4921 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4922 		btrfs_release_path(path);
4923 
4924 		/*
4925 		 * We may be relocating the only data chunk we have,
4926 		 * which could cause us to lose the data raid
4927 		 * profile, so allocate an empty data chunk in
4928 		 * advance.
4929 		 */
4930 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4931 		if (ret < 0) {
4932 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4933 			goto done;
4934 		}
4935 
4936 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4937 		mutex_unlock(&fs_info->reclaim_bgs_lock);
4938 		if (ret == -ENOSPC) {
4939 			failed++;
4940 		} else if (ret) {
4941 			if (ret == -ETXTBSY) {
4942 				btrfs_warn(fs_info,
4943 		   "could not shrink block group %llu due to active swapfile",
4944 					   chunk_offset);
4945 			}
4946 			goto done;
4947 		}
4948 	} while (key.offset-- > 0);
4949 
4950 	if (failed && !retried) {
4951 		failed = 0;
4952 		retried = true;
4953 		goto again;
4954 	} else if (failed && retried) {
4955 		ret = -ENOSPC;
4956 		goto done;
4957 	}
4958 
4959 	/* Shrinking succeeded, else we would be at "done". */
4960 	trans = btrfs_start_transaction(root, 0);
4961 	if (IS_ERR(trans)) {
4962 		ret = PTR_ERR(trans);
4963 		goto done;
4964 	}
4965 
4966 	mutex_lock(&fs_info->chunk_mutex);
4967 	/* Clear all state bits beyond the shrunk device size */
4968 	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4969 			  CHUNK_STATE_MASK);
4970 
4971 	btrfs_device_set_disk_total_bytes(device, new_size);
4972 	if (list_empty(&device->post_commit_list))
4973 		list_add_tail(&device->post_commit_list,
4974 			      &trans->transaction->dev_update_list);
4975 
4976 	WARN_ON(diff > old_total);
4977 	btrfs_set_super_total_bytes(super_copy,
4978 			round_down(old_total - diff, fs_info->sectorsize));
4979 	mutex_unlock(&fs_info->chunk_mutex);
4980 
4981 	btrfs_reserve_chunk_metadata(trans, false);
4982 	/* Now btrfs_update_device() will change the on-disk size. */
4983 	ret = btrfs_update_device(trans, device);
4984 	btrfs_trans_release_chunk_metadata(trans);
4985 	if (ret < 0) {
4986 		btrfs_abort_transaction(trans, ret);
4987 		btrfs_end_transaction(trans);
4988 	} else {
4989 		ret = btrfs_commit_transaction(trans);
4990 	}
4991 done:
4992 	btrfs_free_path(path);
4993 	if (ret) {
4994 		mutex_lock(&fs_info->chunk_mutex);
4995 		btrfs_device_set_total_bytes(device, old_size);
4996 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4997 			device->fs_devices->total_rw_bytes += diff;
4998 		atomic64_add(diff, &fs_info->free_chunk_space);
4999 		mutex_unlock(&fs_info->chunk_mutex);
5000 	}
5001 	return ret;
5002 }
5003 
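/*
 * Append the key and chunk item to the superblock's sys_chunk_array, which
 * mirrors SYSTEM chunks so they can be read before the chunk tree itself.
 */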
5004 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
5005 			   struct btrfs_key *key,
5006 			   struct btrfs_chunk *chunk, int item_size)
5007 {
5008 	struct btrfs_super_block *super_copy = fs_info->super_copy;
5009 	struct btrfs_disk_key disk_key;
5010 	u32 array_size;
5011 	u8 *ptr;
5012 
5013 	lockdep_assert_held(&fs_info->chunk_mutex);
5014 
5015 	array_size = btrfs_super_sys_array_size(super_copy);
5016 	if (array_size + item_size + sizeof(disk_key)
5017 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
5018 		return -EFBIG;
5019 
5020 	ptr = super_copy->sys_chunk_array + array_size;
5021 	btrfs_cpu_key_to_disk(&disk_key, key);
5022 	memcpy(ptr, &disk_key, sizeof(disk_key));
5023 	ptr += sizeof(disk_key);
5024 	memcpy(ptr, chunk, item_size);
5025 	item_size += sizeof(disk_key);
5026 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5027 
5028 	return 0;
5029 }
5030 
5031 /*
5032  * Sort the devices in descending order by max_avail, then total_avail.
5033  */
5034 static int btrfs_cmp_device_info(const void *a, const void *b)
5035 {
5036 	const struct btrfs_device_info *di_a = a;
5037 	const struct btrfs_device_info *di_b = b;
5038 
5039 	if (di_a->max_avail > di_b->max_avail)
5040 		return -1;
5041 	if (di_a->max_avail < di_b->max_avail)
5042 		return 1;
5043 	if (di_a->total_avail > di_b->total_avail)
5044 		return -1;
5045 	if (di_a->total_avail < di_b->total_avail)
5046 		return 1;
5047 	return 0;
5048 }
5049 
5050 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5051 {
5052 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5053 		return;
5054 
5055 	btrfs_set_fs_incompat(info, RAID56);
5056 }
5057 
5058 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5059 {
5060 	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5061 		return;
5062 
5063 	btrfs_set_fs_incompat(info, RAID1C34);
5064 }
5065 
5066 /*
5067  * Structure used internally by btrfs_create_chunk().
5068  * Wraps the needed parameters.
5069  */
5070 struct alloc_chunk_ctl {
5071 	u64 start;
5072 	u64 type;
5073 	/* Total number of stripes to allocate */
5074 	int num_stripes;
5075 	/* sub_stripes info for map */
5076 	int sub_stripes;
5077 	/* Stripes per device */
5078 	int dev_stripes;
5079 	/* Maximum number of devices to use */
5080 	int devs_max;
5081 	/* Minimum number of devices to use */
5082 	int devs_min;
5083 	/* ndevs has to be a multiple of this */
5084 	int devs_increment;
5085 	/* Number of copies */
5086 	int ncopies;
5087 	/* Number of stripes worth of bytes to store parity information */
5088 	int nparity;
5089 	u64 max_stripe_size;
5090 	u64 max_chunk_size;
5091 	u64 dev_extent_min;
5092 	u64 stripe_size;
5093 	u64 chunk_size;
5094 	int ndevs;
5095 };
5096 
5097 static void init_alloc_chunk_ctl_policy_regular(
5098 				struct btrfs_fs_devices *fs_devices,
5099 				struct alloc_chunk_ctl *ctl)
5100 {
5101 	struct btrfs_space_info *space_info;
5102 
5103 	space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
5104 	ASSERT(space_info);
5105 
5106 	ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
5107 	ctl->max_stripe_size = ctl->max_chunk_size;
5108 
5109 	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
5110 		ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
5111 
5112 	/* We don't want a chunk larger than 10% of writable space */
5113 	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5114 				  ctl->max_chunk_size);
5115 	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5116 }
5117 
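/*
 * Zoned counterpart of the above: device extents are fixed to the zone size,
 * so only the chunk size limits are computed here, aligned to the zone size
 * and also capped at 10% of the writable space.
 */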
5118 static void init_alloc_chunk_ctl_policy_zoned(
5119 				      struct btrfs_fs_devices *fs_devices,
5120 				      struct alloc_chunk_ctl *ctl)
5121 {
5122 	u64 zone_size = fs_devices->fs_info->zone_size;
5123 	u64 limit;
5124 	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5125 	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5126 	u64 min_chunk_size = min_data_stripes * zone_size;
5127 	u64 type = ctl->type;
5128 
5129 	ctl->max_stripe_size = zone_size;
5130 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5131 		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5132 						 zone_size);
5133 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5134 		ctl->max_chunk_size = ctl->max_stripe_size;
5135 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5136 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5137 		ctl->devs_max = min_t(int, ctl->devs_max,
5138 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5139 	} else {
5140 		BUG();
5141 	}
5142 
5143 	/* We don't want a chunk larger than 10% of writable space */
5144 	limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5145 			       zone_size),
5146 		    min_chunk_size);
5147 	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5148 	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5149 }
5150 
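/*
 * Fill the allocation control structure from the raid attributes of the
 * requested chunk type and the chunk allocation policy of the filesystem.
 */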
5151 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5152 				 struct alloc_chunk_ctl *ctl)
5153 {
5154 	int index = btrfs_bg_flags_to_raid_index(ctl->type);
5155 
5156 	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5157 	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5158 	ctl->devs_max = btrfs_raid_array[index].devs_max;
5159 	if (!ctl->devs_max)
5160 		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5161 	ctl->devs_min = btrfs_raid_array[index].devs_min;
5162 	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5163 	ctl->ncopies = btrfs_raid_array[index].ncopies;
5164 	ctl->nparity = btrfs_raid_array[index].nparity;
5165 	ctl->ndevs = 0;
5166 
5167 	switch (fs_devices->chunk_alloc_policy) {
5168 	case BTRFS_CHUNK_ALLOC_REGULAR:
5169 		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5170 		break;
5171 	case BTRFS_CHUNK_ALLOC_ZONED:
5172 		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5173 		break;
5174 	default:
5175 		BUG();
5176 	}
5177 }
5178 
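/*
 * Collect, for each writable device, the largest hole of at least
 * ctl->dev_extent_min bytes into @devices_info and sort the result by
 * available space in descending order.
 */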
5179 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5180 			      struct alloc_chunk_ctl *ctl,
5181 			      struct btrfs_device_info *devices_info)
5182 {
5183 	struct btrfs_fs_info *info = fs_devices->fs_info;
5184 	struct btrfs_device *device;
5185 	u64 total_avail;
5186 	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5187 	int ret;
5188 	int ndevs = 0;
5189 	u64 max_avail;
5190 	u64 dev_offset;
5191 
5192 	/*
5193 	 * in the first pass through the devices list, we gather information
5194 	 * about the available holes on each device.
5195 	 */
5196 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5197 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5198 			WARN(1, KERN_ERR
5199 			       "BTRFS: read-only device in alloc_list\n");
5200 			continue;
5201 		}
5202 
5203 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5204 					&device->dev_state) ||
5205 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5206 			continue;
5207 
5208 		if (device->total_bytes > device->bytes_used)
5209 			total_avail = device->total_bytes - device->bytes_used;
5210 		else
5211 			total_avail = 0;
5212 
5213 		/* If there is no space on this device, skip it. */
5214 		if (total_avail < ctl->dev_extent_min)
5215 			continue;
5216 
5217 		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5218 					   &max_avail);
5219 		if (ret && ret != -ENOSPC)
5220 			return ret;
5221 
5222 		if (ret == 0)
5223 			max_avail = dev_extent_want;
5224 
5225 		if (max_avail < ctl->dev_extent_min) {
5226 			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5227 				btrfs_debug(info,
5228 			"%s: devid %llu has no free space, have=%llu want=%llu",
5229 					    __func__, device->devid, max_avail,
5230 					    ctl->dev_extent_min);
5231 			continue;
5232 		}
5233 
5234 		if (ndevs == fs_devices->rw_devices) {
5235 			WARN(1, "%s: found more than %llu devices\n",
5236 			     __func__, fs_devices->rw_devices);
5237 			break;
5238 		}
5239 		devices_info[ndevs].dev_offset = dev_offset;
5240 		devices_info[ndevs].max_avail = max_avail;
5241 		devices_info[ndevs].total_avail = total_avail;
5242 		devices_info[ndevs].dev = device;
5243 		++ndevs;
5244 	}
5245 	ctl->ndevs = ndevs;
5246 
5247 	/*
5248 	 * now sort the devices by hole size / available space
5249 	 */
5250 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5251 	     btrfs_cmp_device_info, NULL);
5252 
5253 	return 0;
5254 }
5255 
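/*
 * Choose the stripe geometry for the regular policy: maximize the number of
 * stripes from the sorted device list, then shrink the stripe size until the
 * resulting chunk fits under ctl->max_chunk_size.
 */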
5256 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5257 				      struct btrfs_device_info *devices_info)
5258 {
5259 	/* Number of stripes that count for block group size */
5260 	int data_stripes;
5261 
5262 	/*
5263 	 * The primary goal is to maximize the number of stripes, so use as
5264 	 * many devices as possible, even if the stripes are not maximum sized.
5265 	 *
5266 	 * The DUP profile stores more than one stripe per device, the
5267 	 * max_avail is the total size so we have to adjust.
5268 	 */
5269 	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5270 				   ctl->dev_stripes);
5271 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5272 
5273 	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5274 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5275 
5276 	/*
5277 	 * Use the number of data stripes to figure out how big this chunk is
5278 	 * really going to be in terms of logical address space, and compare
5279 	 * that answer with the max chunk size. If it's higher, we try to
5280 	 * reduce stripe_size.
5281 	 */
5282 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5283 		/*
5284 		 * Reduce stripe_size, round it up to a 16MB boundary again and
5285 		 * then use it, unless it ends up being even bigger than the
5286 		 * previous value we had already.
5287 		 */
5288 		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5289 							data_stripes), SZ_16M),
5290 				       ctl->stripe_size);
5291 	}
5292 
5293 	/* Stripe size should not go beyond 1G. */
5294 	ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
5295 
5296 	/* Align to BTRFS_STRIPE_LEN */
5297 	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5298 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5299 
5300 	return 0;
5301 }
5302 
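/*
 * Zoned policy: the stripe size is fixed to the zone size, so an oversized
 * chunk can only be shrunk by using fewer devices.
 */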
5303 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5304 				    struct btrfs_device_info *devices_info)
5305 {
5306 	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5307 	/* Number of stripes that count for block group size */
5308 	int data_stripes;
5309 
5310 	/*
5311 	 * It should hold because:
5312 	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5313 	 */
5314 	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5315 
5316 	ctl->stripe_size = zone_size;
5317 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5318 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5319 
5320 	/* stripe_size is fixed in zoned filesystems. Reduce ndevs instead. */
5321 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5322 		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5323 					     ctl->stripe_size) + ctl->nparity,
5324 				     ctl->dev_stripes);
5325 		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5326 		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5327 		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5328 	}
5329 
5330 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5331 
5332 	return 0;
5333 }
5334 
5335 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5336 			      struct alloc_chunk_ctl *ctl,
5337 			      struct btrfs_device_info *devices_info)
5338 {
5339 	struct btrfs_fs_info *info = fs_devices->fs_info;
5340 
5341 	/*
5342 	 * Round down to number of usable stripes, devs_increment can be any
5343 	 * number so we can't use round_down() that requires power of 2, while
5344 	 * rounddown is safe.
5345 	 */
5346 	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5347 
5348 	if (ctl->ndevs < ctl->devs_min) {
5349 		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5350 			btrfs_debug(info,
5351 	"%s: not enough devices with free space: have=%d minimum required=%d",
5352 				    __func__, ctl->ndevs, ctl->devs_min);
5353 		}
5354 		return -ENOSPC;
5355 	}
5356 
5357 	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5358 
5359 	switch (fs_devices->chunk_alloc_policy) {
5360 	case BTRFS_CHUNK_ALLOC_REGULAR:
5361 		return decide_stripe_size_regular(ctl, devices_info);
5362 	case BTRFS_CHUNK_ALLOC_ZONED:
5363 		return decide_stripe_size_zoned(ctl, devices_info);
5364 	default:
5365 		BUG();
5366 	}
5367 }
5368 
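/*
 * Build the in-memory representation of the new chunk: fill the stripe map,
 * insert it into the extent mapping tree and create the corresponding block
 * group. The chunk item itself is inserted into the chunk btree later, by
 * btrfs_chunk_alloc_add_chunk_item().
 */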
5369 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5370 			struct alloc_chunk_ctl *ctl,
5371 			struct btrfs_device_info *devices_info)
5372 {
5373 	struct btrfs_fs_info *info = trans->fs_info;
5374 	struct map_lookup *map = NULL;
5375 	struct extent_map_tree *em_tree;
5376 	struct btrfs_block_group *block_group;
5377 	struct extent_map *em;
5378 	u64 start = ctl->start;
5379 	u64 type = ctl->type;
5380 	int ret;
5381 	int i;
5382 	int j;
5383 
5384 	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5385 	if (!map)
5386 		return ERR_PTR(-ENOMEM);
5387 	map->num_stripes = ctl->num_stripes;
5388 
5389 	for (i = 0; i < ctl->ndevs; ++i) {
5390 		for (j = 0; j < ctl->dev_stripes; ++j) {
5391 			int s = i * ctl->dev_stripes + j;
5392 			map->stripes[s].dev = devices_info[i].dev;
5393 			map->stripes[s].physical = devices_info[i].dev_offset +
5394 						   j * ctl->stripe_size;
5395 		}
5396 	}
5397 	map->stripe_len = BTRFS_STRIPE_LEN;
5398 	map->io_align = BTRFS_STRIPE_LEN;
5399 	map->io_width = BTRFS_STRIPE_LEN;
5400 	map->type = type;
5401 	map->sub_stripes = ctl->sub_stripes;
5402 
5403 	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5404 
5405 	em = alloc_extent_map();
5406 	if (!em) {
5407 		kfree(map);
5408 		return ERR_PTR(-ENOMEM);
5409 	}
5410 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5411 	em->map_lookup = map;
5412 	em->start = start;
5413 	em->len = ctl->chunk_size;
5414 	em->block_start = 0;
5415 	em->block_len = em->len;
5416 	em->orig_block_len = ctl->stripe_size;
5417 
5418 	em_tree = &info->mapping_tree;
5419 	write_lock(&em_tree->lock);
5420 	ret = add_extent_mapping(em_tree, em, 0);
5421 	if (ret) {
5422 		write_unlock(&em_tree->lock);
5423 		free_extent_map(em);
5424 		return ERR_PTR(ret);
5425 	}
5426 	write_unlock(&em_tree->lock);
5427 
5428 	block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5429 	if (IS_ERR(block_group))
5430 		goto error_del_extent;
5431 
5432 	for (i = 0; i < map->num_stripes; i++) {
5433 		struct btrfs_device *dev = map->stripes[i].dev;
5434 
5435 		btrfs_device_set_bytes_used(dev,
5436 					    dev->bytes_used + ctl->stripe_size);
5437 		if (list_empty(&dev->post_commit_list))
5438 			list_add_tail(&dev->post_commit_list,
5439 				      &trans->transaction->dev_update_list);
5440 	}
5441 
5442 	atomic64_sub(ctl->stripe_size * map->num_stripes,
5443 		     &info->free_chunk_space);
5444 
5445 	free_extent_map(em);
5446 	check_raid56_incompat_flag(info, type);
5447 	check_raid1c34_incompat_flag(info, type);
5448 
5449 	return block_group;
5450 
5451 error_del_extent:
5452 	write_lock(&em_tree->lock);
5453 	remove_extent_mapping(em_tree, em);
5454 	write_unlock(&em_tree->lock);
5455 
5456 	/* One for our allocation */
5457 	free_extent_map(em);
5458 	/* One for the tree reference */
5459 	free_extent_map(em);
5460 
5461 	return block_group;
5462 }
5463 
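/*
 * Allocate a new chunk of the given type: gather the per-device free space,
 * decide the stripe geometry and create the chunk with its block group.
 */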
5464 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5465 					    u64 type)
5466 {
5467 	struct btrfs_fs_info *info = trans->fs_info;
5468 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5469 	struct btrfs_device_info *devices_info = NULL;
5470 	struct alloc_chunk_ctl ctl;
5471 	struct btrfs_block_group *block_group;
5472 	int ret;
5473 
5474 	lockdep_assert_held(&info->chunk_mutex);
5475 
5476 	if (!alloc_profile_is_valid(type, 0)) {
5477 		ASSERT(0);
5478 		return ERR_PTR(-EINVAL);
5479 	}
5480 
5481 	if (list_empty(&fs_devices->alloc_list)) {
5482 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5483 			btrfs_debug(info, "%s: no writable device", __func__);
5484 		return ERR_PTR(-ENOSPC);
5485 	}
5486 
5487 	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5488 		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5489 		ASSERT(0);
5490 		return ERR_PTR(-EINVAL);
5491 	}
5492 
5493 	ctl.start = find_next_chunk(info);
5494 	ctl.type = type;
5495 	init_alloc_chunk_ctl(fs_devices, &ctl);
5496 
5497 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5498 			       GFP_NOFS);
5499 	if (!devices_info)
5500 		return ERR_PTR(-ENOMEM);
5501 
5502 	ret = gather_device_info(fs_devices, &ctl, devices_info);
5503 	if (ret < 0) {
5504 		block_group = ERR_PTR(ret);
5505 		goto out;
5506 	}
5507 
5508 	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5509 	if (ret < 0) {
5510 		block_group = ERR_PTR(ret);
5511 		goto out;
5512 	}
5513 
5514 	block_group = create_chunk(trans, &ctl, devices_info);
5515 
5516 out:
5517 	kfree(devices_info);
5518 	return block_group;
5519 }
5520 
5521 /*
5522  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5523  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5524  * chunks.
5525  *
5526  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5527  * phases.
5528  */
5529 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5530 				     struct btrfs_block_group *bg)
5531 {
5532 	struct btrfs_fs_info *fs_info = trans->fs_info;
5533 	struct btrfs_root *chunk_root = fs_info->chunk_root;
5534 	struct btrfs_key key;
5535 	struct btrfs_chunk *chunk;
5536 	struct btrfs_stripe *stripe;
5537 	struct extent_map *em;
5538 	struct map_lookup *map;
5539 	size_t item_size;
5540 	int i;
5541 	int ret;
5542 
5543 	/*
5544 	 * We take the chunk_mutex for 2 reasons:
5545 	 *
5546 	 * 1) Updates and insertions in the chunk btree must be done while holding
5547 	 *    the chunk_mutex, as well as updating the system chunk array in the
5548 	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5549 	 *    details;
5550 	 *
5551 	 * 2) To prevent races with the final phase of a device replace operation
5552 	 *    that replaces the device object associated with the map's stripes,
5553 	 *    because the device object's id can change at any time during that
5554 	 *    final phase of the device replace operation
5555 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5556 	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5557 	 *    which would cause a failure when updating the device item, which does
5558 	 *    not exist, or when persisting a stripe of the chunk item with such an ID.
5559 	 *    Here we can't use the device_list_mutex because our caller already
5560 	 *    has locked the chunk_mutex, and the final phase of device replace
5561 	 *    acquires both mutexes - first the device_list_mutex and then the
5562 	 *    chunk_mutex. Using any of those two mutexes protects us from a
5563 	 *    concurrent device replace.
5564 	 */
5565 	lockdep_assert_held(&fs_info->chunk_mutex);
5566 
5567 	em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5568 	if (IS_ERR(em)) {
5569 		ret = PTR_ERR(em);
5570 		btrfs_abort_transaction(trans, ret);
5571 		return ret;
5572 	}
5573 
5574 	map = em->map_lookup;
5575 	item_size = btrfs_chunk_item_size(map->num_stripes);
5576 
5577 	chunk = kzalloc(item_size, GFP_NOFS);
5578 	if (!chunk) {
5579 		ret = -ENOMEM;
5580 		btrfs_abort_transaction(trans, ret);
5581 		goto out;
5582 	}
5583 
5584 	for (i = 0; i < map->num_stripes; i++) {
5585 		struct btrfs_device *device = map->stripes[i].dev;
5586 
5587 		ret = btrfs_update_device(trans, device);
5588 		if (ret)
5589 			goto out;
5590 	}
5591 
5592 	stripe = &chunk->stripe;
5593 	for (i = 0; i < map->num_stripes; i++) {
5594 		struct btrfs_device *device = map->stripes[i].dev;
5595 		const u64 dev_offset = map->stripes[i].physical;
5596 
5597 		btrfs_set_stack_stripe_devid(stripe, device->devid);
5598 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5599 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5600 		stripe++;
5601 	}
5602 
5603 	btrfs_set_stack_chunk_length(chunk, bg->length);
5604 	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
5605 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5606 	btrfs_set_stack_chunk_type(chunk, map->type);
5607 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5608 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5609 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5610 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5611 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5612 
5613 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5614 	key.type = BTRFS_CHUNK_ITEM_KEY;
5615 	key.offset = bg->start;
5616 
5617 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5618 	if (ret)
5619 		goto out;
5620 
5621 	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
5622 
5623 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5624 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5625 		if (ret)
5626 			goto out;
5627 	}
5628 
5629 out:
5630 	kfree(chunk);
5631 	free_extent_map(em);
5632 	return ret;
5633 }
5634 
5635 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5636 {
5637 	struct btrfs_fs_info *fs_info = trans->fs_info;
5638 	u64 alloc_profile;
5639 	struct btrfs_block_group *meta_bg;
5640 	struct btrfs_block_group *sys_bg;
5641 
5642 	/*
5643 	 * When adding a new device for sprouting, the seed device is read-only
5644 	 * so we must first allocate a metadata and a system chunk. But before
5645 	 * adding the block group items to the extent, device and chunk btrees,
5646 	 * we must first:
5647 	 *
5648 	 * 1) Create both chunks without doing any changes to the btrees, as
5649 	 *    otherwise we would get -ENOSPC since the block groups from the
5650 	 *    seed device are read-only;
5651 	 *
5652 	 * 2) Add the device item for the new sprout device - finishing the setup
5653 	 *    of a new block group requires updating the device item in the chunk
5654 	 *    btree, so it must exist when we attempt to do it. The previous step
5655 	 *    ensures this does not fail with -ENOSPC.
5656 	 *
5657 	 * After that we can add the block group items to their btrees:
5658 	 * update existing device item in the chunk btree, add a new block group
5659 	 * item to the extent btree, add a new chunk item to the chunk btree and
5660 	 * finally add the new device extent items to the devices btree.
5661 	 */
5662 
5663 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5664 	meta_bg = btrfs_create_chunk(trans, alloc_profile);
5665 	if (IS_ERR(meta_bg))
5666 		return PTR_ERR(meta_bg);
5667 
5668 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5669 	sys_bg = btrfs_create_chunk(trans, alloc_profile);
5670 	if (IS_ERR(sys_bg))
5671 		return PTR_ERR(sys_bg);
5672 
5673 	return 0;
5674 }
5675 
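/* Return how many failed stripes the raid profile of the chunk tolerates. */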
5676 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5677 {
5678 	const int index = btrfs_bg_flags_to_raid_index(map->type);
5679 
5680 	return btrfs_raid_array[index].tolerated_failures;
5681 }
5682 
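/*
 * Check whether a chunk is still writeable: every present stripe device must
 * be writeable and the number of missing devices must not exceed what the
 * raid profile can tolerate.
 */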
5683 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5684 {
5685 	struct extent_map *em;
5686 	struct map_lookup *map;
5687 	int miss_ndevs = 0;
5688 	int i;
5689 	bool ret = true;
5690 
5691 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5692 	if (IS_ERR(em))
5693 		return false;
5694 
5695 	map = em->map_lookup;
5696 	for (i = 0; i < map->num_stripes; i++) {
5697 		if (test_bit(BTRFS_DEV_STATE_MISSING,
5698 					&map->stripes[i].dev->dev_state)) {
5699 			miss_ndevs++;
5700 			continue;
5701 		}
5702 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5703 					&map->stripes[i].dev->dev_state)) {
5704 			ret = false;
5705 			goto end;
5706 		}
5707 	}
5708 
5709 	/*
5710 	 * If the number of missing devices is larger than max errors, we
5711 	 * cannot write the data into that chunk successfully.
5712 	 */
5713 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5714 		ret = false;
5715 end:
5716 	free_extent_map(em);
5717 	return ret;
5718 }
5719 
5720 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5721 {
5722 	struct extent_map *em;
5723 
5724 	while (1) {
5725 		write_lock(&tree->lock);
5726 		em = lookup_extent_mapping(tree, 0, (u64)-1);
5727 		if (em)
5728 			remove_extent_mapping(tree, em);
5729 		write_unlock(&tree->lock);
5730 		if (!em)
5731 			break;
5732 		/* once for us */
5733 		free_extent_map(em);
5734 		/* once for the tree */
5735 		free_extent_map(em);
5736 	}
5737 }
5738 
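/*
 * Return the number of ways the data at @logical can be read: the profile's
 * ncopies for non-RAID56 chunks, 2 for RAID5, num_stripes for RAID6, plus
 * one more if a device replace target is available.
 */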
5739 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5740 {
5741 	struct extent_map *em;
5742 	struct map_lookup *map;
5743 	enum btrfs_raid_types index;
5744 	int ret = 1;
5745 
5746 	em = btrfs_get_chunk_map(fs_info, logical, len);
5747 	if (IS_ERR(em))
5748 		/*
5749 		 * We could return errors for these cases, but that could get
5750 		 * ugly and we'd probably end up doing the same thing anyway,
5751 		 * i.e. nothing else but exit, so return 1 so the callers don't
5752 		 * try to use other copies.
5753 		 */
5754 		return 1;
5755 
5756 	map = em->map_lookup;
5757 	index = btrfs_bg_flags_to_raid_index(map->type);
5758 
5759 	/* Non-RAID56, use their ncopies from btrfs_raid_array. */
5760 	if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5761 		ret = btrfs_raid_array[index].ncopies;
5762 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5763 		ret = 2;
5764 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5765 		/*
5766 		 * There could be two corrupted data stripes, we need
5767 		 * to loop retry in order to rebuild the correct data.
5768 		 *
5769 		 * Fail a stripe at a time on every retry except the
5770 		 * stripe under reconstruction.
5771 		 */
5772 		ret = map->num_stripes;
5773 	free_extent_map(em);
5774 
5775 	down_read(&fs_info->dev_replace.rwsem);
5776 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5777 	    fs_info->dev_replace.tgtdev)
5778 		ret++;
5779 	up_read(&fs_info->dev_replace.rwsem);
5780 
5781 	return ret;
5782 }
5783 
5784 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5785 				    u64 logical)
5786 {
5787 	struct extent_map *em;
5788 	struct map_lookup *map;
5789 	unsigned long len = fs_info->sectorsize;
5790 
5791 	if (!btrfs_fs_incompat(fs_info, RAID56))
5792 		return len;
5793 
5794 	em = btrfs_get_chunk_map(fs_info, logical, len);
5795 
5796 	if (!WARN_ON(IS_ERR(em))) {
5797 		map = em->map_lookup;
5798 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5799 			len = map->stripe_len * nr_data_stripes(map);
5800 		free_extent_map(em);
5801 	}
5802 	return len;
5803 }
5804 
5805 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5806 {
5807 	struct extent_map *em;
5808 	struct map_lookup *map;
5809 	int ret = 0;
5810 
5811 	if (!btrfs_fs_incompat(fs_info, RAID56))
5812 		return 0;
5813 
5814 	em = btrfs_get_chunk_map(fs_info, logical, len);
5815 
5816 	if (!WARN_ON(IS_ERR(em))) {
5817 		map = em->map_lookup;
5818 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5819 			ret = 1;
5820 		free_extent_map(em);
5821 	}
5822 	return ret;
5823 }
5824 
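/*
 * Pick the mirror to read from according to the configured read policy,
 * preferring devices that are present and not the source of a running
 * device replace.
 */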
5825 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5826 			    struct map_lookup *map, int first,
5827 			    int dev_replace_is_ongoing)
5828 {
5829 	int i;
5830 	int num_stripes;
5831 	int preferred_mirror;
5832 	int tolerance;
5833 	struct btrfs_device *srcdev;
5834 
5835 	ASSERT((map->type &
5836 		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5837 
5838 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5839 		num_stripes = map->sub_stripes;
5840 	else
5841 		num_stripes = map->num_stripes;
5842 
5843 	switch (fs_info->fs_devices->read_policy) {
5844 	default:
5845 		/* Shouldn't happen, just warn and use pid instead of failing */
5846 		btrfs_warn_rl(fs_info,
5847 			      "unknown read_policy type %u, reset to pid",
5848 			      fs_info->fs_devices->read_policy);
5849 		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5850 		fallthrough;
5851 	case BTRFS_READ_POLICY_PID:
5852 		preferred_mirror = first + (current->pid % num_stripes);
5853 		break;
5854 	}
5855 
5856 	if (dev_replace_is_ongoing &&
5857 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5858 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5859 		srcdev = fs_info->dev_replace.srcdev;
5860 	else
5861 		srcdev = NULL;
5862 
5863 	/*
5864 	 * try to avoid the drive that is the source drive for a
5865 	 * dev-replace procedure, only choose it if no other non-missing
5866 	 * mirror is available
5867 	 */
5868 	for (tolerance = 0; tolerance < 2; tolerance++) {
5869 		if (map->stripes[preferred_mirror].dev->bdev &&
5870 		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5871 			return preferred_mirror;
5872 		for (i = first; i < first + num_stripes; i++) {
5873 			if (map->stripes[i].dev->bdev &&
5874 			    (tolerance || map->stripes[i].dev != srcdev))
5875 				return i;
5876 		}
5877 	}
5878 
5879 	/* We couldn't find one that doesn't fail. Just return something and
5880 	 * the io error handling code will clean up eventually.
5881 	 */
5882 	return preferred_mirror;
5883 }
5884 
5885 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5886 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5887 {
5888 	int i;
5889 	int again = 1;
5890 
5891 	while (again) {
5892 		again = 0;
5893 		for (i = 0; i < num_stripes - 1; i++) {
5894 			/* Swap if parity is on a smaller index */
5895 			if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5896 				swap(bioc->stripes[i], bioc->stripes[i + 1]);
5897 				swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5898 				again = 1;
5899 			}
5900 		}
5901 	}
5902 }
5903 
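/*
 * Allocate a btrfs_io_context together with its trailing arrays (the
 * stripes, the target device map and the raid_map) in one allocation.
 * This cannot fail as it is backed by __GFP_NOFAIL.
 */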
5904 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
5905 						       int total_stripes,
5906 						       int real_stripes)
5907 {
5908 	struct btrfs_io_context *bioc = kzalloc(
5909 		 /* The size of btrfs_io_context */
5910 		sizeof(struct btrfs_io_context) +
5911 		/* Plus the variable array for the stripes */
5912 		sizeof(struct btrfs_io_stripe) * (total_stripes) +
5913 		/* Plus the variable array for the tgt dev */
5914 		sizeof(int) * (real_stripes) +
5915 		/*
5916 		 * Plus the raid_map, which includes both the tgt dev
5917 		 * and the stripes.
5918 		 */
5919 		sizeof(u64) * (total_stripes),
5920 		GFP_NOFS|__GFP_NOFAIL);
5921 
5922 	refcount_set(&bioc->refs, 1);
5923 
5924 	bioc->fs_info = fs_info;
5925 	bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5926 	bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
5927 
5928 	return bioc;
5929 }
5930 
5931 void btrfs_get_bioc(struct btrfs_io_context *bioc)
5932 {
5933 	WARN_ON(!refcount_read(&bioc->refs));
5934 	refcount_inc(&bioc->refs);
5935 }
5936 
5937 void btrfs_put_bioc(struct btrfs_io_context *bioc)
5938 {
5939 	if (!bioc)
5940 		return;
5941 	if (refcount_dec_and_test(&bioc->refs))
5942 		kfree(bioc);
5943 }
5944 
5945 /*
5946  * Please note that discard won't be sent to the target device of a device
5947  * replace.
5948  */
5949 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
5950 					       u64 logical, u64 *length_ret,
5951 					       u32 *num_stripes)
5952 {
5953 	struct extent_map *em;
5954 	struct map_lookup *map;
5955 	struct btrfs_discard_stripe *stripes;
5956 	u64 length = *length_ret;
5957 	u64 offset;
5958 	u64 stripe_nr;
5959 	u64 stripe_nr_end;
5960 	u64 stripe_end_offset;
5961 	u64 stripe_cnt;
5962 	u64 stripe_len;
5963 	u64 stripe_offset;
5964 	u32 stripe_index;
5965 	u32 factor = 0;
5966 	u32 sub_stripes = 0;
5967 	u64 stripes_per_dev = 0;
5968 	u32 remaining_stripes = 0;
5969 	u32 last_stripe = 0;
5970 	int ret;
5971 	int i;
5972 
5973 	em = btrfs_get_chunk_map(fs_info, logical, length);
5974 	if (IS_ERR(em))
5975 		return ERR_CAST(em);
5976 
5977 	map = em->map_lookup;
5978 
5979 	/* we don't discard raid56 yet */
5980 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5981 		ret = -EOPNOTSUPP;
5982 		goto out_free_map;
5983 	}
5984 
5985 	offset = logical - em->start;
5986 	length = min_t(u64, em->start + em->len - logical, length);
5987 	*length_ret = length;
5988 
5989 	stripe_len = map->stripe_len;
5990 	/*
5991 	 * stripe_nr counts the total number of stripes we have to stride
5992 	 * to get to this block
5993 	 */
5994 	stripe_nr = div64_u64(offset, stripe_len);
5995 
5996 	/* stripe_offset is the offset of this block in its stripe */
5997 	stripe_offset = offset - stripe_nr * stripe_len;
5998 
5999 	stripe_nr_end = round_up(offset + length, map->stripe_len);
6000 	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
6001 	stripe_cnt = stripe_nr_end - stripe_nr;
6002 	stripe_end_offset = stripe_nr_end * map->stripe_len -
6003 			    (offset + length);
6004 	/*
6005 	 * after this, stripe_nr is the number of stripes on this
6006 	 * device we have to walk to find the data, and stripe_index is
6007 	 * the number of our device in the stripe array
6008 	 */
6009 	*num_stripes = 1;
6010 	stripe_index = 0;
6011 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6012 			 BTRFS_BLOCK_GROUP_RAID10)) {
6013 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6014 			sub_stripes = 1;
6015 		else
6016 			sub_stripes = map->sub_stripes;
6017 
6018 		factor = map->num_stripes / sub_stripes;
6019 		*num_stripes = min_t(u64, map->num_stripes,
6020 				    sub_stripes * stripe_cnt);
6021 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6022 		stripe_index *= sub_stripes;
6023 		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
6024 					      &remaining_stripes);
6025 		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
6026 		last_stripe *= sub_stripes;
6027 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6028 				BTRFS_BLOCK_GROUP_DUP)) {
6029 		*num_stripes = map->num_stripes;
6030 	} else {
6031 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6032 					&stripe_index);
6033 	}
6034 
6035 	stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
6036 	if (!stripes) {
6037 		ret = -ENOMEM;
6038 		goto out_free_map;
6039 	}
6040 
6041 	for (i = 0; i < *num_stripes; i++) {
6042 		stripes[i].physical =
6043 			map->stripes[stripe_index].physical +
6044 			stripe_offset + stripe_nr * map->stripe_len;
6045 		stripes[i].dev = map->stripes[stripe_index].dev;
6046 
6047 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6048 				 BTRFS_BLOCK_GROUP_RAID10)) {
6049 			stripes[i].length = stripes_per_dev * map->stripe_len;
6050 
6051 			if (i / sub_stripes < remaining_stripes)
6052 				stripes[i].length += map->stripe_len;
6053 
6054 			/*
6055 			 * Special for the first stripe and
6056 			 * the last stripe:
6057 			 *
6058 			 * |-------|...|-------|
6059 			 *     |----------|
6060 			 *    off     end_off
6061 			 */
6062 			if (i < sub_stripes)
6063 				stripes[i].length -= stripe_offset;
6064 
6065 			if (stripe_index >= last_stripe &&
6066 			    stripe_index <= (last_stripe +
6067 					     sub_stripes - 1))
6068 				stripes[i].length -= stripe_end_offset;
6069 
6070 			if (i == sub_stripes - 1)
6071 				stripe_offset = 0;
6072 		} else {
6073 			stripes[i].length = length;
6074 		}
6075 
6076 		stripe_index++;
6077 		if (stripe_index == map->num_stripes) {
6078 			stripe_index = 0;
6079 			stripe_nr++;
6080 		}
6081 	}
6082 
6083 	free_extent_map(em);
6084 	return stripes;
6085 out_free_map:
6086 	free_extent_map(em);
6087 	return ERR_PTR(ret);
6088 }
6089 
6090 /*
6091  * In dev-replace case, for repair case (that's the only case where the mirror
6092  * is selected explicitly when calling btrfs_map_block), blocks left of the
6093  * left cursor can also be read from the target drive.
6094  *
6095  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6096  * array of stripes.
6097  * For READ, it also needs to be supported using the same mirror number.
6098  *
6099  * If the requested block is not left of the left cursor, EIO is returned. This
6100  * can happen because btrfs_num_copies() returns one more in the dev-replace
6101  * case.
6102  */
6103 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6104 					 u64 logical, u64 length,
6105 					 u64 srcdev_devid, int *mirror_num,
6106 					 u64 *physical)
6107 {
6108 	struct btrfs_io_context *bioc = NULL;
6109 	int num_stripes;
6110 	int index_srcdev = 0;
6111 	int found = 0;
6112 	u64 physical_of_found = 0;
6113 	int i;
6114 	int ret = 0;
6115 
6116 	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6117 				logical, &length, &bioc, NULL, NULL, 0);
6118 	if (ret) {
6119 		ASSERT(bioc == NULL);
6120 		return ret;
6121 	}
6122 
6123 	num_stripes = bioc->num_stripes;
6124 	if (*mirror_num > num_stripes) {
6125 		/*
6126 		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6127 		 * that means that the requested area is not left of the left
6128 		 * cursor
6129 		 */
6130 		btrfs_put_bioc(bioc);
6131 		return -EIO;
6132 	}
6133 
6134 	/*
6135 	 * process the rest of the function using the mirror_num of the source
6136 	 * drive. Therefore look it up first.  At the end, patch the device
6137 	 * pointer to the one of the target drive.
6138 	 */
6139 	for (i = 0; i < num_stripes; i++) {
6140 		if (bioc->stripes[i].dev->devid != srcdev_devid)
6141 			continue;
6142 
6143 		/*
6144 		 * In case of DUP, in order to keep it simple, only add the
6145 		 * mirror with the lowest physical address
6146 		 */
6147 		if (found &&
6148 		    physical_of_found <= bioc->stripes[i].physical)
6149 			continue;
6150 
6151 		index_srcdev = i;
6152 		found = 1;
6153 		physical_of_found = bioc->stripes[i].physical;
6154 	}
6155 
6156 	btrfs_put_bioc(bioc);
6157 
6158 	ASSERT(found);
6159 	if (!found)
6160 		return -EIO;
6161 
6162 	*mirror_num = index_srcdev + 1;
6163 	*physical = physical_of_found;
6164 	return ret;
6165 }
6166 
6167 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6168 {
6169 	struct btrfs_block_group *cache;
6170 	bool ret;
6171 
6172 	/* Non-zoned filesystems do not use the "to_copy" flag */
6173 	if (!btrfs_is_zoned(fs_info))
6174 		return false;
6175 
6176 	cache = btrfs_lookup_block_group(fs_info, logical);
6177 
6178 	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
6179 
6180 	btrfs_put_block_group(cache);
6181 	return ret;
6182 }
6183 
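/*
 * Adjust the stripes of @bioc for a running device replace: writes to the
 * source device are duplicated to the target device, and for read-mirror
 * lookups the target can serve the already copied region.
 */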
6184 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6185 				      struct btrfs_io_context **bioc_ret,
6186 				      struct btrfs_dev_replace *dev_replace,
6187 				      u64 logical,
6188 				      int *num_stripes_ret, int *max_errors_ret)
6189 {
6190 	struct btrfs_io_context *bioc = *bioc_ret;
6191 	u64 srcdev_devid = dev_replace->srcdev->devid;
6192 	int tgtdev_indexes = 0;
6193 	int num_stripes = *num_stripes_ret;
6194 	int max_errors = *max_errors_ret;
6195 	int i;
6196 
6197 	if (op == BTRFS_MAP_WRITE) {
6198 		int index_where_to_add;
6199 
6200 		/*
6201 		 * A block group which has "to_copy" set will eventually be
6202 		 * copied by the dev-replace process. We can avoid cloning the IO here.
6203 		 */
6204 		if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6205 			return;
6206 
6207 		/*
6208 		 * duplicate the write operations while the dev replace
6209 		 * procedure is running. Since the copying of the old disk to
6210 		 * the new disk takes place at run time while the filesystem is
6211 		 * mounted writable, the regular write operations to the old
6212 		 * disk have to be duplicated to go to the new disk as well.
6213 		 *
6214 		 * Note that device->missing is handled by the caller, and that
6215 		 * the write to the old disk is already set up in the stripes
6216 		 * array.
6217 		 */
6218 		index_where_to_add = num_stripes;
6219 		for (i = 0; i < num_stripes; i++) {
6220 			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6221 				/* write to new disk, too */
6222 				struct btrfs_io_stripe *new =
6223 					bioc->stripes + index_where_to_add;
6224 				struct btrfs_io_stripe *old =
6225 					bioc->stripes + i;
6226 
6227 				new->physical = old->physical;
6228 				new->dev = dev_replace->tgtdev;
6229 				bioc->tgtdev_map[i] = index_where_to_add;
6230 				index_where_to_add++;
6231 				max_errors++;
6232 				tgtdev_indexes++;
6233 			}
6234 		}
6235 		num_stripes = index_where_to_add;
6236 	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6237 		int index_srcdev = 0;
6238 		int found = 0;
6239 		u64 physical_of_found = 0;
6240 
6241 		/*
6242 		 * During the dev-replace procedure, the target drive can also
6243 		 * be used to read data in case it is needed to repair a corrupt
6244 		 * block elsewhere. This is possible if the requested area is
6245 		 * left of the left cursor. In this area, the target drive is a
6246 		 * full copy of the source drive.
6247 		 */
6248 		for (i = 0; i < num_stripes; i++) {
6249 			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6250 				/*
6251 				 * In case of DUP, in order to keep it simple,
6252 				 * only add the mirror with the lowest physical
6253 				 * address
6254 				 */
6255 				if (found &&
6256 				    physical_of_found <= bioc->stripes[i].physical)
6257 					continue;
6258 				index_srcdev = i;
6259 				found = 1;
6260 				physical_of_found = bioc->stripes[i].physical;
6261 			}
6262 		}
6263 		if (found) {
6264 			struct btrfs_io_stripe *tgtdev_stripe =
6265 				bioc->stripes + num_stripes;
6266 
6267 			tgtdev_stripe->physical = physical_of_found;
6268 			tgtdev_stripe->dev = dev_replace->tgtdev;
6269 			bioc->tgtdev_map[index_srcdev] = num_stripes;
6270 
6271 			tgtdev_indexes++;
6272 			num_stripes++;
6273 		}
6274 	}
6275 
6276 	*num_stripes_ret = num_stripes;
6277 	*max_errors_ret = max_errors;
6278 	bioc->num_tgtdevs = tgtdev_indexes;
6279 	*bioc_ret = bioc;
6280 }
6281 
6282 static bool need_full_stripe(enum btrfs_map_op op)
6283 {
6284 	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6285 }
6286 
6287 /*
6288  * Calculate the geometry of a particular (address, len) tuple. This
6289  * information is used to calculate how big a particular bio can get before it
6290  * straddles a stripe.
6291  *
6292  * @fs_info: the filesystem
6293  * @em:      mapping containing the logical extent
6294  * @op:      type of operation - write or read
6295  * @logical: address that we want to figure out the geometry of
6296  * @io_geom: pointer used to return values
6297  *
6298  * Returns < 0 in case a chunk for the given logical address cannot be found,
6299  * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6300  */
6301 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6302 			  enum btrfs_map_op op, u64 logical,
6303 			  struct btrfs_io_geometry *io_geom)
6304 {
6305 	struct map_lookup *map;
6306 	u64 len;
6307 	u64 offset;
6308 	u64 stripe_offset;
6309 	u64 stripe_nr;
6310 	u32 stripe_len;
6311 	u64 raid56_full_stripe_start = (u64)-1;
6312 	int data_stripes;
6313 
6314 	ASSERT(op != BTRFS_MAP_DISCARD);
6315 
6316 	map = em->map_lookup;
6317 	/* Offset of this logical address in the chunk */
6318 	offset = logical - em->start;
6319 	/* Len of a stripe in a chunk */
6320 	stripe_len = map->stripe_len;
6321 	/*
6322 	 * Stripe_nr is where this block falls in
6323 	 * stripe_offset is the offset of this block in its stripe.
6324 	 */
6325 	stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset);
6326 	ASSERT(stripe_offset < U32_MAX);
6327 
6328 	data_stripes = nr_data_stripes(map);
6329 
6330 	/* Only stripe based profiles needs to check against stripe length. */
6331 	if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) {
6332 		u64 max_len = stripe_len - stripe_offset;
6333 
6334 		/*
6335 		 * In case of raid56, we need to know the stripe aligned start
6336 		 */
6337 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6338 			unsigned long full_stripe_len = stripe_len * data_stripes;
6339 			raid56_full_stripe_start = offset;
6340 
6341 			/*
6342 			 * Allow a write of a full stripe, but make sure we
6343 			 * don't allow straddling of stripes
6344 			 */
6345 			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6346 					full_stripe_len);
6347 			raid56_full_stripe_start *= full_stripe_len;
6348 
6349 			/*
6350 			 * For writes to RAID[56], allow a full stripeset across
6351 			 * all disks. For other RAID types and for RAID[56]
6352 			 * reads, just allow a single stripe (on a single disk).
6353 			 */
6354 			if (op == BTRFS_MAP_WRITE) {
6355 				max_len = stripe_len * data_stripes -
6356 					  (offset - raid56_full_stripe_start);
6357 			}
6358 		}
6359 		len = min_t(u64, em->len - offset, max_len);
6360 	} else {
6361 		len = em->len - offset;
6362 	}
6363 
6364 	io_geom->len = len;
6365 	io_geom->offset = offset;
6366 	io_geom->stripe_len = stripe_len;
6367 	io_geom->stripe_nr = stripe_nr;
6368 	io_geom->stripe_offset = stripe_offset;
6369 	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6370 
6371 	return 0;
6372 }
6373 
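/* Resolve a stripe of the chunk to its device and physical byte address. */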
6374 static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map,
6375 		          u32 stripe_index, u64 stripe_offset, u64 stripe_nr)
6376 {
6377 	dst->dev = map->stripes[stripe_index].dev;
6378 	dst->physical = map->stripes[stripe_index].physical +
6379 			stripe_offset + stripe_nr * map->stripe_len;
6380 }
6381 
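/*
 * Map a logical address to the physical stripe(s) to read or write, taking
 * the raid profile, the requested mirror and any running device replace into
 * account. If the I/O targets a single stripe, the result is returned in
 * @smap and no btrfs_io_context is allocated.
 */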
6382 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6383 			     enum btrfs_map_op op, u64 logical, u64 *length,
6384 			     struct btrfs_io_context **bioc_ret,
6385 			     struct btrfs_io_stripe *smap,
6386 			     int *mirror_num_ret, int need_raid_map)
6387 {
6388 	struct extent_map *em;
6389 	struct map_lookup *map;
6390 	u64 stripe_offset;
6391 	u64 stripe_nr;
6392 	u64 stripe_len;
6393 	u32 stripe_index;
6394 	int data_stripes;
6395 	int i;
6396 	int ret = 0;
6397 	int mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
6398 	int num_stripes;
6399 	int max_errors = 0;
6400 	int tgtdev_indexes = 0;
6401 	struct btrfs_io_context *bioc = NULL;
6402 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6403 	int dev_replace_is_ongoing = 0;
6404 	int num_alloc_stripes;
6405 	int patch_the_first_stripe_for_dev_replace = 0;
6406 	u64 physical_to_patch_in_first_stripe = 0;
6407 	u64 raid56_full_stripe_start = (u64)-1;
6408 	struct btrfs_io_geometry geom;
6409 
6410 	ASSERT(bioc_ret);
6411 	ASSERT(op != BTRFS_MAP_DISCARD);
6412 
6413 	em = btrfs_get_chunk_map(fs_info, logical, *length);
6414 	ASSERT(!IS_ERR(em));
6415 
6416 	ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6417 	if (ret < 0)
6418 		return ret;
6419 
6420 	map = em->map_lookup;
6421 
6422 	*length = geom.len;
6423 	stripe_len = geom.stripe_len;
6424 	stripe_nr = geom.stripe_nr;
6425 	stripe_offset = geom.stripe_offset;
6426 	raid56_full_stripe_start = geom.raid56_stripe_offset;
6427 	data_stripes = nr_data_stripes(map);
6428 
6429 	down_read(&dev_replace->rwsem);
6430 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6431 	/*
6432 	 * Hold the semaphore for read during the whole operation, write is
6433 	 * requested at commit time but must wait.
6434 	 */
6435 	if (!dev_replace_is_ongoing)
6436 		up_read(&dev_replace->rwsem);
6437 
6438 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6439 	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6440 		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6441 						    dev_replace->srcdev->devid,
6442 						    &mirror_num,
6443 					    &physical_to_patch_in_first_stripe);
6444 		if (ret)
6445 			goto out;
6446 		else
6447 			patch_the_first_stripe_for_dev_replace = 1;
6448 	} else if (mirror_num > map->num_stripes) {
6449 		mirror_num = 0;
6450 	}
6451 
6452 	num_stripes = 1;
6453 	stripe_index = 0;
6454 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6455 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6456 				&stripe_index);
6457 		if (!need_full_stripe(op))
6458 			mirror_num = 1;
6459 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6460 		if (need_full_stripe(op))
6461 			num_stripes = map->num_stripes;
6462 		else if (mirror_num)
6463 			stripe_index = mirror_num - 1;
6464 		else {
6465 			stripe_index = find_live_mirror(fs_info, map, 0,
6466 					    dev_replace_is_ongoing);
6467 			mirror_num = stripe_index + 1;
6468 		}
6469 
6470 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6471 		if (need_full_stripe(op)) {
6472 			num_stripes = map->num_stripes;
6473 		} else if (mirror_num) {
6474 			stripe_index = mirror_num - 1;
6475 		} else {
6476 			mirror_num = 1;
6477 		}
6478 
6479 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6480 		u32 factor = map->num_stripes / map->sub_stripes;
6481 
6482 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6483 		stripe_index *= map->sub_stripes;
6484 
6485 		if (need_full_stripe(op))
6486 			num_stripes = map->sub_stripes;
6487 		else if (mirror_num)
6488 			stripe_index += mirror_num - 1;
6489 		else {
6490 			int old_stripe_index = stripe_index;
6491 			stripe_index = find_live_mirror(fs_info, map,
6492 					      stripe_index,
6493 					      dev_replace_is_ongoing);
6494 			mirror_num = stripe_index - old_stripe_index + 1;
6495 		}
6496 
6497 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6498 		ASSERT(map->stripe_len == BTRFS_STRIPE_LEN);
6499 		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6500 			/* push stripe_nr back to the start of the full stripe */
6501 			stripe_nr = div64_u64(raid56_full_stripe_start,
6502 					stripe_len * data_stripes);
6503 
6504 			/* RAID[56] write or recovery. Return all stripes */
6505 			num_stripes = map->num_stripes;
6506 			max_errors = btrfs_chunk_max_errors(map);
6507 
6508 			/* Return the length to the full stripe end */
6509 			*length = min(logical + *length,
6510 				      raid56_full_stripe_start + em->start +
6511 				      data_stripes * stripe_len) - logical;
6512 			stripe_index = 0;
6513 			stripe_offset = 0;
6514 		} else {
6515 			/*
6516 			 * Mirror #0 or #1 means the original data block.
6517 			 * Mirror #2 is RAID5 parity block.
6518 			 * Mirror #3 is RAID6 Q block.
6519 			 */
6520 			stripe_nr = div_u64_rem(stripe_nr,
6521 					data_stripes, &stripe_index);
6522 			if (mirror_num > 1)
6523 				stripe_index = data_stripes + mirror_num - 2;
6524 
6525 			/* We distribute the parity blocks across stripes */
6526 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6527 					&stripe_index);
6528 			if (!need_full_stripe(op) && mirror_num <= 1)
6529 				mirror_num = 1;
6530 		}
6531 	} else {
6532 		/*
6533 		 * after this, stripe_nr is the number of stripes on this
6534 		 * device we have to walk to find the data, and stripe_index is
6535 		 * the number of our device in the stripe array
6536 		 */
6537 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6538 				&stripe_index);
6539 		mirror_num = stripe_index + 1;
6540 	}
6541 	if (stripe_index >= map->num_stripes) {
6542 		btrfs_crit(fs_info,
6543 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6544 			   stripe_index, map->num_stripes);
6545 		ret = -EINVAL;
6546 		goto out;
6547 	}
6548 
6549 	num_alloc_stripes = num_stripes;
6550 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6551 		if (op == BTRFS_MAP_WRITE)
6552 			num_alloc_stripes <<= 1;
6553 		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6554 			num_alloc_stripes++;
6555 		tgtdev_indexes = num_stripes;
6556 	}
6557 
6558 	/*
6559 	 * If this I/O maps to a single device, try to return the device and
6560 	 * physical block information on the stack instead of allocating an
6561 	 * I/O context structure.
6562 	 */
6563 	if (smap && num_alloc_stripes == 1 &&
6564 	    !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) &&
6565 	    (!need_full_stripe(op) || !dev_replace_is_ongoing ||
6566 	     !dev_replace->tgtdev)) {
6567 		if (patch_the_first_stripe_for_dev_replace) {
6568 			smap->dev = dev_replace->tgtdev;
6569 			smap->physical = physical_to_patch_in_first_stripe;
6570 			*mirror_num_ret = map->num_stripes + 1;
6571 		} else {
6572 			set_io_stripe(smap, map, stripe_index, stripe_offset,
6573 				      stripe_nr);
6574 			*mirror_num_ret = mirror_num;
6575 		}
6576 		*bioc_ret = NULL;
6577 		ret = 0;
6578 		goto out;
6579 	}
6580 
6581 	bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
6582 	if (!bioc) {
6583 		ret = -ENOMEM;
6584 		goto out;
6585 	}
6586 
6587 	for (i = 0; i < num_stripes; i++) {
6588 		set_io_stripe(&bioc->stripes[i], map, stripe_index, stripe_offset,
6589 			      stripe_nr);
6590 		stripe_index++;
6591 	}
6592 
6593 	/* Build raid_map */
6594 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6595 	    (need_full_stripe(op) || mirror_num > 1)) {
6596 		u64 tmp;
6597 		unsigned rot;
6598 
6599 		/* Work out the disk rotation on this stripe-set */
6600 		div_u64_rem(stripe_nr, num_stripes, &rot);
6601 
6602 		/* Fill in the logical address of each stripe */
6603 		tmp = stripe_nr * data_stripes;
6604 		for (i = 0; i < data_stripes; i++)
6605 			bioc->raid_map[(i + rot) % num_stripes] =
6606 				em->start + (tmp + i) * map->stripe_len;
6607 
6608 		bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
6609 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6610 			bioc->raid_map[(i + rot + 1) % num_stripes] =
6611 				RAID6_Q_STRIPE;
6612 
6613 		sort_parity_stripes(bioc, num_stripes);
6614 	}
6615 
6616 	if (need_full_stripe(op))
6617 		max_errors = btrfs_chunk_max_errors(map);
6618 
6619 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6620 	    need_full_stripe(op)) {
6621 		handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6622 					  &num_stripes, &max_errors);
6623 	}
6624 
6625 	*bioc_ret = bioc;
6626 	bioc->map_type = map->type;
6627 	bioc->num_stripes = num_stripes;
6628 	bioc->max_errors = max_errors;
6629 	bioc->mirror_num = mirror_num;
6630 
6631 	/*
6632 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6633 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
6634 	 * available as a mirror
6635 	 */
6636 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6637 		WARN_ON(num_stripes > 1);
6638 		bioc->stripes[0].dev = dev_replace->tgtdev;
6639 		bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6640 		bioc->mirror_num = map->num_stripes + 1;
6641 	}
6642 out:
6643 	if (dev_replace_is_ongoing) {
6644 		lockdep_assert_held(&dev_replace->rwsem);
6645 		/* Unlock and let waiting writers proceed */
6646 		up_read(&dev_replace->rwsem);
6647 	}
6648 	free_extent_map(em);
6649 	return ret;
6650 }
6651 
6652 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6653 		      u64 logical, u64 *length,
6654 		      struct btrfs_io_context **bioc_ret, int mirror_num)
6655 {
6656 	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6657 				 NULL, &mirror_num, 0);
6658 }
6659 
6660 /* For Scrub/replace */
6661 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6662 		     u64 logical, u64 *length,
6663 		     struct btrfs_io_context **bioc_ret)
6664 {
6665 	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6666 				 NULL, NULL, 1);
6667 }
6668 
6669 /*
6670  * Initialize a btrfs_bio structure.  This skips the embedded bio itself as it
6671  * is already initialized by the block layer.
6672  */
6673 static inline void btrfs_bio_init(struct btrfs_bio *bbio,
6674 				  btrfs_bio_end_io_t end_io, void *private)
6675 {
6676 	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
6677 	bbio->end_io = end_io;
6678 	bbio->private = private;
6679 }
6680 
6681 /*
6682  * Allocate a btrfs_bio structure.  The btrfs_bio is the main I/O container for
6683  * btrfs, and is used for all I/O submitted through btrfs_submit_bio.
6684  *
6685  * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
6686  * a mempool.
6687  */
6688 struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
6689 			    btrfs_bio_end_io_t end_io, void *private)
6690 {
6691 	struct bio *bio;
6692 
6693 	bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
6694 	btrfs_bio_init(btrfs_bio(bio), end_io, private);
6695 	return bio;
6696 }
6697 
6698 struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
6699 				    btrfs_bio_end_io_t end_io, void *private)
6700 {
6701 	struct bio *bio;
6702 	struct btrfs_bio *bbio;
6703 
6704 	ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
6705 
6706 	bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
6707 	bbio = btrfs_bio(bio);
6708 	btrfs_bio_init(bbio, end_io, private);
6709 
6710 	bio_trim(bio, offset >> 9, size >> 9);
6711 	bbio->iter = bio->bi_iter;
6712 	return bio;
6713 }
6714 
6715 static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
6716 {
6717 	if (!dev || !dev->bdev)
6718 		return;
6719 	if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
6720 		return;
6721 
6722 	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6723 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
6724 	else if (!(bio->bi_opf & REQ_RAHEAD))
6725 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
6726 	if (bio->bi_opf & REQ_PREFLUSH)
6727 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
6728 }
6729 
6730 static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
6731 						struct bio *bio)
6732 {
6733 	if (bio->bi_opf & REQ_META)
6734 		return fs_info->endio_meta_workers;
6735 	return fs_info->endio_workers;
6736 }
6737 
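/* Worker to run a read bio's end_io callback in process context. */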
6738 static void btrfs_end_bio_work(struct work_struct *work)
6739 {
6740 	struct btrfs_bio *bbio =
6741 		container_of(work, struct btrfs_bio, end_io_work);
6742 
6743 	bbio->end_io(bbio);
6744 }
6745 
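/*
 * Completion handler for the single device fast path.  Read completions are
 * punted to a workqueue so the end_io work (e.g. checksum verification) can
 * run in process context, while write completions run inline.
 */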
6746 static void btrfs_simple_end_io(struct bio *bio)
6747 {
6748 	struct btrfs_fs_info *fs_info = bio->bi_private;
6749 	struct btrfs_bio *bbio = btrfs_bio(bio);
6750 
6751 	btrfs_bio_counter_dec(fs_info);
6752 
6753 	if (bio->bi_status)
6754 		btrfs_log_dev_io_error(bio, bbio->device);
6755 
6756 	if (bio_op(bio) == REQ_OP_READ) {
6757 		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
6758 		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
6759 	} else {
6760 		bbio->end_io(bbio);
6761 	}
6762 }
6763 
6764 static void btrfs_raid56_end_io(struct bio *bio)
6765 {
6766 	struct btrfs_io_context *bioc = bio->bi_private;
6767 	struct btrfs_bio *bbio = btrfs_bio(bio);
6768 
6769 	btrfs_bio_counter_dec(bioc->fs_info);
6770 	bbio->mirror_num = bioc->mirror_num;
6771 	bbio->end_io(bbio);
6772 
6773 	btrfs_put_bioc(bioc);
6774 }
6775 
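/*
 * Completion handler for the original bio of a mirrored write.  Per-stripe
 * errors are accumulated in bioc->error and only reported to the upper
 * layers once they exceed the tolerance of the RAID profile.
 */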
6776 static void btrfs_orig_write_end_io(struct bio *bio)
6777 {
6778 	struct btrfs_io_stripe *stripe = bio->bi_private;
6779 	struct btrfs_io_context *bioc = stripe->bioc;
6780 	struct btrfs_bio *bbio = btrfs_bio(bio);
6781 
6782 	btrfs_bio_counter_dec(bioc->fs_info);
6783 
6784 	if (bio->bi_status) {
6785 		atomic_inc(&bioc->error);
6786 		btrfs_log_dev_io_error(bio, stripe->dev);
6787 	}
6788 
6789 	/*
6790 	 * Only send an error to the higher layers if it is beyond the tolerance
6791 	 * threshold.
6792 	 */
6793 	if (atomic_read(&bioc->error) > bioc->max_errors)
6794 		bio->bi_status = BLK_STS_IOERR;
6795 	else
6796 		bio->bi_status = BLK_STS_OK;
6797 
6798 	bbio->end_io(bbio);
6799 	btrfs_put_bioc(bioc);
6800 }
6801 
6802 static void btrfs_clone_write_end_io(struct bio *bio)
6803 {
6804 	struct btrfs_io_stripe *stripe = bio->bi_private;
6805 
6806 	if (bio->bi_status) {
6807 		atomic_inc(&stripe->bioc->error);
6808 		btrfs_log_dev_io_error(bio, stripe->dev);
6809 	}
6810 
6811 	/* Pass on control to the original bio this one was cloned from */
6812 	bio_endio(stripe->bioc->orig_bio);
6813 	bio_put(bio);
6814 }
6815 
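/*
 * Final per-device submission step: fail bios targeting missing or
 * non-writeable devices, and remap zone append writes to the start of the
 * target zone (falling back to a regular write on conventional zones).
 */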
6816 static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
6817 {
6818 	if (!dev || !dev->bdev ||
6819 	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
6820 	    (btrfs_op(bio) == BTRFS_MAP_WRITE &&
6821 	     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6822 		bio_io_error(bio);
6823 		return;
6824 	}
6825 
6826 	bio_set_dev(bio, dev->bdev);
6827 
6828 	/*
6829 	 * For zone append writes, bi_sector must point to the beginning of the
6830 	 * zone.
6831 	 */
6832 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6833 		u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
6834 
6835 		if (btrfs_dev_is_sequential(dev, physical)) {
6836 			u64 zone_start = round_down(physical,
6837 						    dev->fs_info->zone_size);
6838 
6839 			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6840 		} else {
6841 			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6842 			bio->bi_opf |= REQ_OP_WRITE;
6843 		}
6844 	}
6845 	btrfs_debug_in_rcu(dev->fs_info,
6846 	"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6847 		__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6848 		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6849 		dev->devid, bio->bi_iter.bi_size);
6850 
6851 	btrfsic_check_bio(bio);
6852 	submit_bio(bio);
6853 }
6854 
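/*
 * Submit one stripe of a mirrored write.  The last stripe reuses the bio
 * embedded in the btrfs_bio, all other stripes get a clone that completes
 * through btrfs_clone_write_end_io.
 */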
6855 static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
6856 {
6857 	struct bio *orig_bio = bioc->orig_bio, *bio;
6858 
6859 	ASSERT(bio_op(orig_bio) != REQ_OP_READ);
6860 
6861 	/* Reuse the bio embedded into the btrfs_bio for the last mirror */
6862 	if (dev_nr == bioc->num_stripes - 1) {
6863 		bio = orig_bio;
6864 		bio->bi_end_io = btrfs_orig_write_end_io;
6865 	} else {
6866 		bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
6867 		bio_inc_remaining(orig_bio);
6868 		bio->bi_end_io = btrfs_clone_write_end_io;
6869 	}
6870 
6871 	bio->bi_private = &bioc->stripes[dev_nr];
6872 	bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
6873 	bioc->stripes[dev_nr].bioc = bioc;
6874 	btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
6875 }
6876 
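/*
 * Map the logical address of a bio to its physical stripes and submit it,
 * using either the single mirror fast path, the RAID56 code, or a fan-out
 * to all mirrors for regular mirrored writes.
 */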
6877 void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num)
6878 {
6879 	u64 logical = bio->bi_iter.bi_sector << 9;
6880 	u64 length = bio->bi_iter.bi_size;
6881 	u64 map_length = length;
6882 	struct btrfs_io_context *bioc = NULL;
6883 	struct btrfs_io_stripe smap;
6884 	int ret;
6885 
6886 	btrfs_bio_counter_inc_blocked(fs_info);
6887 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
6888 				&bioc, &smap, &mirror_num, 1);
6889 	if (ret) {
6890 		btrfs_bio_counter_dec(fs_info);
6891 		btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
6892 		return;
6893 	}
6894 
6895 	if (map_length < length) {
6896 		btrfs_crit(fs_info,
6897 			   "mapping failed logical %llu bio len %llu len %llu",
6898 			   logical, length, map_length);
6899 		BUG();
6900 	}
6901 
6902 	if (!bioc) {
6903 		/* Single mirror read/write fast path */
6904 		btrfs_bio(bio)->mirror_num = mirror_num;
6905 		btrfs_bio(bio)->device = smap.dev;
6906 		bio->bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
6907 		bio->bi_private = fs_info;
6908 		bio->bi_end_io = btrfs_simple_end_io;
6909 		btrfs_submit_dev_bio(smap.dev, bio);
6910 	} else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6911 		/* Parity RAID write or read recovery */
6912 		bio->bi_private = bioc;
6913 		bio->bi_end_io = btrfs_raid56_end_io;
6914 		if (bio_op(bio) == REQ_OP_READ)
6915 			raid56_parity_recover(bio, bioc, mirror_num);
6916 		else
6917 			raid56_parity_write(bio, bioc);
6918 	} else {
6919 		/* Write to multiple mirrors */
6920 		int total_devs = bioc->num_stripes;
6921 		int dev_nr;
6922 
6923 		bioc->orig_bio = bio;
6924 		for (dev_nr = 0; dev_nr < total_devs; dev_nr++)
6925 			btrfs_submit_mirrored_bio(bioc, dev_nr);
6926 	}
6927 }
6928 
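/*
 * Helper for btrfs_find_device(): a NULL fsid in @args matches any
 * fs_devices.
 */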
6929 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6930 				      const struct btrfs_fs_devices *fs_devices)
6931 {
6932 	if (args->fsid == NULL)
6933 		return true;
6934 	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6935 		return true;
6936 	return false;
6937 }
6938 
6939 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6940 				  const struct btrfs_device *device)
6941 {
6942 	if (args->missing) {
6943 		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6944 		    !device->bdev)
6945 			return true;
6946 		return false;
6947 	}
6948 
6949 	if (device->devid != args->devid)
6950 		return false;
6951 	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6952 		return false;
6953 	return true;
6954 }
6955 
6956 /*
6957  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6958  * return NULL.
6959  *
6960  * If devid and uuid are both specified, the match must be exact, otherwise
6961  * only devid is used.
6962  */
6963 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6964 				       const struct btrfs_dev_lookup_args *args)
6965 {
6966 	struct btrfs_device *device;
6967 	struct btrfs_fs_devices *seed_devs;
6968 
6969 	if (dev_args_match_fs_devices(args, fs_devices)) {
6970 		list_for_each_entry(device, &fs_devices->devices, dev_list) {
6971 			if (dev_args_match_device(args, device))
6972 				return device;
6973 		}
6974 	}
6975 
6976 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6977 		if (!dev_args_match_fs_devices(args, seed_devs))
6978 			continue;
6979 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
6980 			if (dev_args_match_device(args, device))
6981 				return device;
6982 		}
6983 	}
6984 
6985 	return NULL;
6986 }
6987 
6988 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6989 					    u64 devid, u8 *dev_uuid)
6990 {
6991 	struct btrfs_device *device;
6992 	unsigned int nofs_flag;
6993 
6994 	/*
6995 	 * We call this under the chunk_mutex, so we want to use NOFS for this
6996 	 * allocation, however we don't want to change btrfs_alloc_device() to
6997 	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6998 	 * places.
6999 	 */
7000 	nofs_flag = memalloc_nofs_save();
7001 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
7002 	memalloc_nofs_restore(nofs_flag);
7003 	if (IS_ERR(device))
7004 		return device;
7005 
7006 	list_add(&device->dev_list, &fs_devices->devices);
7007 	device->fs_devices = fs_devices;
7008 	fs_devices->num_devices++;
7009 
7010 	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7011 	fs_devices->missing_devices++;
7012 
7013 	return device;
7014 }
7015 
7016 /**
7017  * btrfs_alloc_device - allocate struct btrfs_device
7018  * @fs_info:	used only for generating a new devid, can be NULL if
7019  *		devid is provided (i.e. @devid != NULL).
7020  * @devid:	a pointer to devid for this device.  If NULL a new devid
7021  *		is generated.
7022  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
7023  *		is generated.
7024  *
7025  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
7026  * on error.  Returned struct is not linked onto any lists and must be
7027  * destroyed with btrfs_free_device.
7028  */
7029 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
7030 					const u64 *devid,
7031 					const u8 *uuid)
7032 {
7033 	struct btrfs_device *dev;
7034 	u64 tmp;
7035 
7036 	if (WARN_ON(!devid && !fs_info))
7037 		return ERR_PTR(-EINVAL);
7038 
7039 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
7040 	if (!dev)
7041 		return ERR_PTR(-ENOMEM);
7042 
7043 	INIT_LIST_HEAD(&dev->dev_list);
7044 	INIT_LIST_HEAD(&dev->dev_alloc_list);
7045 	INIT_LIST_HEAD(&dev->post_commit_list);
7046 
7047 	atomic_set(&dev->dev_stats_ccnt, 0);
7048 	btrfs_device_data_ordered_init(dev);
7049 	extent_io_tree_init(fs_info, &dev->alloc_state,
7050 			    IO_TREE_DEVICE_ALLOC_STATE, NULL);
7051 
7052 	if (devid)
7053 		tmp = *devid;
7054 	else {
7055 		int ret;
7056 
7057 		ret = find_next_devid(fs_info, &tmp);
7058 		if (ret) {
7059 			btrfs_free_device(dev);
7060 			return ERR_PTR(ret);
7061 		}
7062 	}
7063 	dev->devid = tmp;
7064 
7065 	if (uuid)
7066 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
7067 	else
7068 		generate_random_uuid(dev->uuid);
7069 
7070 	return dev;
7071 }
7072 
7073 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
7074 					u64 devid, u8 *uuid, bool error)
7075 {
7076 	if (error)
7077 		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
7078 			      devid, uuid);
7079 	else
7080 		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
7081 			      devid, uuid);
7082 }
7083 
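/*
 * Length of a chunk stripe on a single device: the chunk length divided by
 * the number of data stripes.
 */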
7084 u64 btrfs_calc_stripe_length(const struct extent_map *em)
7085 {
7086 	const struct map_lookup *map = em->map_lookup;
7087 	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
7088 
7089 	return div_u64(em->len, data_stripes);
7090 }
7091 
7092 #if BITS_PER_LONG == 32
7093 /*
7094  * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
7095  * can't be accessed on 32bit systems.
7096  *
7097  * This function does a mount time check to reject the fs if it already has
7098  * a metadata chunk beyond that limit.
7099  */
7100 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7101 				  u64 logical, u64 length, u64 type)
7102 {
7103 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7104 		return 0;
7105 
7106 	if (logical + length < MAX_LFS_FILESIZE)
7107 		return 0;
7108 
7109 	btrfs_err_32bit_limit(fs_info);
7110 	return -EOVERFLOW;
7111 }
7112 
7113 /*
7114  * This is to give early warning for any metadata chunk reaching
7115  * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
7116  * Although we can still access the metadata, it's not going to be possible
7117  * once the limit is reached.
7118  */
7119 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7120 				  u64 logical, u64 length, u64 type)
7121 {
7122 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7123 		return;
7124 
7125 	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
7126 		return;
7127 
7128 	btrfs_warn_32bit_limit(fs_info);
7129 }
7130 #endif
7131 
7132 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
7133 						  u64 devid, u8 *uuid)
7134 {
7135 	struct btrfs_device *dev;
7136 
7137 	if (!btrfs_test_opt(fs_info, DEGRADED)) {
7138 		btrfs_report_missing_device(fs_info, devid, uuid, true);
7139 		return ERR_PTR(-ENOENT);
7140 	}
7141 
7142 	dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
7143 	if (IS_ERR(dev)) {
7144 		btrfs_err(fs_info, "failed to init missing device %llu: %ld",
7145 			  devid, PTR_ERR(dev));
7146 		return dev;
7147 	}
7148 	btrfs_report_missing_device(fs_info, devid, uuid, false);
7149 
7150 	return dev;
7151 }
7152 
7153 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
7154 			  struct btrfs_chunk *chunk)
7155 {
7156 	BTRFS_DEV_LOOKUP_ARGS(args);
7157 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7158 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7159 	struct map_lookup *map;
7160 	struct extent_map *em;
7161 	u64 logical;
7162 	u64 length;
7163 	u64 devid;
7164 	u64 type;
7165 	u8 uuid[BTRFS_UUID_SIZE];
7166 	int index;
7167 	int num_stripes;
7168 	int ret;
7169 	int i;
7170 
7171 	logical = key->offset;
7172 	length = btrfs_chunk_length(leaf, chunk);
7173 	type = btrfs_chunk_type(leaf, chunk);
7174 	index = btrfs_bg_flags_to_raid_index(type);
7175 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
7176 
7177 #if BITS_PER_LONG == 32
7178 	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
7179 	if (ret < 0)
7180 		return ret;
7181 	warn_32bit_meta_chunk(fs_info, logical, length, type);
7182 #endif
7183 
7184 	/*
7185 	 * Only need to verify chunk item if we're reading from sys chunk array,
7186 	 * as chunk item in tree block is already verified by tree-checker.
7187 	 */
7188 	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
7189 		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
7190 		if (ret)
7191 			return ret;
7192 	}
7193 
7194 	read_lock(&map_tree->lock);
7195 	em = lookup_extent_mapping(map_tree, logical, 1);
7196 	read_unlock(&map_tree->lock);
7197 
7198 	/* already mapped? */
7199 	if (em && em->start <= logical && em->start + em->len > logical) {
7200 		free_extent_map(em);
7201 		return 0;
7202 	} else if (em) {
7203 		free_extent_map(em);
7204 	}
7205 
7206 	em = alloc_extent_map();
7207 	if (!em)
7208 		return -ENOMEM;
7209 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
7210 	if (!map) {
7211 		free_extent_map(em);
7212 		return -ENOMEM;
7213 	}
7214 
7215 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
7216 	em->map_lookup = map;
7217 	em->start = logical;
7218 	em->len = length;
7219 	em->orig_start = 0;
7220 	em->block_start = 0;
7221 	em->block_len = em->len;
7222 
7223 	map->num_stripes = num_stripes;
7224 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
7225 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
7226 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
7227 	map->type = type;
7228 	/*
7229 	 * We can't use the sub_stripes value, as for profiles other than
7230 	 * RAID10, they may have 0 as sub_stripes for filesystems created by
7231 	 * older mkfs (<v5.4).
7232 	 * In that case, it can cause divide-by-zero errors later.
7233 	 * Since currently sub_stripes is fixed for each profile, let's
7234 	 * use the trusted value instead.
7235 	 */
7236 	map->sub_stripes = btrfs_raid_array[index].sub_stripes;
7237 	map->verified_stripes = 0;
7238 	em->orig_block_len = btrfs_calc_stripe_length(em);
7239 	for (i = 0; i < num_stripes; i++) {
7240 		map->stripes[i].physical =
7241 			btrfs_stripe_offset_nr(leaf, chunk, i);
7242 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7243 		args.devid = devid;
7244 		read_extent_buffer(leaf, uuid, (unsigned long)
7245 				   btrfs_stripe_dev_uuid_nr(chunk, i),
7246 				   BTRFS_UUID_SIZE);
7247 		args.uuid = uuid;
7248 		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7249 		if (!map->stripes[i].dev) {
7250 			map->stripes[i].dev = handle_missing_device(fs_info,
7251 								    devid, uuid);
7252 			if (IS_ERR(map->stripes[i].dev)) {
7253 				ret = PTR_ERR(map->stripes[i].dev);
7254 				free_extent_map(em);
7255 				return ret;
7256 			}
7257 		}
7258 
7259 		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7260 				&(map->stripes[i].dev->dev_state));
7261 	}
7262 
7263 	write_lock(&map_tree->lock);
7264 	ret = add_extent_mapping(map_tree, em, 0);
7265 	write_unlock(&map_tree->lock);
7266 	if (ret < 0) {
7267 		btrfs_err(fs_info,
7268 			  "failed to add chunk map, start=%llu len=%llu: %d",
7269 			  em->start, em->len, ret);
7270 	}
7271 	free_extent_map(em);
7272 
7273 	return ret;
7274 }
7275 
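/*
 * Copy the on-disk DEV_ITEM fields into the in-memory device.  The commit_*
 * values start out identical to the on-disk values at mount time.
 */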
7276 static void fill_device_from_item(struct extent_buffer *leaf,
7277 				 struct btrfs_dev_item *dev_item,
7278 				 struct btrfs_device *device)
7279 {
7280 	unsigned long ptr;
7281 
7282 	device->devid = btrfs_device_id(leaf, dev_item);
7283 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7284 	device->total_bytes = device->disk_total_bytes;
7285 	device->commit_total_bytes = device->disk_total_bytes;
7286 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7287 	device->commit_bytes_used = device->bytes_used;
7288 	device->type = btrfs_device_type(leaf, dev_item);
7289 	device->io_align = btrfs_device_io_align(leaf, dev_item);
7290 	device->io_width = btrfs_device_io_width(leaf, dev_item);
7291 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7292 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7293 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7294 
7295 	ptr = btrfs_device_uuid(dev_item);
7296 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7297 }
7298 
7299 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7300 						  u8 *fsid)
7301 {
7302 	struct btrfs_fs_devices *fs_devices;
7303 	int ret;
7304 
7305 	lockdep_assert_held(&uuid_mutex);
7306 	ASSERT(fsid);
7307 
7308 	/* This will match only for multi-device seed fs */
7309 	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7310 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7311 			return fs_devices;
7312 
7314 	fs_devices = find_fsid(fsid, NULL);
7315 	if (!fs_devices) {
7316 		if (!btrfs_test_opt(fs_info, DEGRADED))
7317 			return ERR_PTR(-ENOENT);
7318 
7319 		fs_devices = alloc_fs_devices(fsid, NULL);
7320 		if (IS_ERR(fs_devices))
7321 			return fs_devices;
7322 
7323 		fs_devices->seeding = true;
7324 		fs_devices->opened = 1;
7325 		return fs_devices;
7326 	}
7327 
7328 	/*
7329 	 * Upon first call for a seed fs fsid, just create a private copy of the
7330 	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7331 	 */
7332 	fs_devices = clone_fs_devices(fs_devices);
7333 	if (IS_ERR(fs_devices))
7334 		return fs_devices;
7335 
7336 	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7337 	if (ret) {
7338 		free_fs_devices(fs_devices);
7339 		return ERR_PTR(ret);
7340 	}
7341 
7342 	if (!fs_devices->seeding) {
7343 		close_fs_devices(fs_devices);
7344 		free_fs_devices(fs_devices);
7345 		return ERR_PTR(-EINVAL);
7346 	}
7347 
7348 	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7349 
7350 	return fs_devices;
7351 }
7352 
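/*
 * Read one DEV_ITEM at mount time.  Devices belonging to a seed filesystem
 * are looked up in (and if needed moved to) the seed fs_devices, and missing
 * devices are only tolerated when mounting with -o degraded.
 */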
7353 static int read_one_dev(struct extent_buffer *leaf,
7354 			struct btrfs_dev_item *dev_item)
7355 {
7356 	BTRFS_DEV_LOOKUP_ARGS(args);
7357 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7358 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7359 	struct btrfs_device *device;
7360 	u64 devid;
7362 	u8 fs_uuid[BTRFS_FSID_SIZE];
7363 	u8 dev_uuid[BTRFS_UUID_SIZE];
7364 
7365 	devid = btrfs_device_id(leaf, dev_item);
7366 	args.devid = devid;
7367 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7368 			   BTRFS_UUID_SIZE);
7369 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7370 			   BTRFS_FSID_SIZE);
7371 	args.uuid = dev_uuid;
7372 	args.fsid = fs_uuid;
7373 
7374 	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7375 		fs_devices = open_seed_devices(fs_info, fs_uuid);
7376 		if (IS_ERR(fs_devices))
7377 			return PTR_ERR(fs_devices);
7378 	}
7379 
7380 	device = btrfs_find_device(fs_info->fs_devices, &args);
7381 	if (!device) {
7382 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7383 			btrfs_report_missing_device(fs_info, devid,
7384 							dev_uuid, true);
7385 			return -ENOENT;
7386 		}
7387 
7388 		device = add_missing_dev(fs_devices, devid, dev_uuid);
7389 		if (IS_ERR(device)) {
7390 			btrfs_err(fs_info,
7391 				"failed to add missing dev %llu: %ld",
7392 				devid, PTR_ERR(device));
7393 			return PTR_ERR(device);
7394 		}
7395 		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7396 	} else {
7397 		if (!device->bdev) {
7398 			if (!btrfs_test_opt(fs_info, DEGRADED)) {
7399 				btrfs_report_missing_device(fs_info,
7400 						devid, dev_uuid, true);
7401 				return -ENOENT;
7402 			}
7403 			btrfs_report_missing_device(fs_info, devid,
7404 							dev_uuid, false);
7405 		}
7406 
7407 		if (!device->bdev &&
7408 		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7409 			/*
7410 			 * This happens when a device that was properly set up
7411 			 * in the device info lists suddenly goes bad.
7412 			 * device->bdev is NULL, so we have to set the
7413 			 * BTRFS_DEV_STATE_MISSING bit here.
7414 			 */
7415 			device->fs_devices->missing_devices++;
7416 			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7417 		}
7418 
7419 		/* Move the device to its own fs_devices */
7420 		if (device->fs_devices != fs_devices) {
7421 			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7422 							&device->dev_state));
7423 
7424 			list_move(&device->dev_list, &fs_devices->devices);
7425 			device->fs_devices->num_devices--;
7426 			fs_devices->num_devices++;
7427 
7428 			device->fs_devices->missing_devices--;
7429 			fs_devices->missing_devices++;
7430 
7431 			device->fs_devices = fs_devices;
7432 		}
7433 	}
7434 
7435 	if (device->fs_devices != fs_info->fs_devices) {
7436 		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7437 		if (device->generation !=
7438 		    btrfs_device_generation(leaf, dev_item))
7439 			return -EINVAL;
7440 	}
7441 
7442 	fill_device_from_item(leaf, dev_item, device);
7443 	if (device->bdev) {
7444 		u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7445 
7446 		if (device->total_bytes > max_total_bytes) {
7447 			btrfs_err(fs_info,
7448 			"device total_bytes should be at most %llu but found %llu",
7449 				  max_total_bytes, device->total_bytes);
7450 			return -EINVAL;
7451 		}
7452 	}
7453 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7454 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7455 	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7456 		device->fs_devices->total_rw_bytes += device->total_bytes;
7457 		atomic64_add(device->total_bytes - device->bytes_used,
7458 				&fs_info->free_chunk_space);
7459 	}
7460 	return 0;
7462 }
7463 
7464 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7465 {
7466 	struct btrfs_super_block *super_copy = fs_info->super_copy;
7467 	struct extent_buffer *sb;
7468 	struct btrfs_disk_key *disk_key;
7469 	struct btrfs_chunk *chunk;
7470 	u8 *array_ptr;
7471 	unsigned long sb_array_offset;
7472 	int ret = 0;
7473 	u32 num_stripes;
7474 	u32 array_size;
7475 	u32 len = 0;
7476 	u32 cur_offset;
7477 	u64 type;
7478 	struct btrfs_key key;
7479 
7480 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7481 
7482 	/*
7483 	 * We allocated a dummy extent, just to use extent buffer accessors.
7484 	 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
7485 	 * that's fine, we will not go beyond system chunk array anyway.
7486 	 */
7487 	sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
7488 	if (!sb)
7489 		return -ENOMEM;
7490 	set_extent_buffer_uptodate(sb);
7491 
7492 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7493 	array_size = btrfs_super_sys_array_size(super_copy);
7494 
7495 	array_ptr = super_copy->sys_chunk_array;
7496 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7497 	cur_offset = 0;
7498 
7499 	while (cur_offset < array_size) {
7500 		disk_key = (struct btrfs_disk_key *)array_ptr;
7501 		len = sizeof(*disk_key);
7502 		if (cur_offset + len > array_size)
7503 			goto out_short_read;
7504 
7505 		btrfs_disk_key_to_cpu(&key, disk_key);
7506 
7507 		array_ptr += len;
7508 		sb_array_offset += len;
7509 		cur_offset += len;
7510 
7511 		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7512 			btrfs_err(fs_info,
7513 			    "unexpected item type %u in sys_array at offset %u",
7514 				  (u32)key.type, cur_offset);
7515 			ret = -EIO;
7516 			break;
7517 		}
7518 
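		/*
		 * Not a real pointer: the extent buffer accessors below only
		 * use it as an offset into the dummy extent buffer sb.
		 */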
7519 		chunk = (struct btrfs_chunk *)sb_array_offset;
7520 		/*
7521 		 * At least one btrfs_chunk with one stripe must be present,
7522 		 * exact stripe count check comes afterwards
7523 		 */
7524 		len = btrfs_chunk_item_size(1);
7525 		if (cur_offset + len > array_size)
7526 			goto out_short_read;
7527 
7528 		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7529 		if (!num_stripes) {
7530 			btrfs_err(fs_info,
7531 			"invalid number of stripes %u in sys_array at offset %u",
7532 				  num_stripes, cur_offset);
7533 			ret = -EIO;
7534 			break;
7535 		}
7536 
7537 		type = btrfs_chunk_type(sb, chunk);
7538 		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7539 			btrfs_err(fs_info,
7540 			"invalid chunk type %llu in sys_array at offset %u",
7541 				  type, cur_offset);
7542 			ret = -EIO;
7543 			break;
7544 		}
7545 
7546 		len = btrfs_chunk_item_size(num_stripes);
7547 		if (cur_offset + len > array_size)
7548 			goto out_short_read;
7549 
7550 		ret = read_one_chunk(&key, sb, chunk);
7551 		if (ret)
7552 			break;
7553 
7554 		array_ptr += len;
7555 		sb_array_offset += len;
7556 		cur_offset += len;
7557 	}
7558 	clear_extent_buffer_uptodate(sb);
7559 	free_extent_buffer_stale(sb);
7560 	return ret;
7561 
7562 out_short_read:
7563 	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7564 			len, cur_offset);
7565 	clear_extent_buffer_uptodate(sb);
7566 	free_extent_buffer_stale(sb);
7567 	return -EIO;
7568 }
7569 
7570 /*
7571  * Check if all chunks in the fs are OK for read-write degraded mount
7572  *
7573  * If the @failing_dev is specified, it's accounted as missing.
7574  *
7575  * Return true if all chunks meet the minimal RW mount requirements.
7576  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7577  */
7578 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7579 					struct btrfs_device *failing_dev)
7580 {
7581 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7582 	struct extent_map *em;
7583 	u64 next_start = 0;
7584 	bool ret = true;
7585 
7586 	read_lock(&map_tree->lock);
7587 	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7588 	read_unlock(&map_tree->lock);
7589 	/* No chunk at all? Return false anyway */
7590 	if (!em) {
7591 		ret = false;
7592 		goto out;
7593 	}
7594 	while (em) {
7595 		struct map_lookup *map;
7596 		int missing = 0;
7597 		int max_tolerated;
7598 		int i;
7599 
7600 		map = em->map_lookup;
7601 		max_tolerated =
7602 			btrfs_get_num_tolerated_disk_barrier_failures(
7603 					map->type);
7604 		for (i = 0; i < map->num_stripes; i++) {
7605 			struct btrfs_device *dev = map->stripes[i].dev;
7606 
7607 			if (!dev || !dev->bdev ||
7608 			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7609 			    dev->last_flush_error)
7610 				missing++;
7611 			else if (failing_dev && failing_dev == dev)
7612 				missing++;
7613 		}
7614 		if (missing > max_tolerated) {
7615 			if (!failing_dev)
7616 				btrfs_warn(fs_info,
7617 	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7618 				   em->start, missing, max_tolerated);
7619 			free_extent_map(em);
7620 			ret = false;
7621 			goto out;
7622 		}
7623 		next_start = extent_map_end(em);
7624 		free_extent_map(em);
7625 
7626 		read_lock(&map_tree->lock);
7627 		em = lookup_extent_mapping(map_tree, next_start,
7628 					   (u64)(-1) - next_start);
7629 		read_unlock(&map_tree->lock);
7630 	}
7631 out:
7632 	return ret;
7633 }
7634 
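/* Start readahead on all children of @node to speed up the chunk tree walk. */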
7635 static void readahead_tree_node_children(struct extent_buffer *node)
7636 {
7637 	int i;
7638 	const int nr_items = btrfs_header_nritems(node);
7639 
7640 	for (i = 0; i < nr_items; i++)
7641 		btrfs_readahead_node_child(node, i);
7642 }
7643 
7644 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7645 {
7646 	struct btrfs_root *root = fs_info->chunk_root;
7647 	struct btrfs_path *path;
7648 	struct extent_buffer *leaf;
7649 	struct btrfs_key key;
7650 	struct btrfs_key found_key;
7651 	int ret;
7652 	int slot;
7653 	int iter_ret = 0;
7654 	u64 total_dev = 0;
7655 	u64 last_ra_node = 0;
7656 
7657 	path = btrfs_alloc_path();
7658 	if (!path)
7659 		return -ENOMEM;
7660 
7661 	/*
7662 	 * uuid_mutex is needed only if we are mounting a sprout FS,
7663 	 * otherwise we don't need it.
7664 	 */
7665 	mutex_lock(&uuid_mutex);
7666 
7667 	/*
7668 	 * It is possible for mount and umount to race in such a way that
7669 	 * we execute this code path, but open_fs_devices failed to clear
7670 	 * total_rw_bytes. We certainly want it cleared before reading the
7671 	 * device items, so clear it here.
7672 	 */
7673 	fs_info->fs_devices->total_rw_bytes = 0;
7674 
7675 	/*
7676 	 * Lockdep complains about possible circular locking dependency between
7677 	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7678 	 * used for freeze protection of a fs (struct super_block.s_writers),
7679 	 * which we take when starting a transaction, and extent buffers of the
7680 	 * chunk tree if we call read_one_dev() while holding a lock on an
7681 	 * extent buffer of the chunk tree. Since we are mounting the filesystem
7682 	 * and at this point there can't be any concurrent task modifying the
7683 	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7684 	 */
7685 	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7686 	path->skip_locking = 1;
7687 
7688 	/*
7689 	 * Read all device items, and then all the chunk items. All
7690 	 * device items are found before any chunk item (their object id
7691 	 * is smaller than the lowest possible object id for a chunk
7692 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7693 	 */
7694 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7695 	key.offset = 0;
7696 	key.type = 0;
7697 	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
7698 		struct extent_buffer *node = path->nodes[1];
7699 
7700 		leaf = path->nodes[0];
7701 		slot = path->slots[0];
7702 
7703 		if (node) {
7704 			if (last_ra_node != node->start) {
7705 				readahead_tree_node_children(node);
7706 				last_ra_node = node->start;
7707 			}
7708 		}
7709 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7710 			struct btrfs_dev_item *dev_item;
7711 			dev_item = btrfs_item_ptr(leaf, slot,
7712 						  struct btrfs_dev_item);
7713 			ret = read_one_dev(leaf, dev_item);
7714 			if (ret)
7715 				goto error;
7716 			total_dev++;
7717 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7718 			struct btrfs_chunk *chunk;
7719 
7720 			/*
7721 			 * We are only called at mount time, so no need to take
7722 			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7723 			 * we always lock first fs_info->chunk_mutex before
7724 			 * acquiring any locks on the chunk tree. This is a
7725 			 * requirement for chunk allocation, see the comment on
7726 			 * top of btrfs_chunk_alloc() for details.
7727 			 */
7728 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7729 			ret = read_one_chunk(&found_key, leaf, chunk);
7730 			if (ret)
7731 				goto error;
7732 		}
7733 	}
7734 	/* Catch error found during iteration */
7735 	if (iter_ret < 0) {
7736 		ret = iter_ret;
7737 		goto error;
7738 	}
7739 
7740 	/*
7741 	 * After loading chunk tree, we've got all device information,
7742 	 * do another round of validation checks.
7743 	 */
7744 	if (total_dev != fs_info->fs_devices->total_devices) {
7745 		btrfs_warn(fs_info,
7746 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7747 			  btrfs_super_num_devices(fs_info->super_copy),
7748 			  total_dev);
7749 		fs_info->fs_devices->total_devices = total_dev;
7750 		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7751 	}
7752 	if (btrfs_super_total_bytes(fs_info->super_copy) <
7753 	    fs_info->fs_devices->total_rw_bytes) {
7754 		btrfs_err(fs_info,
7755 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7756 			  btrfs_super_total_bytes(fs_info->super_copy),
7757 			  fs_info->fs_devices->total_rw_bytes);
7758 		ret = -EINVAL;
7759 		goto error;
7760 	}
7761 	ret = 0;
7762 error:
7763 	mutex_unlock(&uuid_mutex);
7764 
7765 	btrfs_free_path(path);
7766 	return ret;
7767 }
7768 
7769 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7770 {
7771 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7772 	struct btrfs_device *device;
7773 	int ret = 0;
7774 
7775 	fs_devices->fs_info = fs_info;
7776 
7777 	mutex_lock(&fs_devices->device_list_mutex);
7778 	list_for_each_entry(device, &fs_devices->devices, dev_list)
7779 		device->fs_info = fs_info;
7780 
7781 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7782 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7783 			device->fs_info = fs_info;
7784 			ret = btrfs_get_dev_zone_info(device, false);
7785 			if (ret)
7786 				break;
7787 		}
7788 
7789 		seed_devs->fs_info = fs_info;
7790 	}
7791 	mutex_unlock(&fs_devices->device_list_mutex);
7792 
7793 	return ret;
7794 }
7795 
7796 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7797 				 const struct btrfs_dev_stats_item *ptr,
7798 				 int index)
7799 {
7800 	u64 val;
7801 
7802 	read_extent_buffer(eb, &val,
7803 			   offsetof(struct btrfs_dev_stats_item, values) +
7804 			    ((unsigned long)ptr) + (index * sizeof(u64)),
7805 			   sizeof(val));
7806 	return val;
7807 }
7808 
7809 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7810 				      struct btrfs_dev_stats_item *ptr,
7811 				      int index, u64 val)
7812 {
7813 	write_extent_buffer(eb, &val,
7814 			    offsetof(struct btrfs_dev_stats_item, values) +
7815 			     ((unsigned long)ptr) + (index * sizeof(u64)),
7816 			    sizeof(val));
7817 }
7818 
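/*
 * Load the persistent device stats item for one device.  If no item exists
 * yet, the in-memory counters simply start at zero.
 */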
7819 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7820 				       struct btrfs_path *path)
7821 {
7822 	struct btrfs_dev_stats_item *ptr;
7823 	struct extent_buffer *eb;
7824 	struct btrfs_key key;
7825 	int item_size;
7826 	int i, ret, slot;
7827 
7828 	if (!device->fs_info->dev_root)
7829 		return 0;
7830 
7831 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7832 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7833 	key.offset = device->devid;
7834 	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7835 	if (ret) {
7836 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7837 			btrfs_dev_stat_set(device, i, 0);
7838 		device->dev_stats_valid = 1;
7839 		btrfs_release_path(path);
7840 		return ret < 0 ? ret : 0;
7841 	}
7842 	slot = path->slots[0];
7843 	eb = path->nodes[0];
7844 	item_size = btrfs_item_size(eb, slot);
7845 
7846 	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7847 
7848 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7849 		if (item_size >= (1 + i) * sizeof(__le64))
7850 			btrfs_dev_stat_set(device, i,
7851 					   btrfs_dev_stats_value(eb, ptr, i));
7852 		else
7853 			btrfs_dev_stat_set(device, i, 0);
7854 	}
7855 
7856 	device->dev_stats_valid = 1;
7857 	btrfs_dev_stat_print_on_load(device);
7858 	btrfs_release_path(path);
7859 
7860 	return 0;
7861 }
7862 
7863 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7864 {
7865 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7866 	struct btrfs_device *device;
7867 	struct btrfs_path *path = NULL;
7868 	int ret = 0;
7869 
7870 	path = btrfs_alloc_path();
7871 	if (!path)
7872 		return -ENOMEM;
7873 
7874 	mutex_lock(&fs_devices->device_list_mutex);
7875 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7876 		ret = btrfs_device_init_dev_stats(device, path);
7877 		if (ret)
7878 			goto out;
7879 	}
7880 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7881 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7882 			ret = btrfs_device_init_dev_stats(device, path);
7883 			if (ret)
7884 				goto out;
7885 		}
7886 	}
7887 out:
7888 	mutex_unlock(&fs_devices->device_list_mutex);
7889 
7890 	btrfs_free_path(path);
7891 	return ret;
7892 }
7893 
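/*
 * Write the in-memory counters into the device's DEV_STATS item, replacing
 * an existing item that is too small (an older format with fewer values).
 */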
7894 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7895 				struct btrfs_device *device)
7896 {
7897 	struct btrfs_fs_info *fs_info = trans->fs_info;
7898 	struct btrfs_root *dev_root = fs_info->dev_root;
7899 	struct btrfs_path *path;
7900 	struct btrfs_key key;
7901 	struct extent_buffer *eb;
7902 	struct btrfs_dev_stats_item *ptr;
7903 	int ret;
7904 	int i;
7905 
7906 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7907 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7908 	key.offset = device->devid;
7909 
7910 	path = btrfs_alloc_path();
7911 	if (!path)
7912 		return -ENOMEM;
7913 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7914 	if (ret < 0) {
7915 		btrfs_warn_in_rcu(fs_info,
7916 			"error %d while searching for dev_stats item for device %s",
7917 			      ret, rcu_str_deref(device->name));
7918 		goto out;
7919 	}
7920 
7921 	if (ret == 0 &&
7922 	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7923 		/* need to delete old one and insert a new one */
7924 		ret = btrfs_del_item(trans, dev_root, path);
7925 		if (ret != 0) {
7926 			btrfs_warn_in_rcu(fs_info,
7927 				"delete too small dev_stats item for device %s failed %d",
7928 				      rcu_str_deref(device->name), ret);
7929 			goto out;
7930 		}
7931 		ret = 1;
7932 	}
7933 
7934 	if (ret == 1) {
7935 		/* need to insert a new item */
7936 		btrfs_release_path(path);
7937 		ret = btrfs_insert_empty_item(trans, dev_root, path,
7938 					      &key, sizeof(*ptr));
7939 		if (ret < 0) {
7940 			btrfs_warn_in_rcu(fs_info,
7941 				"insert dev_stats item for device %s failed %d",
7942 				rcu_str_deref(device->name), ret);
7943 			goto out;
7944 		}
7945 	}
7946 
7947 	eb = path->nodes[0];
7948 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7949 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7950 		btrfs_set_dev_stats_value(eb, ptr, i,
7951 					  btrfs_dev_stat_read(device, i));
7952 	btrfs_mark_buffer_dirty(eb);
7953 
7954 out:
7955 	btrfs_free_path(path);
7956 	return ret;
7957 }
7958 
7959 /*
7960  * Called from commit_transaction. Writes all changed device stats to disk.
7961  */
7962 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7963 {
7964 	struct btrfs_fs_info *fs_info = trans->fs_info;
7965 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7966 	struct btrfs_device *device;
7967 	int stats_cnt;
7968 	int ret = 0;
7969 
7970 	mutex_lock(&fs_devices->device_list_mutex);
7971 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7972 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7973 		if (!device->dev_stats_valid || stats_cnt == 0)
7974 			continue;
7975 
7977 		/*
7978 		 * There is a LOAD-LOAD control dependency between the value of
7979 		 * dev_stats_ccnt and updating the on-disk values which requires
7980 		 * reading the in-memory counters. Such control dependencies
7981 		 * require explicit read memory barriers.
7982 		 *
7983 		 * This memory barrier pairs with smp_mb__before_atomic in
7984 		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7985 		 * barrier implied by atomic_xchg in
7986 		 * btrfs_dev_stats_read_and_reset.
7987 		 */
7988 		smp_rmb();
7989 
7990 		ret = update_dev_stat_item(trans, device);
7991 		if (!ret)
7992 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7993 	}
7994 	mutex_unlock(&fs_devices->device_list_mutex);
7995 
7996 	return ret;
7997 }
7998 
7999 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
8000 {
8001 	btrfs_dev_stat_inc(dev, index);
8002 
8003 	if (!dev->dev_stats_valid)
8004 		return;
8005 	btrfs_err_rl_in_rcu(dev->fs_info,
8006 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
8007 			   rcu_str_deref(dev->name),
8008 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
8009 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
8010 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
8011 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
8012 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
8013 }
8014 
8015 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
8016 {
8017 	int i;
8018 
8019 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
8020 		if (btrfs_dev_stat_read(dev, i) != 0)
8021 			break;
8022 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
8023 		return; /* all values == 0, suppress message */
8024 
8025 	btrfs_info_in_rcu(dev->fs_info,
8026 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
8027 	       rcu_str_deref(dev->name),
8028 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
8029 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
8030 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
8031 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
8032 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
8033 }
8034 
8035 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
8036 			struct btrfs_ioctl_get_dev_stats *stats)
8037 {
8038 	BTRFS_DEV_LOOKUP_ARGS(args);
8039 	struct btrfs_device *dev;
8040 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
8041 	int i;
8042 
8043 	mutex_lock(&fs_devices->device_list_mutex);
8044 	args.devid = stats->devid;
8045 	dev = btrfs_find_device(fs_info->fs_devices, &args);
8046 	mutex_unlock(&fs_devices->device_list_mutex);
8047 
8048 	if (!dev) {
8049 		btrfs_warn(fs_info, "get dev_stats failed, device not found");
8050 		return -ENODEV;
8051 	} else if (!dev->dev_stats_valid) {
8052 		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
8053 		return -ENODEV;
8054 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
8055 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
8056 			if (stats->nr_items > i)
8057 				stats->values[i] =
8058 					btrfs_dev_stat_read_and_reset(dev, i);
8059 			else
8060 				btrfs_dev_stat_set(dev, i, 0);
8061 		}
8062 		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
8063 			   current->comm, task_pid_nr(current));
8064 	} else {
8065 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
8066 			if (stats->nr_items > i)
8067 				stats->values[i] = btrfs_dev_stat_read(dev, i);
8068 	}
8069 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
8070 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
8071 	return 0;
8072 }
8073 
8074 /*
8075  * Update the size and bytes used for each device where it changed.  This is
8076  * delayed since we would otherwise get errors while writing out the
8077  * superblocks.
8078  *
8079  * Must be invoked during transaction commit.
8080  */
8081 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
8082 {
8083 	struct btrfs_device *curr, *next;
8084 
8085 	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
8086 
8087 	if (list_empty(&trans->dev_update_list))
8088 		return;
8089 
8090 	/*
8091 	 * We don't need the device_list_mutex here.  This list is owned by the
8092 	 * transaction and the transaction must complete before the device is
8093 	 * released.
8094 	 */
8095 	mutex_lock(&trans->fs_info->chunk_mutex);
8096 	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
8097 				 post_commit_list) {
8098 		list_del_init(&curr->post_commit_list);
8099 		curr->commit_total_bytes = curr->disk_total_bytes;
8100 		curr->commit_bytes_used = curr->bytes_used;
8101 	}
8102 	mutex_unlock(&trans->fs_info->chunk_mutex);
8103 }
8104 
8105 /*
8106  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
8107  */
8108 int btrfs_bg_type_to_factor(u64 flags)
8109 {
8110 	const int index = btrfs_bg_flags_to_raid_index(flags);
8111 
8112 	return btrfs_raid_array[index].ncopies;
8113 }
8114 
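/*
 * Cross check one dev extent against the chunk it maps to: the length must
 * match the chunk's stripe length, the (devid, physical) pair must be one of
 * the chunk's stripes, and the extent must stay inside the device (and be
 * zone aligned on zoned devices).
 */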
8117 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
8118 				 u64 chunk_offset, u64 devid,
8119 				 u64 physical_offset, u64 physical_len)
8120 {
8121 	struct btrfs_dev_lookup_args args = { .devid = devid };
8122 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8123 	struct extent_map *em;
8124 	struct map_lookup *map;
8125 	struct btrfs_device *dev;
8126 	u64 stripe_len;
8127 	bool found = false;
8128 	int ret = 0;
8129 	int i;
8130 
8131 	read_lock(&em_tree->lock);
8132 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
8133 	read_unlock(&em_tree->lock);
8134 
8135 	if (!em) {
8136 		btrfs_err(fs_info,
8137 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
8138 			  physical_offset, devid);
8139 		ret = -EUCLEAN;
8140 		goto out;
8141 	}
8142 
8143 	map = em->map_lookup;
8144 	stripe_len = btrfs_calc_stripe_length(em);
8145 	if (physical_len != stripe_len) {
8146 		btrfs_err(fs_info,
8147 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
8148 			  physical_offset, devid, em->start, physical_len,
8149 			  stripe_len);
8150 		ret = -EUCLEAN;
8151 		goto out;
8152 	}
8153 
8154 	/*
8155 	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
8156 	 * space. Although the kernel can handle it without problems, it is
8157 	 * better to warn the users.
8158 	 */
8159 	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
8160 		btrfs_warn(fs_info,
8161 		"devid %llu physical %llu len %llu inside the reserved space",
8162 			   devid, physical_offset, physical_len);
8163 
8164 	for (i = 0; i < map->num_stripes; i++) {
8165 		if (map->stripes[i].dev->devid == devid &&
8166 		    map->stripes[i].physical == physical_offset) {
8167 			found = true;
8168 			if (map->verified_stripes >= map->num_stripes) {
8169 				btrfs_err(fs_info,
8170 				"too many dev extents for chunk %llu found",
8171 					  em->start);
8172 				ret = -EUCLEAN;
8173 				goto out;
8174 			}
8175 			map->verified_stripes++;
8176 			break;
8177 		}
8178 	}
8179 	if (!found) {
8180 		btrfs_err(fs_info,
8181 	"dev extent physical offset %llu devid %llu has no corresponding chunk",
8182 			physical_offset, devid);
8183 		ret = -EUCLEAN;
8184 	}
8185 
8186 	/* Make sure no dev extent is beyond device boundary */
8187 	dev = btrfs_find_device(fs_info->fs_devices, &args);
8188 	if (!dev) {
8189 		btrfs_err(fs_info, "failed to find devid %llu", devid);
8190 		ret = -EUCLEAN;
8191 		goto out;
8192 	}
8193 
8194 	if (physical_offset + physical_len > dev->disk_total_bytes) {
8195 		btrfs_err(fs_info,
8196 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
8197 			  devid, physical_offset, physical_len,
8198 			  dev->disk_total_bytes);
8199 		ret = -EUCLEAN;
8200 		goto out;
8201 	}
8202 
8203 	if (dev->zone_info) {
8204 		u64 zone_size = dev->zone_info->zone_size;
8205 
8206 		if (!IS_ALIGNED(physical_offset, zone_size) ||
8207 		    !IS_ALIGNED(physical_len, zone_size)) {
8208 			btrfs_err(fs_info,
8209 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
8210 				  devid, physical_offset, physical_len);
8211 			ret = -EUCLEAN;
8212 			goto out;
8213 		}
8214 	}
8215 
8216 out:
8217 	free_extent_map(em);
8218 	return ret;
8219 }
8220 
8221 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8222 {
8223 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8224 	struct extent_map *em;
8225 	struct rb_node *node;
8226 	int ret = 0;
8227 
8228 	read_lock(&em_tree->lock);
8229 	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
8230 		em = rb_entry(node, struct extent_map, rb_node);
8231 		if (em->map_lookup->num_stripes !=
8232 		    em->map_lookup->verified_stripes) {
8233 			btrfs_err(fs_info,
8234 			"chunk %llu has missing dev extent, have %d expect %d",
8235 				  em->start, em->map_lookup->verified_stripes,
8236 				  em->map_lookup->num_stripes);
8237 			ret = -EUCLEAN;
8238 			goto out;
8239 		}
8240 	}
8241 out:
8242 	read_unlock(&em_tree->lock);
8243 	return ret;
8244 }
8245 
8246 /*
8247  * Ensure that all dev extents are mapped to correct chunk, otherwise
8248  * later chunk allocation/free would cause unexpected behavior.
8249  *
8250  * NOTE: This will iterate through the whole device tree, which should be of
8251  * the same size level as the chunk tree.  This slightly increases mount time.
8252  */
8253 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8254 {
8255 	struct btrfs_path *path;
8256 	struct btrfs_root *root = fs_info->dev_root;
8257 	struct btrfs_key key;
8258 	u64 prev_devid = 0;
8259 	u64 prev_dev_ext_end = 0;
8260 	int ret = 0;
8261 
8262 	/*
8263 	 * We don't have a dev_root because we mounted with ignorebadroots and
8264 	 * failed to load the root, so we want to skip the verification in this
8265 	 * case for sure.
8266 	 *
8267 	 * However if the dev root is fine, but the tree itself is corrupted
8268 	 * we'd still fail to mount.  This verification is only to make sure
8269 	 * writes can happen safely, so instead just bypass this check
8270 	 * completely in the case of IGNOREBADROOTS.
8271 	 */
8272 	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8273 		return 0;
8274 
8275 	key.objectid = 1;
8276 	key.type = BTRFS_DEV_EXTENT_KEY;
8277 	key.offset = 0;
8278 
8279 	path = btrfs_alloc_path();
8280 	if (!path)
8281 		return -ENOMEM;
8282 
8283 	path->reada = READA_FORWARD;
8284 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8285 	if (ret < 0)
8286 		goto out;
8287 
8288 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8289 		ret = btrfs_next_leaf(root, path);
8290 		if (ret < 0)
8291 			goto out;
8292 		/* No dev extents at all? Not good */
8293 		if (ret > 0) {
8294 			ret = -EUCLEAN;
8295 			goto out;
8296 		}
8297 	}
8298 	while (1) {
8299 		struct extent_buffer *leaf = path->nodes[0];
8300 		struct btrfs_dev_extent *dext;
8301 		int slot = path->slots[0];
8302 		u64 chunk_offset;
8303 		u64 physical_offset;
8304 		u64 physical_len;
8305 		u64 devid;
8306 
8307 		btrfs_item_key_to_cpu(leaf, &key, slot);
8308 		if (key.type != BTRFS_DEV_EXTENT_KEY)
8309 			break;
8310 		devid = key.objectid;
8311 		physical_offset = key.offset;
8312 
8313 		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8314 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8315 		physical_len = btrfs_dev_extent_length(leaf, dext);
8316 
8317 		/* Check if this dev extent overlaps with the previous one */
8318 		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8319 			btrfs_err(fs_info,
8320 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8321 				  devid, physical_offset, prev_dev_ext_end);
8322 			ret = -EUCLEAN;
8323 			goto out;
8324 		}
8325 
8326 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8327 					    physical_offset, physical_len);
8328 		if (ret < 0)
8329 			goto out;
8330 		prev_devid = devid;
8331 		prev_dev_ext_end = physical_offset + physical_len;
8332 
8333 		ret = btrfs_next_item(root, path);
8334 		if (ret < 0)
8335 			goto out;
8336 		if (ret > 0) {
8337 			ret = 0;
8338 			break;
8339 		}
8340 	}
8341 
8342 	/* Ensure all chunks have corresponding dev extents */
8343 	ret = verify_chunk_dev_extent_mapping(fs_info);
8344 out:
8345 	btrfs_free_path(path);
8346 	return ret;
8347 }
8348 
8349 /*
8350  * Check whether the given block group or device is pinned by any inode being
8351  * used as a swapfile.
8352  */
8353 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8354 {
8355 	struct btrfs_swapfile_pin *sp;
8356 	struct rb_node *node;
8357 
8358 	spin_lock(&fs_info->swapfile_pins_lock);
8359 	node = fs_info->swapfile_pins.rb_node;
8360 	while (node) {
8361 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8362 		if (ptr < sp->ptr)
8363 			node = node->rb_left;
8364 		else if (ptr > sp->ptr)
8365 			node = node->rb_right;
8366 		else
8367 			break;
8368 	}
8369 	spin_unlock(&fs_info->swapfile_pins_lock);
8370 	return node != NULL;
8371 }
8372 
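/*
 * Kthread doing the actual work for btrfs_repair_one_zone(): relocate the
 * affected block group so its data gets rewritten to healthy zones.
 */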
8373 static int relocating_repair_kthread(void *data)
8374 {
8375 	struct btrfs_block_group *cache = data;
8376 	struct btrfs_fs_info *fs_info = cache->fs_info;
8377 	u64 target;
8378 	int ret = 0;
8379 
8380 	target = cache->start;
8381 	btrfs_put_block_group(cache);
8382 
8383 	sb_start_write(fs_info->sb);
8384 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8385 		btrfs_info(fs_info,
8386 			   "zoned: skip relocating block group %llu to repair: EBUSY",
8387 			   target);
8388 		sb_end_write(fs_info->sb);
8389 		return -EBUSY;
8390 	}
8391 
8392 	mutex_lock(&fs_info->reclaim_bgs_lock);
8393 
8394 	/* Ensure block group still exists */
8395 	cache = btrfs_lookup_block_group(fs_info, target);
8396 	if (!cache)
8397 		goto out;
8398 
8399 	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
8400 		goto out;
8401 
8402 	ret = btrfs_may_alloc_data_chunk(fs_info, target);
8403 	if (ret < 0)
8404 		goto out;
8405 
8406 	btrfs_info(fs_info,
8407 		   "zoned: relocating block group %llu to repair IO failure",
8408 		   target);
8409 	ret = btrfs_relocate_chunk(fs_info, target);
8410 
8411 out:
8412 	if (cache)
8413 		btrfs_put_block_group(cache);
8414 	mutex_unlock(&fs_info->reclaim_bgs_lock);
8415 	btrfs_exclop_finish(fs_info);
8416 	sb_end_write(fs_info->sb);
8417 
8418 	return ret;
8419 }
8420 
8421 bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8422 {
8423 	struct btrfs_block_group *cache;
8424 
8425 	if (!btrfs_is_zoned(fs_info))
8426 		return false;
8427 
8428 	/* Do not attempt to repair in degraded state */
8429 	if (btrfs_test_opt(fs_info, DEGRADED))
8430 		return true;
8431 
8432 	cache = btrfs_lookup_block_group(fs_info, logical);
8433 	if (!cache)
8434 		return true;
8435 
8436 	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
8437 		btrfs_put_block_group(cache);
8438 		return true;
8439 	}
8440 
8441 	kthread_run(relocating_repair_kthread, cache,
8442 		    "btrfs-relocating-repair");
8443 
8444 	return true;
8445 }
8446 
8447 int __init btrfs_bioset_init(void)
8448 {
8449 	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
8450 			offsetof(struct btrfs_bio, bio),
8451 			BIOSET_NEED_BVECS))
8452 		return -ENOMEM;
8453 	return 0;
8454 }
8455 
8456 void __cold btrfs_bioset_exit(void)
8457 {
8458 	bioset_exit(&btrfs_bioset);
8459 }
8460